Python examples of sparse_tensor_dense_matmul()
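All of the snippets below target the TensorFlow 1.x API, where tf.sparse_tensor_dense_matmul(sp_a, b) multiplies a tf.SparseTensor by a dense matrix and returns a dense result. A minimal, self-contained sketch (the values are illustrative only):

import tensorflow as tf

# A 2x3 sparse matrix with two non-zero entries, times a 3x2 dense matrix.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[2, 3])
dense = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
product = tf.sparse_tensor_dense_matmul(sp, dense)  # dense result, shape [2, 2]

with tf.Session() as sess:
    print(sess.run(product))  # [[ 1.  2.] [10. 12.]]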

Source file: linear.py (project: TensorGraph, author: hycis)
def _train_fprop(self, state_below):
        idx, val = state_below
        # Build the sparse input matrix; tf.SparseTensor takes dense_shape, not shape.
        X = tf.SparseTensor(tf.cast(idx, 'int64'), val, dense_shape=[self.batchsize, self.prev_dim])
        # sparse_tensor_dense_matmul requires indices in canonical row-major order.
        X_order = tf.sparse_reorder(X)
        XW = tf.sparse_tensor_dense_matmul(X_order, self.W, adjoint_a=False, adjoint_b=False)
        return tf.add(XW, self.b)
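Note the tf.sparse_reorder call: sparse_tensor_dense_matmul assumes the SparseTensor's indices are already in canonical row-major order, so reordering first protects against indices fed through the placeholder in arbitrary order.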
Source file: representation_graphs.py (project: tensorrec, author: jfkirk)
def linear_representation_graph(tf_features, n_components, n_features, node_name_ending):

    # Create variable nodes
    tf_linear_weights = tf.Variable(tf.random_normal([n_features, n_components], stddev=.5),
                                    name='linear_weights_%s' % node_name_ending)
    tf_repr = tf.sparse_tensor_dense_matmul(tf_features, tf_linear_weights)

    # Return repr layer and variables
    return tf_repr, [tf_linear_weights]
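A hypothetical caller (names and shapes here are illustrative, not from tensorrec):

import tensorflow as tf

n_features, n_components = 1000, 10
tf_features = tf.sparse_placeholder(tf.float32)  # [n_examples, n_features] sparse input
tf_repr, variables = linear_representation_graph(tf_features, n_components, n_features, 'user')
# tf_repr: [n_examples, n_components]; variables: list of weights to regularize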
Source file: model.py (project: MOQA, author: pprakhar30)
def calc_log_loss(self, Pairwise, Question, Answer, Review, TermtoTermR, TermtoTermP, Question_I, Answer_I, Review_I):
        shape1 = tf.shape(Pairwise)
        shape2 = tf.shape(Answer)

        nq = shape1[0]  # number of questions
        nr = shape1[1]  # number of reviews
        na = shape2[1]  # number of answers

        # Pairwise relevance term: project pairwise features through theta.
        pairwise = tf.reshape(Pairwise, [-1, self.PairwiseDim])
        pairwise = tf.reshape(tf.matmul(pairwise, self.theta), [nq, nr])

        # Term-to-term relevance between questions and reviews.
        termTotermR = tf.sparse_reshape(TermtoTermR, [-1, self.V])
        termTotermR = tf.reshape(tf.sparse_tensor_dense_matmul(termTotermR, self.RelvPar), [nq, nr])

        # Bilinear relevance: project questions and reviews into a shared space.
        QProj = tf.sparse_tensor_dense_matmul(Question_I, self.A)
        RProjR = tf.sparse_tensor_dense_matmul(Review_I, self.B)
        BilinearR = tf.matmul(QProj, tf.transpose(RProjR))

        # Per-question distribution over reviews (the expert weights).
        Relevance = tf.nn.softmax(pairwise + termTotermR + BilinearR)

        # Term-to-term and bilinear prediction terms for answers vs. reviews.
        termTotermP = tf.sparse_reshape(TermtoTermP, [-1, self.V])
        termTotermP = tf.reshape(tf.sparse_tensor_dense_matmul(termTotermP, self.PredPar), [nq, na, nr])

        AProj = tf.sparse_tensor_dense_matmul(tf.sparse_reshape(Answer_I, [-1, self.V]), self.X)
        RProjP = tf.sparse_tensor_dense_matmul(Review_I, self.Y)
        BilinearP = tf.reshape(tf.matmul(AProj, tf.transpose(RProjP)), [nq, na, nr])

        # Score the true answer (index 0) against each alternative answer.
        Prediction = BilinearP + termTotermP
        Prediction = tf.expand_dims(Prediction[:, 0, :], 1) - Prediction
        Prediction = Prediction[:, 1:, :]
        Prediction = tf.sigmoid(Prediction)

        # Mixture of experts: weight each review's prediction by its relevance.
        MoE = tf.reduce_sum(tf.multiply(Prediction, tf.expand_dims(Relevance, axis=1)), axis=2)
        accuracy_count = tf.cast(tf.shape(tf.where(MoE > 0.5))[0], tf.float64)
        count = nq * na

        # L2-regularized log-likelihood; the negative is returned for minimization.
        log_likelihood = tf.reduce_sum(tf.log(MoE))
        R1 = tf.reduce_sum(tf.square(self.A)) + tf.reduce_sum(tf.square(self.B))
        R2 = tf.reduce_sum(tf.square(self.X)) + tf.reduce_sum(tf.square(self.Y))

        log_likelihood -= self.Lambda * (R1 + R2)

        return -1 * log_likelihood, MoE, Relevance
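The aggregation at the end treats each review as an expert: MoE[q, a] = sum_r Relevance[q, r] * Prediction[q, a, r], so each answer's probability is a relevance-weighted average of per-review sigmoid scores, and the method returns the negative regularized log-likelihood for minimization.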
Source file: utils.py (project: RFHO, author: lucfra)
def matmul(a, b, benchmark=True, name='mul'):  # TODO maybe put inside dot
    """
    Interface function for matmul that also works with sparse tensors.

    :param a: left operand; may be a tf.SparseTensor or a dense Tensor
    :param b: right operand; a dense Tensor
    :param benchmark: if True and `a` is sparse, time the candidate ops and keep the fastest
    :param name: name scope for the created ops
    :return: the product op (wrapped by `wsr`)
    """
    a_is_sparse = isinstance(a, tf.SparseTensor)
    with tf.name_scope(name):
        if a_is_sparse:
            mul = wsr(tf.matmul(tf.sparse_tensor_to_dense(a, default_value=0.), b))
            if benchmark:
                mul_ops = [wsr(tf.sparse_tensor_dense_matmul(a, b)),
                           mul,  # others ?
                           # wsr(tf.nn.embedding_lookup_sparse())  # I couldn't figure out how this works......
                           ]

                def _avg_exe_times(op, repetitions):
                    from time import time
                    ex_times = []
                    for _ in range(repetitions):
                        st = time()
                        op.eval()
                        ex_times.append(time() - st)
                    return np.mean(ex_times[1:]), np.max(ex_times), np.min(ex_times)

                with tf.Session(config=CONFIG_GPU_GROWTH).as_default():
                    tf.global_variables_initializer().run()  # TODO here should only initialize necessary variable
                    # (downstream in the computation graph)

                    statistics = {op: _avg_exe_times(op, repetitions=4) for op in mul_ops}

                [print(k, v) for k, v in statistics.items()]

                mul = sorted(statistics.items(), key=lambda v: v[1][0])[0][0]  # returns best one w.r.t. avg exe time

                print(mul, 'selected')

        else:
            mul = wsr(tf.matmul(a, b))
    return mul
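With benchmark=False the sparse branch simply densifies `a`; with benchmark=True it times tf.sparse_tensor_dense_matmul against the dense fallback in a throwaway session and keeps the faster op. A hypothetical call (assuming the module-level wsr helper and imports are in scope):

sp = tf.SparseTensor(indices=[[0, 1]], values=[3.0], dense_shape=[2, 2])
w = tf.ones([2, 4])
prod = matmul(sp, w, benchmark=False)  # densifies `sp`, multiplies, no timing run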


Source file: core.py (project: muffnn, author: civisanalytics)
def affine(input_tensor, output_size, bias=True, bias_start=0.0,
           input_size=None, scope="affine", sparse_input=False):
    """Add an affine transformation of `input_tensor` to the current graph.

    Note: This op is loosely based on tensorflow.python.ops.rnn_cell.linear.

    An affine transformation is a linear transformation with a shift,
    `t = tf.matmul(input_tensor, W) + b`.

    Parameters
    ----------
    input_tensor : tensorflow Tensor object, rank 2
        Input tensor to be transformed.
    output_size : int
        The output will be size [a, output_size] where `input_tensor` has
        shape [a, b].
    bias : bool, optional
        If True, apply a bias to the transformation. If False, only the linear
        transformation is applied (i.e., `t = tf.matmul(input_tensor, W)`).
    bias_start : float, optional
        The initial value for the bias `b`.
    input_size : int, optional
        Second dimension of the rank 2 input tensor. Required for sparse input
        tensors.
    sparse_input : bool, optional
        Set to True if `input_tensor` is sparse.

    Returns
    -------
    t : tensorflow tensor object
        The affine transformation of `input_tensor`.
    """

    # The input size is needed for sparse matrices.
    if input_size is None:
        input_size = input_tensor.get_shape().as_list()[1]

    with tf.variable_scope(scope):
        W_0 = tf.get_variable(
            "weights0",
            [input_size, output_size])
        # If the input is sparse, then use a special matmul routine.
        matmul = tf.sparse_tensor_dense_matmul if sparse_input else tf.matmul
        t = matmul(input_tensor, W_0)

        if bias:
            b_0 = tf.get_variable(
                "bias0",
                [output_size],
                initializer=tf.constant_initializer(bias_start))
            t = tf.add(t, b_0)
    return t
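Because a sparse placeholder usually has no usable static shape, `input_size` must be passed explicitly when `sparse_input=True`. A hypothetical call (shapes are illustrative):

x = tf.sparse_placeholder(tf.float32)  # rank-2 sparse input with 1000 features
t = affine(x, output_size=32, input_size=1000, sparse_input=True)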
Source file: models.py (project: product-nets, author: Atomu2014)
def __init__(self, field_sizes=None, embed_size=10, layer_sizes=None, layer_acts=None, drop_out=None,
                 embed_l2=None, layer_l2=None, init_path=None, opt_algo='gd', learning_rate=1e-2, random_seed=None):
        Model.__init__(self)
        init_vars = []
        num_inputs = len(field_sizes)
        for i in range(num_inputs):
            init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
        node_in = num_inputs * embed_size
        for i in range(len(layer_sizes)):
            init_vars.append(('w%d' % i, [node_in, layer_sizes[i]], 'xavier', dtype))
            init_vars.append(('b%d' % i, [layer_sizes[i]], 'zero', dtype))
            node_in = layer_sizes[i]
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
            self.y = tf.placeholder(dtype)
            self.keep_prob_train = 1 - np.array(drop_out)
            self.keep_prob_test = np.ones_like(drop_out)
            self.layer_keeps = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
            xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) for i in range(num_inputs)], 1)
            l = xw

            for i in range(len(layer_sizes)):
                wi = self.vars['w%d' % i]
                bi = self.vars['b%d' % i]
                print(l.shape, wi.shape, bi.shape)
                l = tf.nn.dropout(
                    utils.activate(
                        tf.matmul(l, wi) + bi,
                        layer_acts[i]),
                    self.layer_keeps[i])

            l = tf.squeeze(l)
            self.y_prob = tf.sigmoid(l)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
            # Apply L2 penalties, each guarded by its own coefficient.
            if embed_l2 is not None:
                self.loss += embed_l2 * tf.nn.l2_loss(xw)
            if layer_l2 is not None:
                for i in range(len(layer_sizes)):
                    wi = self.vars['w%d' % i]
                    self.loss += layer_l2[i] * tf.nn.l2_loss(wi)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
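The keep_prob arrays above are meant to be fed through the layer_keeps placeholder, enabling dropout during training and disabling it at test time. A hypothetical training step built on these fields (assuming utils.get_optimizer returns a minimize op and sparse_inputs is a list of tf.SparseTensorValue, one per field):

feed = {self.y: labels, self.layer_keeps: self.keep_prob_train}
for i, x in enumerate(sparse_inputs):
    feed[self.X[i]] = x
_, loss = self.sess.run([self.optimizer, self.loss], feed_dict=feed)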
Source file: models.py (project: product-nets, author: Atomu2014)
def __init__(self, field_sizes=None, embed_size=10, filter_sizes=None, layer_acts=None, drop_out=None,
                 init_path=None, opt_algo='gd', learning_rate=1e-2, random_seed=None):
        Model.__init__(self)
        init_vars = []
        num_inputs = len(field_sizes)
        for i in range(num_inputs):
            init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
        init_vars.append(('f1', [embed_size, filter_sizes[0], 1, 2], 'xavier', dtype))
        init_vars.append(('f2', [embed_size, filter_sizes[1], 2, 2], 'xavier', dtype))
        init_vars.append(('w1', [2 * 3 * embed_size, 1], 'xavier', dtype))
        init_vars.append(('b1', [1], 'zero', dtype))

        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
            self.y = tf.placeholder(dtype)
            self.keep_prob_train = 1 - np.array(drop_out)
            self.keep_prob_test = np.ones_like(drop_out)
            self.layer_keeps = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
            xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) for i in range(num_inputs)], 1)
            l = xw

            # Reshape the concatenated embeddings into a [batch, embed_size,
            # num_inputs, 1] "image" so convolution runs across the field axis.
            l = tf.transpose(tf.reshape(l, [-1, num_inputs, embed_size, 1]), [0, 2, 1, 3])
            f1 = self.vars['f1']
            l = tf.nn.conv2d(l, f1, [1, 1, 1, 1], 'SAME')
            # Pool the field axis down to num_inputs / 2 positions per channel.
            l = tf.transpose(
                utils.max_pool_4d(
                    tf.transpose(l, [0, 1, 3, 2]),
                    int(num_inputs / 2)),
                [0, 1, 3, 2])
            f2 = self.vars['f2']
            l = tf.nn.conv2d(l, f2, [1, 1, 1, 1], 'SAME')
            # Second pooling stage keeps 3 positions, matching the w1 shape above.
            l = tf.transpose(
                utils.max_pool_4d(
                    tf.transpose(l, [0, 1, 3, 2]), 3),
                [0, 1, 3, 2])
            l = tf.nn.dropout(
                utils.activate(
                    tf.reshape(l, [-1, embed_size * 3 * 2]),
                    layer_acts[0]),
                self.layer_keeps[0])
            w1 = self.vars['w1']
            b1 = self.vars['b1']
            l = tf.matmul(l, w1) + b1

            l = tf.squeeze(l)
            self.y_prob = tf.sigmoid(l)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
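Tracing shapes through this CCPM-style stack: the concatenated embeddings [batch, num_inputs * embed_size] are reshaped to [batch, embed_size, num_inputs, 1], f1 widens the channel axis to 2, the first pool shrinks the field axis to num_inputs / 2, f2 keeps 2 channels, and the second pool leaves 3 positions, which is exactly the embed_size * 3 * 2 feature count that w1 expects.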

