Example source code for Python's sparse_tensor_dense_matmul()
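tf.sparse_tensor_dense_matmul(sp_a, b) multiplies a SparseTensor by a dense matrix without ever densifying the sparse operand. Before the collected examples, a minimal self-contained sketch (TF 1.x graph mode, hypothetical toy values):

import tensorflow as tf

# Toy 3x3 sparse matrix with two non-zero entries (hypothetical values).
sp_a = tf.SparseTensor(indices=[[0, 0], [2, 1]], values=[1.0, 2.0], dense_shape=[3, 3])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # 3x2 dense

# Equivalent to matmul(dense(sp_a), b), but never materializes the dense form of sp_a.
product = tf.sparse_tensor_dense_matmul(sp_a, b)

with tf.Session() as sess:
    print(sess.run(product))  # [[1. 2.] [0. 0.] [6. 8.]]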

test_tensorrec.py (project: tensorrec, author: jfkirk)
def test_custom_repr_graph(self):
        # Define a custom representation function graph
        def build_tanh_representation_graph(tf_features, n_components, n_features, node_name_ending):
            tf_tanh_weights = tf.Variable(tf.random_normal([n_features, n_components],
                                                           stddev=.5),
                                          name='tanh_weights_%s' % node_name_ending)

            tf_repr = tf.nn.tanh(tf.sparse_tensor_dense_matmul(tf_features, tf_tanh_weights))

            # Return repr layer and variables
            return tf_repr, [tf_tanh_weights]

        # Build a model with the custom representation function
        model = TensorRec(user_repr_graph=build_tanh_representation_graph,
                          item_repr_graph=build_tanh_representation_graph)

        self.assertIsNotNone(model)
tensorflow_backend.py (project: keras, author: GeekLiB)
def dot(x, y):
    '''Multiplies 2 tensors.
    When attempting to multiply an nD tensor
    with an nD tensor, reproduces the Theano behavior
    (e.g. (2, 3).(4, 3, 5) = (2, 4, 5))
    '''
    if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
        x_shape = (-1,) + int_shape(x)[1:]
        y_shape = int_shape(y)
        y_permute_dim = list(range(ndim(y)))
        y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
        xt = tf.reshape(x, [-1, x_shape[-1]])
        yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
        return tf.reshape(tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
    if is_sparse(x):
        out = tf.sparse_tensor_dense_matmul(x, y)
    else:
        out = tf.matmul(x, y)
    return out
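For intuition on the nD branch, a shape-only sketch, assuming the dot() defined above is in scope (shapes are hypothetical):

# Illustrates the Theano-style behavior documented above:
# x: (2, 3), y: (4, 3, 5)  ->  dot(x, y): (2, 4, 5)
x = tf.zeros([2, 3])
y = tf.zeros([4, 3, 5])
out = dot(x, y)  # contracts x's last axis with y's second-to-last axis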
dizzyRNNCellv2.py (project: dizzy_layer, author: Pastromhaug)
def DizzyLayerV2(X, rot_list, n):
    n_prime = int(n*(n-1)/2)
    thetas = tf.Variable(tf.random_uniform([n_prime, 1], 0, 2*math.pi), name="thetas")

    results = [X]
    k = 0
    for sublist in rot_list:
        indices = []
        values = []
        for (a, b) in sublist:
            c = tf.cos(thetas[k])
            s = tf.sin(thetas[k])
            indices = indices + [[a, a], [a, b], [b, a], [b, b]]
            values = values + [c, s, -s, c]
            k += 1
        shape = [n, n]
        v = tf.squeeze(tf.pack(values))  # pack the scalar entries into one tensor, then drop the singleton dim
        R = tf.SparseTensor(indices, v, shape)
        results.append(tf.sparse_tensor_dense_matmul(R, results[-1]))
    return results[-1]
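Each (a, b) pair above contributes a 2x2 Givens-rotation block, and each sublist of rot_list appears to pair up disjoint coordinates, so the assembled sparse R acts as a full rotation without explicit identity entries. A standalone single rotation, by contrast, needs 1s on the untouched diagonal; a sketch with hypothetical sizes:

import math
import tensorflow as tf

# One 4x4 Givens rotation acting on coordinates (a, b) = (0, 2).
theta = tf.constant(math.pi / 4)
c, s = tf.cos(theta), tf.sin(theta)
indices = [[0, 0], [0, 2], [1, 1], [2, 0], [2, 2], [3, 3]]  # row-major order
values = tf.stack([c, s, 1.0, -s, c, 1.0])
R = tf.SparseTensor(indices, values, [4, 4])
X = tf.ones([4, 5])
rotated = tf.sparse_tensor_dense_matmul(R, X)  # rotates the (0, 2) plane, keeps rows 1 and 3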
model_base.py (project: wip-constrained-extractor, author: brain-research)
def __init__(self, model_inputs, rouge_scorer, hps):

    self.word_embedding = tf.get_variable(
        "word_embedding", [hps.vocab_size, hps.word_embedding_size])
    self.article_inputs = tf.nn.embedding_lookup(self.word_embedding,
                                                 model_inputs.sliced_article)

    self.stopworded_abstract_bag = tf.transpose(
        tf.sparse_tensor_dense_matmul(
            rouge_scorer.stem_projector_stopworded,
            tf.to_float(model_inputs.abstract_bag),
            adjoint_a=True,
            adjoint_b=True))

    with tf.variable_scope("article_enc"):

      article_outs = shared_util.deep_birnn(hps, self.article_inputs,
                                            model_inputs.article_len)

      self.article_feats = shared_util.relu(article_outs, hps.hidden_size)

    with tf.variable_scope("scorer"):
      self.word_logits = tf.reshape(
          shared_util.linear(self.article_feats, 1), [hps.batch_size, -1])
representation_graphs.py (project: tensorrec, author: jfkirk)
def relu_representation_graph(tf_features, n_components, n_features, node_name_ending):
    relu_size = 4 * n_components

    # Create variable nodes
    tf_relu_weights = tf.Variable(tf.random_normal([n_features, relu_size], stddev=.5),
                                  name='relu_weights_%s' % node_name_ending)
    tf_relu_biases = tf.Variable(tf.zeros([1, relu_size]),
                                 name='relu_biases_%s' % node_name_ending)
    tf_linear_weights = tf.Variable(tf.random_normal([relu_size, n_components], stddev=.5),
                                    name='linear_weights_%s' % node_name_ending)

    # Create ReLU layer
    tf_relu = tf.nn.relu(tf.add(tf.sparse_tensor_dense_matmul(tf_features, tf_relu_weights),
                                tf_relu_biases))
    tf_repr = tf.matmul(tf_relu, tf_linear_weights)

    # Return repr layer and variables
    return tf_repr, [tf_relu_weights, tf_linear_weights, tf_relu_biases]
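A hypothetical invocation, feeding a sparse placeholder of 100 features and projecting to 10 components (all names here are illustrative):

tf_features = tf.sparse_placeholder(tf.float32, shape=[None, 100])
tf_repr, variables = relu_representation_graph(tf_features, n_components=10,
                                               n_features=100, node_name_ending='user')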
SparseFullyConnectedLayer.py (project: MatchZoo, author: faneshion)
def call(self, x, mask=None):
        #sys.stderr.write("sparse fuylly connected layer input data %s type:%s\n" % (x.name, K.type(x)))
        #sys.stderr.write("sparse fuylly connected layer weight type:%s\n" % (K.type(self.W)))
        print(str(K.ndim(x)))
        return self.activation(tf.sparse_tensor_dense_matmul(x, self.W) + self.b)
layers.py (project: gae, author: tkipf)
def _call(self, inputs):
        x = inputs
        x = tf.nn.dropout(x, 1-self.dropout)
        x = tf.matmul(x, self.vars['weights'])
        x = tf.sparse_tensor_dense_matmul(self.adj, x)
        outputs = self.act(x)
        return outputs
layers.py (project: gae, author: tkipf)
def _call(self, inputs):
        x = inputs
        x = dropout_sparse(x, 1-self.dropout, self.features_nonzero)
        x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])
        x = tf.sparse_tensor_dense_matmul(self.adj, x)
        outputs = self.act(x)
        return outputs
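Both gae layers implement the GCN propagation rule H' = act(A_hat · H · W): a (sparse or dense) feature transform by W followed by a sparse matmul with the normalized adjacency A_hat. A toy sketch of the dense-input variant, with hypothetical shapes:

H = tf.ones([5, 8])                        # 5 nodes, 8 input features (dummy data)
W = tf.Variable(tf.random_normal([8, 4]))  # feature transform, 8 -> 4
adj = tf.SparseTensor([[0, 1], [1, 0]], [1.0, 1.0], [5, 5])  # toy normalized adjacency
H_out = tf.nn.relu(tf.sparse_tensor_dense_matmul(adj, tf.matmul(H, W)))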
model.py (project: GVIN, author: sufengniu; the identical dot() helper appears in two other model.py files in this project)
def dot(x, y, sparse=False):
    if sparse:
        return tf.sparse_tensor_dense_matmul(x, y)
    else:
        return tf.matmul(x, y)
utils.py (project: sdp, author: tansey)
def batch_sparse_tensor_dense_matmul(sp_a, b):
    '''Multiply sp_a by each matrix along the first axis of b.'''
    return tf.map_fn(lambda b_i: tf.sparse_tensor_dense_matmul(sp_a, b_i), b)
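Since sparse_tensor_dense_matmul has no native batch support, tf.map_fn applies the fixed sparse operator to each slice along b's first axis; each slice must itself be a matrix, so b must be at least 3-D. A usage sketch with hypothetical shapes:

sp_a = tf.SparseTensor([[0, 0], [1, 1]], [2.0, 3.0], [2, 2])  # 2x2 sparse operator
b = tf.ones([4, 2, 5])                                        # batch of four 2x5 matrices
out = batch_sparse_tensor_dense_matmul(sp_a, b)               # shape (4, 2, 5)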
test.py (project: sdp, author: tansey)
def __init__(self, length, k, lam):
        with tf.variable_scope(type(self).__name__):
            self.length = length
            self.k = k
            self.lam = lam
            self.D = tf_get_delta(get_sparse_penalty_matrix((length,)), k)
            self.samples = tf.placeholder(tf.int32, [None])
            self.y = tf.one_hot(self.samples, length)
            self.q = tf.Variable([1.]*length)
            self.yhat = tf.nn.softmax(self.q, name='yhat')
            self.acc = tf.reduce_mean(-tf.reduce_sum(
                self.y * tf.log(tf.clip_by_value(self.yhat, 1e-10, 1.0))
                + (1 - self.y) * tf.log(tf.clip_by_value(1 - self.yhat, 1e-10, 1.0)),
                axis=[1]))
            self.reg = tf.reduce_sum(tf.abs(tf.sparse_tensor_dense_matmul(self.D, tf.expand_dims(self.q,-1))))
            self.loss = self.acc + self.lam * self.reg
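The reg term is an L1 penalty on the k-th order differences of q, i.e. ||D·q||_1 (trend filtering). A toy check for k = 0, where D reduces to the first-difference operator (tf_get_delta and get_sparse_penalty_matrix are not shown here, so D is built by hand):

D = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 1], [1, 2]],
                    values=[-1.0, 1.0, -1.0, 1.0],
                    dense_shape=[2, 3])  # first differences of a length-3 q
q = tf.constant([[1.0], [2.0], [4.0]])
reg = tf.reduce_sum(tf.abs(tf.sparse_tensor_dense_matmul(D, q)))  # |2-1| + |4-2| = 3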
bipartite_gcn.py (project: RelationPrediction, author: MichSchli)
def collect_messages(self, messages):
        e_forward_mtr = self.graph_representation.get_entity_forward_v_by_m(normalized=True)
        e_backward_mtr = self.graph_representation.get_entity_backward_v_by_m(normalized=True)
        r_forward_mtr = self.graph_representation.get_relation_forward_v_by_m(normalized=True)
        r_backward_mtr = self.graph_representation.get_relation_backward_v_by_m(normalized=True)

        collected_e_messages = tf.sparse_tensor_dense_matmul(r_forward_mtr, messages[2])
        collected_e_messages += tf.sparse_tensor_dense_matmul(r_backward_mtr, messages[3])
        collected_r_messages = tf.sparse_tensor_dense_matmul(e_forward_mtr, messages[0])
        collected_r_messages += tf.sparse_tensor_dense_matmul(e_backward_mtr, messages[1])

        return collected_e_messages, collected_r_messages
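The v-by-m matrices here are sparse vertex-by-message incidence matrices, so each sparse_tensor_dense_matmul sums the (normalized) per-message vectors into per-vertex aggregates. A toy sketch with hypothetical shapes:

# 3 vertices, 2 messages: message 0 is routed to vertex 1, message 1 to vertex 2.
v_by_m = tf.SparseTensor([[1, 0], [2, 1]], [1.0, 1.0], [3, 2])
messages = tf.constant([[0.5, 0.5], [1.0, 2.0]])  # one row per message
collected = tf.sparse_tensor_dense_matmul(v_by_m, messages)
# row 0 is zeros; rows 1 and 2 receive messages 0 and 1 respectively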
gcn_diag.py (project: RelationPrediction, author: MichSchli; the identical combine_messages also appears in gcn_basis_times_diag.py and gcn_basis_plus_diag.py)
def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'):
        mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated'))
        mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated'))

        collected_messages_f = tf.sparse_tensor_dense_matmul(mtr_f, forward_messages)
        collected_messages_b = tf.sparse_tensor_dense_matmul(mtr_b, backward_messages)

        new_embedding = self_loop_messages + collected_messages_f + collected_messages_b + self.b

        if self.use_nonlinearity:
            new_embedding = tf.nn.relu(new_embedding)

        return new_embedding
gcn_only_bias.py (project: RelationPrediction, author: MichSchli)
def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'):
        mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated'))
        mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated'))

        collected_messages_f = tf.sparse_tensor_dense_matmul(mtr_f, forward_messages)
        collected_messages_b = tf.sparse_tensor_dense_matmul(mtr_b, backward_messages)

        updated_vertex_embeddings = collected_messages_f + collected_messages_b

        if self.use_nonlinearity:
            activated = tf.nn.relu(updated_vertex_embeddings)
        else:
            activated = updated_vertex_embeddings

        return activated
gcn_basis.py (project: RelationPrediction, author: MichSchli; gcn_basis_concat.py contains an identical combine_messages)
def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'):
        mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated'))
        mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated'))

        collected_messages_f = tf.sparse_tensor_dense_matmul(mtr_f, forward_messages)
        collected_messages_b = tf.sparse_tensor_dense_matmul(mtr_b, backward_messages)

        updated_vertex_embeddings = collected_messages_f + collected_messages_b

        if self.use_nonlinearity:
            activated = tf.nn.relu(updated_vertex_embeddings + self_loop_messages)
        else:
            activated = updated_vertex_embeddings + self_loop_messages

        return activated
doRotations.py (project: dizzy_layer, author: Pastromhaug)
def doRotations(X, rotations):
    # print('number of rotations in doRotations: %d' % len(rotations))
    with vs.variable_scope("Do_Rotations"):
        for sparse_rot in rotations:
            X = tf.sparse_tensor_dense_matmul(sparse_rot, X)
        return X
doRotationsSigmas.py (project: dizzy_layer, author: Pastromhaug)
def doRotationsSigmas(X, rotations, num_units):
    with vs.variable_scope("Do_Rotations"):
        sigma = vs.get_variable(
            "Sigma", [num_units,1],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(value=1.0, dtype=tf.float32))
        sigma_spot = int(len(rotations)/2)
        for i, sparse_rot in enumerate(rotations):
            if i == sigma_spot:
                X = X * sigma
            X = tf.sparse_tensor_dense_matmul(sparse_rot, X)
        return X, sigma
rotationTransform.py (project: dizzy_layer, author: Pastromhaug)
def rotationTransform(X, n, scope, num_rots=None):
    num_rots = num_rots or (n-1)
    n_prime = int(n*(n-1)//2*num_rots/(n-1))
    outputs = []

    with vs.variable_scope(scope or "RotationTransform"):

        for i, (name, x) in enumerate(X):
            (indices, values_idxs) = rotationPreprocess(n, num_rots)
            thetas = vs.get_variable(initializer=tf.random_uniform([n_prime, 1], 0, 2*math.pi),
                    name="Thetas"+str(i)+name, dtype=tf.float32)

            cos = tf.cos(thetas)
            sin = tf.sin(thetas)
            nsin = tf.neg(sin)

            thetas_concat = tf.concat(0, [cos,sin,nsin])

            gathered_values = tf.squeeze(tf.gather(thetas_concat, values_idxs))
            shape = tf.constant([n, n], dtype=tf.int64)

            splt_values = tf.split(0, num_rots, gathered_values)
            splt_indices = tf.split(0, num_rots, indices)

            for j in range(num_rots):  # use j: i is already the enumerate index above
                curr_indices = splt_indices[j]
                curr_values = splt_values[j]
                sparse_rot = tf.SparseTensor(indices=curr_indices, values=curr_values, shape=shape)
                x = tf.sparse_tensor_dense_matmul(sparse_rot, x)
            outputs.append(x)
    return outputs
models_siamese.py (project: gcn_metric_learning, author: sk1712)
def chebyshev5(self, x, L, Fout, K):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat(0, [x, x_])  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
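The loop realizes the Chebyshev recurrence T_0(L)x = x, T_1(L)x = L·x, T_k(L)x = 2·L·T_{k-1}(L)x - T_{k-2}(L)x on the rescaled Laplacian, so a K-tap spectral filter costs only K sparse matmuls. A minimal stand-alone sketch with a toy operator:

L_toy = tf.SparseTensor([[0, 0], [1, 1]], [0.5, -0.5], [2, 2])  # stand-in rescaled Laplacian
x0 = tf.ones([2, 3])                                            # T_0(L) x = x
x1 = tf.sparse_tensor_dense_matmul(L_toy, x0)                   # T_1(L) x = L x
x2 = 2 * tf.sparse_tensor_dense_matmul(L_toy, x1) - x0          # T_2 = 2 L T_1 - T_0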
models_siamese.py (project: gcn_metric_learning, author: sk1712)
def chebyshev5(self, x, L, Fout, K, regularization=False):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat(0, [x, x_])  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=regularization)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
neural_fingerprints.py (project: neural_fingerprints_tf, author: fllinares)
def graph_convolution_layer(self, node_emb, scope, edge_emb=None):
        # Path to hyperparameters and configuration settings for the graph convolutional layers
        prefix = 'model/graph_conv_layers'

        with tf.variable_scope(scope, reuse=not self.is_training):
            # Compute the extended node embedding as the concatenation of the original node embedding and the sum of
            # the node embeddings of all distance-one neighbors in the graph.
            ext_node_emb = tf.concat([node_emb, tf.sparse_tensor_dense_matmul(self.input['adj_mat'], node_emb)], axis=1)
            # If edge labels are to be considered by the model, concatenate as well the (pre-computed) sum of the
            # feature vectors labelling all edges connected to each node
            if edge_emb is not None:
                ext_node_emb = tf.concat([ext_node_emb, edge_emb], axis=1)

            # Compute output by applying a fully connected layer to the extended node embedding
            out = tf.contrib.layers.fully_connected(inputs=ext_node_emb,
                                                    num_outputs=self.getitem('config', 'num_outputs', prefix),
                                                    activation_fn=self.string_to_tf_act(self.getitem('config', 'activation_fn', prefix)),
                                                    weights_initializer=self.weights_initializer_graph_conv,
                                                    weights_regularizer=self.weights_regularizer_graph_conv,
                                                    biases_initializer=tf.constant_initializer(0.1, tf.float32),
                                                    normalizer_fn=self.normalizer_fn_graph_conv,
                                                    normalizer_params=self.normalizer_params_graph_conv,
                                                    trainable=self.getitem('config', 'trainable', prefix))

            # Apply dropout (if necessary). Alternatively, could have also forced keep_prob to 1.0 when is_training is
            # False
            if self.is_training:
                out = tf.nn.dropout(out, self.getitem('config', 'keep_prob', prefix))

        return out
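The sparse matmul with adj_mat yields, per node, the sum of its neighbors' embeddings, which is then concatenated with the node's own embedding to form the extended embedding. A toy check, with hypothetical values:

adj_mat = tf.SparseTensor([[0, 1], [1, 0]], [1.0, 1.0], [2, 2])   # two mutually linked nodes
node_emb = tf.constant([[1.0, 2.0], [3.0, 4.0]])
neighbor_sum = tf.sparse_tensor_dense_matmul(adj_mat, node_emb)   # each row = the other node's emb
ext_node_emb = tf.concat([node_emb, neighbor_sum], axis=1)        # shape (2, 4)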
neural_fingerprints.py (project: neural_fingerprints_tf, author: fllinares)
def build_graph_fingerprint(self):
        # Total number of graph convolution layers
        n_layers = self.getitem('config', 'model/graph_conv_layers/n_layers')

        # Create output dictionaries for graph convolutional layers and fingerprint output layers
        self.output['graph_conv_layers'] = {}
        self.output['fingerprint_output_layers'] = {}

        # Input node embeddings
        node_emb = self.input['node_features']

        # Pre-compute the sum of the feature vectors labelling all edges connected to each node (if necessary)
        self.output['graph_conv_layers']['edge_emb'] = None
        if self.getitem('config', 'model/input/edge_features/use') and self.num_edge_features > 0:
            self.output['graph_conv_layers']['edge_emb'] = tf.sparse_tensor_dense_matmul(self.input['inc_mat'],
                                                                                         self.input['edge_features'])
        # Compute node and graph level fingerprints for the input layer
        graph_fp, node_fp = self.output_embedding_layer(node_emb, 'output_embedding_layer_0')

        # List of node-level embeddings per layer (output of graph convolutional layers), node-level fingerprints per
        # layer (per-node output of fingerprint output layers) and graph-level fingerprints per layer (total output of
        # fingerprint output layers)
        self.output['graph_conv_layers']['node_emb'] = [node_emb]
        self.output['fingerprint_output_layers']['node_fp'] = [node_fp]
        self.output['fingerprint_output_layers']['graph_fp'] = [graph_fp]

        # Create all graph convolutional layers and their respective fingerprint output layers
        for layer_idx in xrange(1, n_layers+1):
            node_emb = self.graph_convolution_layer(node_emb=self.output['graph_conv_layers']['node_emb'][-1],
                                                    scope='graph_conv_layer_%d' % layer_idx,
                                                    edge_emb=self.output['graph_conv_layers']['edge_emb'])
            graph_fp, node_fp = self.output_embedding_layer(node_emb=self.output['graph_conv_layers']['node_emb'][-1],
                                                            scope='output_embedding_layer_%d' % layer_idx)
            # Append outputs to lists
            self.output['graph_conv_layers']['node_emb'].append(node_emb)
            self.output['fingerprint_output_layers']['node_fp'].append(node_fp)
            self.output['fingerprint_output_layers']['graph_fp'].append(graph_fp)

        # Obtain graph fingerprint as the sum of the graph activations across all layers
        self.output['fingerprint_output_layers']['fingerprint'] = tf.add_n(self.output['fingerprint_output_layers']['graph_fp'])
rouge_scorer.py (project: wip-constrained-extractor, author: brain-research)
def get_rouge_recall_suff_stats(self, pred_counts, gold_counts):
    """Get overlapping predicted counts and gold counts for pair of bags."""
    # Map words to their stems.
    pred_stems = tf.transpose(
        tf.sparse_tensor_dense_matmul(
            self.stem_projector_stopworded,
            pred_counts,
            adjoint_a=True,
            adjoint_b=True))
    # <UNK> tokens count as always missing from predicted counts but not
    # from gold counts to avoid overly optimistic evaluation.
    gold_stems = tf.transpose(
        tf.sparse_tensor_dense_matmul(
            self.stem_projector_stopworded_keep_unk,
            gold_counts,
            adjoint_a=True,
            adjoint_b=True))

    # Only count max of 1 point for each overlapping word type
    pred_stems = tf.minimum(1.0, pred_stems)
    gold_stems = tf.minimum(1.0, gold_stems)

    overlaps = tf.reduce_sum(tf.minimum(pred_stems, gold_stems), 1)

    gold_counts = tf.reduce_sum(gold_stems, 1)

    return overlaps, gold_counts
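The adjoint_a/adjoint_b flags transpose the operands inside the kernel (for real-valued tensors, adjoint is just transpose), so sparse_tensor_dense_matmul(A, B, adjoint_a=True, adjoint_b=True) computes A^T·B^T = (B·A)^T, and the outer tf.transpose recovers B·A without ever materializing a sparse transpose. A minimal shape check with toy values:

A = tf.SparseTensor([[0, 1]], [2.0], [2, 3])  # 2x3 sparse
B = tf.ones([4, 2])                           # 4x2 dense
out = tf.transpose(tf.sparse_tensor_dense_matmul(A, B, adjoint_a=True, adjoint_b=True))
# out has shape (4, 3) and equals tf.matmul(B, tf.sparse_tensor_to_dense(A))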
models.py (project: product-nets, author: Atomu2014)
def __init__(self, input_dim=None, output_dim=1, init_path=None, opt_algo='gd', learning_rate=1e-2, l2_weight=0,
                 random_seed=None):
        Model.__init__(self)
        init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                     ('b', [output_dim], 'zero', dtype)]
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = tf.sparse_placeholder(dtype)
            self.y = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)

            w = self.vars['w']
            b = self.vars['b']
            xw = tf.sparse_tensor_dense_matmul(self.X, w)
            logits = tf.reshape(xw + b, [-1])
            self.y_prob = tf.sigmoid(logits)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=logits)) + \
                        l2_weight * tf.nn.l2_loss(xw)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
models.py (project: product-nets, author: Atomu2014)
def __init__(self, input_dim=None, output_dim=1, factor_order=10, init_path=None, opt_algo='gd', learning_rate=1e-2,
                 l2_w=0, l2_v=0, random_seed=None):
        Model.__init__(self)
        init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                     ('v', [input_dim, factor_order], 'xavier', dtype),
                     ('b', [output_dim], 'zero', dtype)]
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = tf.sparse_placeholder(dtype)
            self.y = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)

            w = self.vars['w']
            v = self.vars['v']
            b = self.vars['b']

            X_square = tf.SparseTensor(self.X.indices, tf.square(self.X.values), tf.to_int64(tf.shape(self.X)))
            xv = tf.square(tf.sparse_tensor_dense_matmul(self.X, v))
            p = 0.5 * tf.reshape(
                tf.reduce_sum(xv - tf.sparse_tensor_dense_matmul(X_square, tf.square(v)), 1),
                [-1, output_dim])
            xw = tf.sparse_tensor_dense_matmul(self.X, w)
            logits = tf.reshape(xw + b + p, [-1])
            self.y_prob = tf.sigmoid(logits)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)) + \
                        l2_w * tf.nn.l2_loss(xw) + \
                        l2_v * tf.nn.l2_loss(xv)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
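The p term uses the standard factorization-machine identity, which turns the O(n^2) sum of pairwise interactions into a difference of squares computable with two sparse matmuls: sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i v_{i,f}^2 x_i^2 ]. A NumPy sanity check with hypothetical numbers:

import numpy as np

x = np.array([1.0, 2.0])
v = np.array([[0.5], [1.5]])  # two features, one latent factor
pairwise = sum(v[i, 0] * v[j, 0] * x[i] * x[j]
               for i in range(2) for j in range(i + 1, 2))
identity = 0.5 * ((x @ v) ** 2 - (x ** 2) @ (v ** 2)).sum()
assert np.isclose(pairwise, identity)  # both equal 1.5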

