def segment_indices(segment_ids, name=None):
    """Returns a `Tensor` of indices within each segment.

    segment_ids should be a sequence of non-decreasing non-negative integers
    that define a set of segments, e.g. [0, 0, 1, 2, 2, 2] defines 3 segments
    of length 2, 1 and 3. The return value is a `Tensor` containing the indices
    within each segment.

    Example input: [0, 0, 1, 2, 2, 2]
    Example output: [0, 1, 0, 0, 1, 2]

    Args:
      segment_ids: A 1-d `Tensor` containing a non-decreasing sequence of
        non-negative integers with type `tf.int32` or `tf.int64`.
      name: (Optional) A name for this operation.

    Returns:
      A `Tensor` containing the indices within each segment.
    """
    with tf.name_scope(name, 'segment_indices'):
        segment_lengths = tf.segment_sum(tf.ones_like(segment_ids), segment_ids)
        segment_starts = tf.gather(tf.concat([[0], tf.cumsum(segment_lengths)], 0),
                                   segment_ids)
        return (tf.range(tf.size(segment_ids, out_type=segment_ids.dtype)) -
                segment_starts)
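For reference, a minimal usage sketch, assuming a TF 1.x runtime (tf.segment_sum and the two-argument form of tf.name_scope used above were removed in TF 2.x); the input values come from the docstring example:

import tensorflow as tf

# Example from the docstring: three segments of length 2, 1 and 3
segment_ids = tf.constant([0, 0, 1, 2, 2, 2])
indices = segment_indices(segment_ids)

with tf.Session() as sess:
    print(sess.run(indices))  # expected: [0 1 0 0 1 2]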
Python examples of segment_sum() — example source code
def output_embedding_layer(self, node_emb, scope):
    # Path to hyperparameters and configuration settings for the fingerprint output layers
    prefix = 'model/fingerprint_output_layers'
    with tf.variable_scope(scope, reuse=not self.is_training):
        # Compute node-level activations
        node_fp = tf.contrib.layers.fully_connected(
            inputs=node_emb,
            num_outputs=self.getitem('config', 'num_outputs', prefix),
            activation_fn=self.string_to_tf_act(self.getitem('config', 'activation_fn', prefix)),
            weights_initializer=self.weights_initializer_fp_out,
            weights_regularizer=self.weights_regularizer_fp_out,
            biases_initializer=tf.constant_initializer(0.0, tf.float32),
            trainable=self.getitem('config', 'trainable', prefix))
        # Apply dropout (if necessary). Alternatively, could have forced
        # keep_prob to 1.0 when is_training is False
        if self.is_training:
            node_fp = tf.nn.dropout(node_fp, self.getitem('config', 'keep_prob', prefix))
        # Compute the graph-level activation as the sum of the node-level
        # activations of all nodes in the graph
        graph_fp = tf.segment_sum(data=node_fp, segment_ids=self.input['node_graph_map'])
        return graph_fp, node_fp
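To make the final pooling step concrete, here is a hypothetical standalone sketch of the tf.segment_sum call above, assuming TF 1.x and made-up values: node_graph_map assigns each node (row) to its graph, and the segment sum collapses node activations into one row per graph.

import tensorflow as tf

# 5 nodes with 2-d activations; the first 3 belong to graph 0, the rest to graph 1
node_fp = tf.constant([[1., 0.], [2., 1.], [0., 3.], [1., 1.], [4., 0.]])
node_graph_map = tf.constant([0, 0, 0, 1, 1])

# One row per graph: the elementwise sum of that graph's node activations
graph_fp = tf.segment_sum(data=node_fp, segment_ids=node_graph_map)

with tf.Session() as sess:
    print(sess.run(graph_fp))  # [[3. 4.], [5. 1.]]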
def segment_logsumexp(xs, segments):
    """Similar to tf.segment_sum, but computes logsumexp rather than sum."""
    # Stop gradients, following the implementation of tf.reduce_logsumexp
    maxs = tf.stop_gradient(tf.reduce_max(xs, axis=1))
    segment_maxes = tf.segment_max(maxs, segments)
    xs -= tf.expand_dims(tf.gather(segment_maxes, segments), 1)
    sums = tf.reduce_sum(tf.exp(xs), axis=1)
    return tf.log(tf.segment_sum(sums, segments)) + segment_maxes
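A quick numeric sanity check, assuming TF 1.x and NumPy (the input values are made up): each output element should equal the plain logsumexp over all entries in that segment's rows, since subtracting the per-segment max before exponentiating only rescales for numerical stability.

import numpy as np
import tensorflow as tf

xs = tf.constant([[0., 1.], [2., 3.], [4., 5.]])
segments = tf.constant([0, 0, 1])  # rows 0-1 form segment 0, row 2 segment 1
out_t = segment_logsumexp(xs, segments)

with tf.Session() as sess:
    out = sess.run(out_t)

print(out)
# Reference values computed directly; should match out[0] and out[1]
print(np.log(np.exp([0., 1., 2., 3.]).sum()), np.log(np.exp([4., 5.]).sum()))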
def _compute_vert_context_soft(self, edge_factor, vert_factor, reuse=False):
    """
    Attention-based vertex (node) message pooling
    """
    out_edge = utils.pad_and_gather(edge_factor, self.edge_pair_mask_inds[:, 0])
    in_edge = utils.pad_and_gather(edge_factor, self.edge_pair_mask_inds[:, 1])
    # gather corresponding vert factors
    vert_factor_gathered = tf.gather(vert_factor, self.edge_pair_segment_inds)

    # concat outgoing and incoming edges with the gathered vert factors
    out_edge_w_input = tf.concat(concat_dim=1, values=[out_edge, vert_factor_gathered])
    in_edge_w_input = tf.concat(concat_dim=1, values=[in_edge, vert_factor_gathered])

    # compute compatibility scores
    (self.feed(out_edge_w_input)
         .fc(1, relu=False, reuse=reuse, name='out_edge_w_fc')
         .sigmoid(name='out_edge_score'))
    (self.feed(in_edge_w_input)
         .fc(1, relu=False, reuse=reuse, name='in_edge_w_fc')
         .sigmoid(name='in_edge_score'))
    out_edge_w = self.get_output('out_edge_score')
    in_edge_w = self.get_output('in_edge_score')

    # weight the edge factors with the computed weights
    out_edge_weighted = tf.mul(out_edge, out_edge_w)
    in_edge_weighted = tf.mul(in_edge, in_edge_w)

    edge_sum = out_edge_weighted + in_edge_weighted
    vert_ctx = tf.segment_sum(edge_sum, self.edge_pair_segment_inds)
    return vert_ctx
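At its core, this pooling is a weighted segment sum. Below is a stripped-down sketch with hypothetical stand-in tensors (edge_feats for the edge factors, edge_scores for the sigmoid attention weights, seg_inds for edge_pair_segment_inds), assuming TF 1.x: each vertex receives the sum of its attention-weighted edge messages.

import tensorflow as tf

edge_feats = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
edge_scores = tf.constant([[0.5], [1.0], [0.0]])  # per-edge attention weights
seg_inds = tf.constant([0, 0, 1])                 # edge -> receiving vertex

# Weight each edge message, then sum the messages arriving at each vertex
vert_ctx = tf.segment_sum(edge_feats * edge_scores, seg_inds)

with tf.Session() as sess:
    print(sess.run(vert_ctx))  # [[3.5 5.], [0. 0.]]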
def test_SegmentSum(self):
    t = tf.segment_sum(self.random(4, 2, 3), np.array([0, 1, 1, 2]))
    self.check(t)
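For reference, tf.segment_sum reduces along the first axis only, so a (4, 2, 3) input with segment_ids [0, 1, 1, 2] yields shape (3, 2, 3). A small self-contained check, assuming TF 1.x:

import numpy as np
import tensorflow as tf

x = np.arange(24, dtype=np.float32).reshape(4, 2, 3)
t = tf.segment_sum(x, np.array([0, 1, 1, 2]))

with tf.Session() as sess:
    out = sess.run(t)

assert out.shape == (3, 2, 3)          # one slice per segment
assert np.allclose(out[1], x[1] + x[2])  # rows 1 and 2 were summed elementwise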