def _generator(self, z, y, is_training):
    '''
    Input:
        z: latent code; shape=[b, c]
        y: speaker label; shape=[b,], dtype=int64
    Return:
        xh: reconstructed version of `x` (the input to the VAE)
    '''
    self.speaker_repr = self._l2_regularized_embedding(
        n_class=self.arch['y_dim'],
        h_dim=self.arch['yemb_dim'],
        scope_name='y_embedding',
        var_name='y_emb')
    c = tf.nn.embedding_lookup(self.speaker_repr, y)
    x = tf.concat([z, c], -1)
    for o in self.arch['decoder']['output']:
        x = tf.layers.dense(x, units=o, activation=lrelu)
        # x = tf.layers.batch_normalization(x, training=is_training)
    return tf.layers.dense(x, units=self.arch['x_dim'], name='xh')
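The conditioning pattern above (look up a label embedding, then concatenate it onto the latent code) is easy to check in isolation. A minimal sketch, assuming TensorFlow 1.x; all sizes are illustrative:

import tensorflow as tf

# Hypothetical sizes: 4 speakers, 8-dim embeddings, 16-dim latent code.
emb = tf.get_variable('y_emb_demo', shape=[4, 8])
z = tf.random_normal([2, 16])            # latent code, shape [b, c]
y = tf.constant([0, 3], dtype=tf.int64)  # speaker labels, shape [b,]
c = tf.nn.embedding_lookup(emb, y)       # [b, 8]
x = tf.concat([z, c], axis=-1)           # [b, 24]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.shape(x)))         # [ 2 24]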
Python concat() usage examples (source code)
def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
        batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
        # Get the normalized coordinates of the bounding boxes
        bottom_shape = tf.shape(bottom)
        height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
        width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
        x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
        y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
        x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
        y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
        # Gradients wouldn't be back-propagated to the rois anyway;
        # stop_gradient just saves time.
        bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
        pre_pool_size = cfg.POOLING_SIZE * 2
        crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
                                         [pre_pool_size, pre_pool_size], name="crops")
        return slim.max_pool2d(crops, [2, 2], padding='SAME')
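The boxes passed to tf.image.crop_and_resize must be rows of normalized [y1, x1, y2, x2] coordinates, which is exactly what the tf.concat above assembles. A standalone sketch with toy values (TensorFlow 1.x assumed):

import tensorflow as tf

image = tf.zeros([1, 100, 100, 3])           # [batch, height, width, channels]
# One box covering the top-left quadrant, in normalized coordinates.
y1 = tf.constant([[0.0]])
x1 = tf.constant([[0.0]])
y2 = tf.constant([[0.5]])
x2 = tf.constant([[0.5]])
boxes = tf.concat([y1, x1, y2, x2], axis=1)  # [num_boxes, 4]
box_ind = tf.constant([0], dtype=tf.int32)   # which batch image each box crops
crops = tf.image.crop_and_resize(image, boxes, box_ind, crop_size=[14, 14])

with tf.Session() as sess:
    print(sess.run(tf.shape(crops)))         # [ 1 14 14  3]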
def bilateral_slice(grid, guide, name=None):
    """Slices into a bilateral grid using the guide map.

    Args:
        grid: (Tensor) [batch_size, grid_h, grid_w, depth, n_outputs]
            grid to slice from.
        guide: (Tensor) [batch_size, h, w] guide map to slice along.
        name: (string) name for the operation.
    Returns:
        sliced: (Tensor) [batch_size, h, w, n_outputs] sliced output.
    """
    with tf.name_scope(name):
        gridshape = grid.get_shape().as_list()
        if len(gridshape) == 6:
            _, _, _, _, n_out, n_in = gridshape
            grid = tf.concat(tf.unstack(grid, None, axis=5), 4)
        sliced = hdrnet_ops.bilateral_slice(grid, guide)
        if len(gridshape) == 6:
            sliced = tf.stack(tf.split(sliced, n_in, axis=3), axis=4)
        return sliced
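The unstack/concat pair folds the trailing n_in axis of a 6-D grid into the channel axis, so the underlying op only ever sees a 5-D grid; split/stack inverts the fold afterwards. The same round trip in isolation (TensorFlow 1.x assumed, toy shapes):

import tensorflow as tf

grid = tf.zeros([2, 3, 4, 5, 6, 7])                       # [..., n_out=6, n_in=7]
folded = tf.concat(tf.unstack(grid, axis=5), axis=4)      # fold n_in into channels
restored = tf.stack(tf.split(folded, 7, axis=4), axis=5)  # invert the fold

print(folded.shape)    # (2, 3, 4, 5, 42)
print(restored.shape)  # (2, 3, 4, 5, 6, 7)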
def discriminate(self, image, Y):
    print("Initializing the discriminator")
    print("Y shape", Y.get_shape())
    yb = tf.reshape(Y, tf.stack([self.batch_size, 1, 1, self.dim_y]))
    print("image shape", image.get_shape())
    print("yb shape", yb.get_shape())
    X = tf.concat([image, yb * tf.ones([self.batch_size, 24, 24, self.dim_y])], 3)
    print("X shape", X.get_shape())
    h1 = lrelu(tf.nn.conv2d(X, self.discrim_W1, strides=[1, 2, 2, 1], padding='SAME'))
    print("h1 shape", h1.get_shape())
    h1 = tf.concat([h1, yb * tf.ones([self.batch_size, 12, 12, self.dim_y])], 3)
    print("h1 shape", h1.get_shape())
    h2 = lrelu(batchnormalize(tf.nn.conv2d(h1, self.discrim_W2, strides=[1, 2, 2, 1], padding='SAME')))
    print("h2 shape", h2.get_shape())
    h2 = tf.reshape(h2, [self.batch_size, -1])
    h2 = tf.concat([h2, Y], 1)
    discri = tf.matmul(h2, self.discrim_W3)
    print("discri shape", discri.get_shape())
    h3 = lrelu(batchnormalize(discri))
    return h3
def samples_generator(self, batch_size):
    Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])
    Y = tf.placeholder(tf.float32, [batch_size, self.dim_y])
    yb = tf.reshape(Y, [batch_size, 1, 1, self.dim_y])
    Z_ = tf.concat([Z, Y], 1)
    h1 = tf.nn.relu(batchnormalize(tf.matmul(Z_, self.gen_W1)))
    h1 = tf.concat([h1, Y], 1)
    h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))
    h2 = tf.reshape(h2, [batch_size, 6, 6, self.dim_W2])
    h2 = tf.concat([h2, yb * tf.ones([batch_size, 6, 6, self.dim_y])], 3)
    output_shape_l3 = [batch_size, 12, 12, self.dim_W3]
    h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1, 2, 2, 1])
    h3 = tf.nn.relu(batchnormalize(h3))
    h3 = tf.concat([h3, yb * tf.ones([batch_size, 12, 12, self.dim_y])], 3)
    output_shape_l4 = [batch_size, 24, 24, self.dim_channel]
    h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1, 2, 2, 1])
    x = tf.nn.sigmoid(h4)
    return Z, Y, x
Source file: cpm_body_slim.py, from project convolutional-pose-machines-tensorflow (author: timctho)
def _middle_conv(self, stage):
    with tf.variable_scope('stage_' + str(stage)):
        self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
                                             self.sub_stage_img_feature,
                                             self.center_map],
                                            axis=3)
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
            mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
            self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                               scope='mid_conv7')
        self.stage_heatmap.append(self.current_heatmap)
Source file: cpm_hand_slim.py, from project convolutional-pose-machines-tensorflow (author: timctho)
def _middle_conv(self, stage):
    with tf.variable_scope('stage_' + str(stage)):
        self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
                                             self.sub_stage_img_feature,
                                             # self.center_map,
                                             ],
                                            axis=3)
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
            mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
            self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                               scope='mid_conv7')
        self.stage_heatmap.append(self.current_heatmap)
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    xshape = x.get_shape()
    # Tile y across the spatial dimensions of x, e.g. by [1, 64, 64, 1].
    tile_shape = tf.stack([1, xshape[1], xshape[2], 1])
    tile_y = tf.tile(y, tile_shape)
    return tf.concat([x, tile_y], axis=3)
    # Equivalent alternative:
    # x_shapes = x.get_shape()
    # y_shapes = y.get_shape()
    # return tf.concat([
    #     x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
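A quick shape check of the helper above: the condition enters as [b, 1, 1, dim_y], is tiled across the spatial grid, and lands on the channel axis. A sketch, assuming TensorFlow 1.x and the conv_cond_concat defined above:

import tensorflow as tf

x = tf.zeros([8, 64, 64, 32])  # feature map
y = tf.zeros([8, 1, 1, 10])    # condition, already reshaped to [b, 1, 1, dim_y]
out = conv_cond_concat(x, y)

with tf.Session() as sess:
    print(sess.run(tf.shape(out)))  # [ 8 64 64 42]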
def setup_tensor(self):
    if self._label is not None:  # already set up
        if debug:
            # Notify that it is already set up (normal behavior)
            print('self.', self.name, ' skipped setting up tensor')
        return
    tf_parents = [self.z] + [node.label for node in self.parents]
    with tf.variable_scope(self.name) as vs:
        h = tf.concat(tf_parents, -1)  # tensor of parent values
        for l in range(self.n_layers - 1):
            h = slim.fully_connected(h, self.n_hidden, activation_fn=lrelu, scope='layer' + str(l))
        self._label_logit = slim.fully_connected(h, 1, activation_fn=None, scope='proj')
        self._label = tf.nn.sigmoid(self._label_logit)
        if debug:
            print('self.', self.name, ' has setup _label=', self._label)
    # There could be a (quiet) error here if one of the names in the causal
    # graph is a substring of another, e.g. 'hair' and 'black_hair';
    # this code does not anticipate that corner case.
    self.setup_var = tf.contrib.framework.get_variables(vs)
def rgb_to_bgr(self, inputs):
    VGG_MEAN = [103.939, 116.779, 123.68]
    # The argument order of tf.split changed in TF 1.0; support both.
    try:
        red, green, blue = tf.split(inputs, 3, 3)
    except:
        red, green, blue = tf.split(3, 3, inputs)
    # assert red.get_shape().as_list()[1:] == [224, 224, 1]  (likewise green, blue)
    # The argument order of tf.concat changed in TF 1.0 as well.
    try:
        bgr = tf.concat([blue - VGG_MEAN[0],
                         green - VGG_MEAN[1],
                         red - VGG_MEAN[2]], axis=3)
    except:
        bgr = tf.concat(3, [blue - VGG_MEAN[0],
                            green - VGG_MEAN[1],
                            red - VGG_MEAN[2]])
    return bgr
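The split/concat channel reorder is easy to sanity-check on a tiny tensor. A minimal sketch, assuming TensorFlow 1.x with the post-1.0 argument order:

import numpy as np
import tensorflow as tf

arr = np.arange(12, dtype=np.float32).reshape(1, 2, 2, 3)
img = tf.constant(arr)
r, g, b = tf.split(img, 3, axis=3)
bgr = tf.concat([b, g, r], axis=3)

with tf.Session() as sess:
    out = sess.run(bgr)
assert np.array_equal(out[..., 0], arr[..., 2])  # blue is now channel 0
assert np.array_equal(out[..., 2], arr[..., 0])  # red is now channel 2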
def _ZF_up_block(self, net, down, ksizes, filters, dropout, keep_prob, name, activations, strides, batchnorm):
    channels = net.get_shape().as_list()[-1]
    with tf.variable_scope(name.split('/')[-1]):
        net = self._deconv2D(net, ksize=2, in_channel=channels,
                             out_channel=channels, strides=[1, 2, 2, 1],
                             layer_name="%s/deconv" % (name),
                             padding='SAME', activation=None, L2=1)
        # The argument order of tf.concat changed in TF 1.0; support both.
        try:
            net = tf.concat([net, down], axis=3)
        except:
            net = tf.concat(3, [net, down])
        net = self.conv_block(net, "%s/conv_block" % (name), ksizes=ksizes, filters=filters,
                              activations=activations, strides=strides, batchnorm=batchnorm)
        if dropout:
            net = tf.nn.dropout(net, keep_prob=self.keep_prob)
        return net
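The deconv-then-concat sequence is the usual U-Net style skip connection: upsample the coarse decoder features, then concatenate the matching encoder features along the channel axis. A shape-only sketch (TensorFlow 1.x assumed; tf.layers.conv2d_transpose stands in for the class's own _deconv2D helper):

import tensorflow as tf

coarse = tf.zeros([1, 16, 16, 64])  # decoder features
skip = tf.zeros([1, 32, 32, 32])    # matching encoder features
up = tf.layers.conv2d_transpose(coarse, filters=64, kernel_size=2,
                                strides=2, padding='same')
fused = tf.concat([up, skip], axis=3)
print(fused.shape)                  # (1, 32, 32, 96)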
Source file: threepart_aligner.py, from project almond-nnparser (author: Stanford-Mobisocial-IoT-Lab)
def pad_up_to(vector, size, rank):
    length_diff = tf.reshape(size - tf.shape(vector)[1], shape=(1,))
    with tf.control_dependencies([tf.assert_non_negative(length_diff, data=(vector, size, tf.shape(vector)))]):
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0] * (rank - 1)], axis=0),
                             shape=(rank + 1, 2))
        return tf.pad(vector, padding, mode='constant')
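Here tf.concat builds the flat padding spec ([0, 0] for the batch axis, [0, length_diff] for the time axis, [0, 0] for each remaining axis) that tf.pad then consumes. A usage sketch of the function above (TensorFlow 1.x assumed):

import tensorflow as tf

v = tf.ones([2, 3, 4])                 # [batch, time, depth]
padded = pad_up_to(v, size=5, rank=2)  # zero-pad the time axis to length 5

with tf.Session() as sess:
    print(sess.run(tf.shape(padded)))  # [2 5 4]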
def encode(self, inputs, input_length, _parses):
    with tf.name_scope('BiLSTMEncoder'):
        fw_cell_enc = tf.contrib.rnn.MultiRNNCell([self._make_rnn_cell(i) for i in range(self._num_layers)])
        bw_cell_enc = tf.contrib.rnn.MultiRNNCell([self._make_rnn_cell(i) for i in range(self._num_layers)])
        outputs, output_state = tf.nn.bidirectional_dynamic_rnn(fw_cell_enc, bw_cell_enc, inputs, input_length,
                                                                dtype=tf.float32)
        fw_output_state, bw_output_state = output_state
        # Concatenate each element of the final state, so that we're
        # compatible with a unidirectional decoder.
        output_state = nest.pack_sequence_as(fw_output_state,
                                             [tf.concat((x, y), axis=1) for x, y in
                                              zip(nest.flatten(fw_output_state), nest.flatten(bw_output_state))])
        return tf.concat(outputs, axis=2), output_state
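The pack_sequence_as/concat dance simply concatenates each corresponding pair of forward and backward state tensors along the feature axis, preserving the nesting. For a single LSTMStateTuple it reduces to (sketch, TensorFlow 1.x assumed):

import tensorflow as tf
from tensorflow.python.util import nest

fw = tf.contrib.rnn.LSTMStateTuple(tf.zeros([4, 8]), tf.zeros([4, 8]))
bw = tf.contrib.rnn.LSTMStateTuple(tf.zeros([4, 8]), tf.zeros([4, 8]))
merged = nest.pack_sequence_as(
    fw, [tf.concat((x, y), axis=1)
         for x, y in zip(nest.flatten(fw), nest.flatten(bw))])

print(merged.c.shape, merged.h.shape)  # (4, 16) (4, 16)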
Source file: beam_aligner.py, from project almond-nnparser (author: Stanford-Mobisocial-IoT-Lab)
def _merge_batch_beams(self, t, s):
    """Merges the tensor from a batch of beams into a batch by beams.

    More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
    reshape this into [batch_size*beam_width, s].
    Args:
        t: Tensor of dimension [batch_size, beam_width, s].
        s: (Possibly known) depth shape.
    Returns:
        A reshaped version of t with dimension [batch_size * beam_width, s].
    """
    t_shape = tf.shape(t)
    reshaped = tf.reshape(t, tf.concat(([self._batch_size * self._beam_width], t_shape[2:]), axis=0))
    reshaped.set_shape(tf.TensorShape([None]).concatenate(s))
    return reshaped
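Note that this tf.concat operates on shape vectors rather than on data: it splices the static [batch * beam] prefix onto the dynamic tail of the old shape before reshaping. In isolation (TensorFlow 1.x assumed):

import tensorflow as tf

t = tf.zeros([4, 3, 7, 5])                             # [batch=4, beam=3, ...]
t_shape = tf.shape(t)
new_shape = tf.concat(([4 * 3], t_shape[2:]), axis=0)  # [12, 7, 5]
merged = tf.reshape(t, new_shape)

with tf.Session() as sess:
    print(sess.run(tf.shape(merged)))                  # [12  7  5]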
Source file: beam_aligner.py, from project almond-nnparser (author: Stanford-Mobisocial-IoT-Lab)
def _split_batch_beams(self, t, s):
    """Splits the tensor from a batch by beams into a batch of beams.

    More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
    reshape this into [batch_size, beam_width, s].
    Args:
        t: Tensor of dimension [batch_size*beam_width, s].
        s: (Possibly known) depth shape.
    Returns:
        A reshaped version of t with dimension [batch_size, beam_width, s].
    Raises:
        ValueError: If, after reshaping, the new tensor is not shaped
            `[batch_size, beam_width, s]` (assuming batch_size and beam_width
            are known statically).
    """
    t_shape = tf.shape(t)
    reshaped = tf.reshape(t, tf.concat(([self._batch_size, self._beam_width], t_shape[1:]), axis=0))
    reshaped.set_shape(tf.TensorShape([None, self._beam_width]).concatenate(t.shape[1:]))
    expected_reshaped_shape = tf.TensorShape([None, self._beam_width]).concatenate(s)
    if not reshaped.shape.is_compatible_with(expected_reshaped_shape):
        raise ValueError("Unexpected behavior when reshaping between beam width "
                         "and batch size. The reshaped tensor has shape: %s. "
                         "We expected it to have shape "
                         "(batch_size, beam_width, depth) == %s. Perhaps you "
                         "forgot to create a zero_state with "
                         "batch_size=encoder_batch_size * beam_width?"
                         % (reshaped.shape, expected_reshaped_shape))
    return reshaped
Source file: seq2seq_aligner.py, from project almond-nnparser (author: Stanford-Mobisocial-IoT-Lab)
def pad_up_to(vector, size):
    rank = vector.get_shape().ndims - 1
    length_diff = tf.reshape(size - tf.shape(vector)[1], shape=(1,))
    with tf.control_dependencies([tf.assert_non_negative(length_diff, data=(vector, size, tf.shape(vector)))]):
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0] * (rank - 1)], axis=0),
                             shape=(rank + 1, 2))
        return tf.pad(vector, padding, mode='constant')
Source file: seq2seq_aligner.py, from project almond-nnparser (author: Stanford-Mobisocial-IoT-Lab)
def __init__(self, wrapped: tf.contrib.rnn.RNNCell, parent_state):
    super().__init__()
    self._wrapped = wrapped
    self._flat_parent_state = tf.concat(nest.flatten(parent_state), axis=1)
Source file: seq2seq_aligner.py, from project almond-nnparser (author: Stanford-Mobisocial-IoT-Lab)
def call(self, input, state):
    concat_input = tf.concat((self._flat_parent_state, input), axis=1)
    return self._wrapped.call(concat_input, state)
Source file: seq2seq_aligner.py, from project almond-nnparser (author: Stanford-Mobisocial-IoT-Lab)
def __init__(self, wrapped: tf.contrib.rnn.RNNCell, constant_input):
    super().__init__()
    self._wrapped = wrapped
    self._flat_constant_input = tf.concat(nest.flatten(constant_input), axis=1)
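Both wrappers rely on the same move: flatten a nested state structure and concatenate the pieces into one [batch, features] tensor. In isolation (TensorFlow 1.x assumed):

import tensorflow as tf
from tensorflow.python.util import nest

# A nested state: a tuple of two tensors sharing the batch dimension.
state = (tf.zeros([4, 8]), tf.zeros([4, 24]))
flat = tf.concat(nest.flatten(state), axis=1)
print(flat.shape)  # (4, 32)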
def average_gradients(tower_gradients):
    r'''
    A routine for computing each variable's average of the gradients obtained from the GPUs.
    Note also that this code acts as a synchronization point, as it requires all
    GPUs to be finished with their mini-batch before it can run to completion.
    '''
    # List of average gradients to return to the caller
    average_grads = []
    # Loop over gradient/variable pairs from all towers
    for grad_and_vars in zip(*tower_gradients):
        # Introduce grads to store the gradients for the current variable
        grads = []
        # Loop over the gradients for the current variable
        for g, _ in grad_and_vars:
            # Add a 0th dimension to the gradient to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)
        # Create a gradient/variable tuple for the current variable with its average gradient
        grad_and_var = (grad, grad_and_vars[0][1])
        # Add the current tuple to average_grads
        average_grads.append(grad_and_var)
    # Return the result to the caller
    return average_grads
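A toy check of average_gradients with hand-built (gradient, variable) pairs, assuming TensorFlow 1.x:

import tensorflow as tf

v = tf.Variable(0.0)
tower_gradients = [
    [(tf.constant(1.0), v)],  # gradients from tower 0
    [(tf.constant(3.0), v)],  # gradients from tower 1
]
avg = average_gradients(tower_gradients)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(avg[0][0]))  # 2.0, paired with variable v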
# Logging
# =======