def row_index(shape):
"""
Generate an X index for the given tensor.
.. code-block:: python
[
[ 0, 1, 2, ... width-1 ],
[ 0, 1, 2, ... width-1 ],
... (x height)
]
:param list[int] shape:
:return: Tensor
"""
height = shape[0]
width = shape[1]
row_identity = tf.cumsum(tf.ones([width], dtype=tf.int32), exclusive=True)
row_identity = tf.reshape(tf.tile(row_identity, [height]), [height, width])
return row_identity
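For a concrete shape the result is a height x width matrix whose rows all count 0 .. width-1; a minimal usage sketch, assuming a TensorFlow 1.x session is available:

import tensorflow as tf

idx = row_index([3, 4])
with tf.Session() as sess:
    print(sess.run(idx))
    # [[0 1 2 3]
    #  [0 1 2 3]
    #  [0 1 2 3]]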
def align(hid_align, h_dec, scope):
h_dec_align = linear3(h_dec, dim_align, "h_dec_align_"+scope) #batch_size x dimAlign
h_dec_align = tf.reshape(h_dec_align,[batch_size,1,dim_align])
h_dec_align_tiled = tf.tile(h_dec_align, [1, sentence_length, 1])  # note: unused below; the add relies on broadcasting of [B, 1, dim_align]
all_align = tf.tanh(h_dec_align + hid_align)
with tf.variable_scope("v_align_"+scope, reuse = DO_SHARE):
v_align=tf.get_variable("v_align_"+scope, [dim_align], initializer=tf.constant_initializer(0.0))
e_t = all_align * v_align
e_t = tf.reduce_sum(e_t, 2)
# normalise
alpha = tf.nn.softmax(e_t) # batch_size x sentence_length
alpha_t = tf.reshape(alpha, [batch_size, sentence_length, 1])
alpha_tile = tf.tile(alpha_t, [1, 1, 2*y_enc_size])
s_t = tf.multiply(alpha_tile, h_t_lang)
s_t = tf.reduce_sum(s_t, 1)
return s_t,alpha
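The tile, score, softmax, and pool pattern used above, written out self-contained with made-up sizes instead of the module's globals (linear3, dim_align, h_t_lang, etc.) and without the learned v_align vector:

import tensorflow as tf

batch_size, sentence_length, dim = 2, 5, 8
hid_align = tf.random_uniform([batch_size, sentence_length, dim])   # per-word alignment features
h_dec = tf.random_uniform([batch_size, dim])                        # decoder summary

h_dec_align = tf.reshape(h_dec, [batch_size, 1, dim])
h_dec_tiled = tf.tile(h_dec_align, [1, sentence_length, 1])         # [B, L, D]
e_t = tf.reduce_sum(tf.tanh(hid_align + h_dec_tiled), 2)            # unnormalised scores [B, L]
alpha = tf.nn.softmax(e_t)                                          # attention weights [B, L]
s_t = tf.reduce_sum(hid_align * tf.expand_dims(alpha, -1), 1)       # weighted sum [B, D]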
def run_lstm(self, encoded_rep, q_rep, masks):
encoded_question, encoded_passage = encoded_rep
masks_question, masks_passage = masks
q_rep = tf.expand_dims(q_rep, 1) # (batch_size, 1, D)
encoded_passage_shape = tf.shape(encoded_passage)[1]
q_rep = tf.tile(q_rep, [1, encoded_passage_shape, 1])
mixed_question_passage_rep = tf.concat([encoded_passage, q_rep], axis=-1)
with tf.variable_scope("lstm_"):
cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size, state_is_tuple = True)
reverse_mixed_question_passage_rep = _reverse(mixed_question_passage_rep, masks_passage, 1, 0)
output_attender_fw, _ = tf.nn.dynamic_rnn(cell, mixed_question_passage_rep, dtype=tf.float32, scope ="rnn")
output_attender_bw, _ = tf.nn.dynamic_rnn(cell, reverse_mixed_question_passage_rep, dtype=tf.float32, scope = "rnn")
output_attender_bw = _reverse(output_attender_bw, masks_passage, 1, 0)
output_attender = tf.concat([output_attender_fw, output_attender_bw], axis = -1) # (-1, P, 2*H)
return output_attender
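The question vector is tiled along the passage axis with a runtime multiple taken from tf.shape, so the same code handles any passage length; that step on its own, with hypothetical sizes:

import tensorflow as tf

q_rep = tf.random_uniform([2, 16])                 # (batch, D)
encoded_passage = tf.random_uniform([2, 7, 16])    # (batch, P, D), P only known at run time

q_rep = tf.expand_dims(q_rep, 1)                               # (batch, 1, D)
passage_len = tf.shape(encoded_passage)[1]
q_rep = tf.tile(q_rep, [1, passage_len, 1])                    # (batch, P, D)
mixed = tf.concat([encoded_passage, q_rep], axis=-1)           # (batch, P, 2*D)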
Dense_Transformer_Networks_3D.py (project: 3D_Dense_Transformer_Networks, author: JohnYC1995)
def __init__(self,input_shape,control_points_ratio):
self.num_batch = input_shape[0]
self.depth = input_shape[1]
self.height = input_shape[2]
self.width = input_shape[3]
self.num_channels = input_shape[4]
self.out_height = self.height
self.out_width = self.width
self.out_depth = self.depth
self.X_controlP_number = int(input_shape[3] / control_points_ratio)
self.Y_controlP_number = int(input_shape[2] / control_points_ratio)
self.Z_controlP_number = int(input_shape[1] / control_points_ratio)
init_x = np.linspace(-5,5,self.X_controlP_number)
init_y = np.linspace(-5,5,self.Y_controlP_number)
init_z = np.linspace(-5,5,self.Z_controlP_number)
x_s = np.tile(init_x, [self.Y_controlP_number*self.Z_controlP_number])
y_s = np.tile(np.repeat(init_y,self.X_controlP_number),[self.Z_controlP_number])
z_s = np.repeat(init_z,self.X_controlP_number*self.Y_controlP_number)
self.initial = np.array([x_s,y_s,z_s])
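The np.tile/np.repeat combination flattens a 3-D grid of control points with x varying fastest and z slowest; a standalone check against np.meshgrid, using two control points per axis (toy sizes, not the class defaults):

import numpy as np

nx = ny = nz = 2
init_x = np.linspace(-5, 5, nx)
init_y = np.linspace(-5, 5, ny)
init_z = np.linspace(-5, 5, nz)

x_s = np.tile(init_x, [ny * nz])              # x cycles fastest
y_s = np.tile(np.repeat(init_y, nx), [nz])    # y cycles next
z_s = np.repeat(init_z, nx * ny)              # z is constant within each x-y plane
grid = np.array([x_s, y_s, z_s])              # shape (3, nx*ny*nz)

# Equivalent to a meshgrid with 'ij' indexing over (z, y, x), flattened in C order:
zz, yy, xx = np.meshgrid(init_z, init_y, init_x, indexing='ij')
assert np.allclose(grid, np.array([xx.ravel(), yy.ravel(), zz.ravel()]))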
def _local_Networks(self,input_dim,x):
with tf.variable_scope('_local_Networks'):
x = tf.reshape(x,[-1,self.height*self.width*self.depth*self.num_channels])
W_fc_loc1 = weight_variable([self.height*self.width*self.depth*self.num_channels, 20])
b_fc_loc1 = bias_variable([20])
W_fc_loc2 = weight_variable([20, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number*3])
initial = self.initial.astype('float32')
initial = initial.flatten()
b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)
h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1, W_fc_loc2) + b_fc_loc2)
#temp use
if Debug == True:
x = np.linspace(-1.0,1.0,self.X_controlP_number)
y = np.linspace(-1.0,1.0,self.Y_controlP_number)
z = np.linspace(-1.0,1.0,self.Z_controlP_number)
x_s = tf.tile(x,[self.Y_controlP_number*self.Z_controlP_number],'float64')
y_s = tf.tile(self._repeat(y,self.X_controlP_number,'float64'),[self.Z_controlP_number])
z_s = self._repeat(z,self.X_controlP_number*self.Y_controlP_number,'float64')
h_fc_loc2 = tf.concat([x_s,y_s,z_s],0)
h_fc_loc2 = tf.tile(h_fc_loc2,[self.num_batch])
h_fc_loc2 = tf.reshape(h_fc_loc2,[self.num_batch,-1])
#2*(4*4*4)*3->(2,192)
return h_fc_loc2
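One detail of the Debug branch above: the learned h_fc_loc2 is overwritten by a fixed identity control-point grid, and the 'float64' strings are passed as the third positional argument of tf.tile, which is the op name, not a dtype (the arrays are already float64 from np.linspace). A quick check of that signature point:

import numpy as np
import tensorflow as tf

x = np.linspace(-1.0, 1.0, 4)        # np.linspace returns float64
x_s = tf.tile(x, [3], 'float64')     # third positional argument is `name`; this only names the op
print(x_s.dtype)                     # float64, inherited from the numpy input, not set by the string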
def __init__(self):
self._num_classes = cfg.NUM_CLASSES
self._batch_size = cfg.TRAIN.BATCH_SIZE
self._latent_size = 128
self._hidden_size = 256
self._x_labeled = tf.placeholder(tf.float32, shape=[self._batch_size, 28, 28, 1])
self._x_unlabeled = tf.placeholder(tf.float32, shape=[self._batch_size, 28, 28, 1])
self._x = tf.concat([self._x_labeled, self._x_unlabeled], 0)
self._y_labeled = tf.placeholder(tf.float32, shape=[self._batch_size, self._num_classes])
self._y_all, self.y_unlabeled = self.generate_y(self._y_labeled)
self._losses = {}
self._initializer = self.define_initializer()
self._blocks_encoder = [resnet_utils.Block('block4', bottleneck, [(256, 128, 1)] * 3)]
self._blocks_decoder_valid = [resnet_utils.Block('block5', bottleneck_trans_valid,
[(256, 128, 1), (256, 128, 2)])]
self._blocks_decoder_same = [resnet_utils.Block('block5', bottleneck_trans_same,
[(256, 128, 2), (256, 128, 2)])]
self._resnet_scope = 'resnet_v1_%d' % 101
x_unlabeled_tiled = tf.tile(self._x_unlabeled, [self._num_classes, 1, 1, 1]) # (B, 28, 28, 1) --> (B*num_classes, 28, 28, 1)
self.outputs = {'labeled': {'x_in': self._x_labeled}, 'unlabeled': {'x_in': x_unlabeled_tiled}}
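Tiling the unlabeled images along the batch axis pairs every unlabeled example with every possible class label for the marginalisation; the replication pattern with toy sizes:

import numpy as np

batch_size, num_classes = 3, 4
x_unlabeled = np.random.rand(batch_size, 28, 28, 1)

x_tiled = np.tile(x_unlabeled, [num_classes, 1, 1, 1])   # (3, 28, 28, 1) -> (12, 28, 28, 1)
print(x_tiled.shape)
# The whole batch is repeated num_classes times along axis 0:
assert np.allclose(x_tiled[batch_size:2 * batch_size], x_unlabeled)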
def encoder(self, x):
with tf.variable_scope('encoder'):
net = resnet_utils.conv2d_same(x, 64, 7, stride=2, scope='conv1')
net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
x = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
x_features_all, _ = resnet_v1.resnet_v1(x,
self._blocks_encoder,
global_pool=False,
include_root_block=False,
scope=self._resnet_scope)
x_features_all = tf.reduce_mean(x_features_all, axis=[1, 2])
x_features_labeled, x_features_unlabeled = tf.split(x_features_all, 2)
x_features_tiled = tf.tile(x_features_unlabeled, [self._num_classes, 1]) # (100, 256) --> (2100, 256)
x_features = tf.concat([x_features_labeled, x_features_tiled], 0) # (2100, 256) --> (2200, 256)
return x_features
def __init__(self, dims, multiples, name="tile_by_dim"):
"""Constructs the `TileByDim` module.
Args:
dims: The dimensions to tile along, as a list of unique integers.
multiples: The multiple of the tiling, as a list of integers. Must
be the same length as the `dims` list.
name: The name of the module.
Raises:
ValueError: If `dims` has non-unique integers, or if the size of
`multiples` is different from the size of `dims`.
"""
super(TileByDim, self).__init__(name=name)
self._dims = dims
self._multiples = multiples
if np.unique(dims).size != len(dims):
raise ValueError("dims must not have any repeated integers.")
if len(multiples) != len(dims):
raise ValueError(
"multiples must have the same length as dims: {}.".format(len(dims)))
def _build(self, inputs):
"""Connects the `TileByDim` module into the graph.
Args:
inputs: `Tensor` to tile.
Returns:
The tiled tensor.
"""
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
# Builds default lists for multiples to pass to `tf.tile`.
full_multiples = [1] * rank
# Updates lists with what the user provided.
for dim, multiple in zip(self._dims, self._multiples):
full_multiples[dim] = multiple
return tf.tile(inputs, multiples=full_multiples)
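In other words, the module expands the (dims, multiples) pairs into a full-rank multiples vector with 1s everywhere else before handing it to tf.tile; the mapping by itself in plain Python:

def full_multiples_for(rank, dims, multiples):
    """Length-`rank` multiples list: `multiples[i]` at position `dims[i]`, 1 elsewhere."""
    full = [1] * rank
    for dim, multiple in zip(dims, multiples):
        full[dim] = multiple
    return full

print(full_multiples_for(rank=3, dims=[0, 2], multiples=[2, 4]))   # [2, 1, 4]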
def testComparison(self):
# Here we compare the output with the `tf.tile` equivalent.
in_shape = [2, 3, 4]
inputs = tf.random_uniform(shape=in_shape)
dims = [0, 2]
multiples = [2, 4]
mod = snt.TileByDim(dims=dims, multiples=multiples)
output = mod(inputs)
multiple_tf = [2, 1, 4]
ref_output = tf.tile(inputs, multiples=multiple_tf)
with self.test_session() as sess:
actual, expected = sess.run([output, ref_output])
self.assertAllEqual(actual, expected)
def _create_decoder(self, encoder_output, features, _labels):
attention_class = locate(self.params["attention.class"]) or \
getattr(decoders.attention, self.params["attention.class"])
attention_layer = attention_class(
params=self.params["attention.params"], mode=self.mode)
# If the input sequence is reversed we also need to reverse
# the attention scores.
reverse_scores_lengths = None
if self.params["source.reverse"]:
reverse_scores_lengths = features["source_len"]
if self.use_beam_search:
reverse_scores_lengths = tf.tile(
input=reverse_scores_lengths,
multiples=[self.params["inference.beam_search.beam_width"]])
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size,
attention_values=encoder_output.attention_values,
attention_values_length=encoder_output.attention_values_length,
attention_keys=encoder_output.outputs,
attention_fn=attention_layer,
reverse_scores_lengths=reverse_scores_lengths)
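Under beam search every batch element is replicated beam_width times, so the per-example source lengths have to be tiled to match; the 1-D tiling itself, with toy values and a TF 1.x session assumed:

import tensorflow as tf

source_len = tf.constant([5, 7, 3])                        # one length per batch element
beam_width = 4
tiled = tf.tile(input=source_len, multiples=[beam_width])  # shape [batch * beam_width]

with tf.Session() as sess:
    print(sess.run(tiled))   # [5 7 3 5 7 3 5 7 3 5 7 3]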
def initialize(self, name=None):
finished = tf.tile([False], [self.config.beam_width])
start_tokens_batch = tf.fill([self.config.beam_width], self.start_tokens)
first_inputs = tf.nn.embedding_lookup(self.target_embedding, start_tokens_batch)
first_inputs = tf.expand_dims(first_inputs, 1)
zeros_padding = tf.zeros([self.config.beam_width, self.params['max_decode_length']-1, self.target_embedding.get_shape().as_list()[-1]])
first_inputs = tf.concat([first_inputs, zeros_padding], axis=1)
outputs = tf.tile(self.initial_state.outputs, [self.config.beam_width,1,1])
attention_values = tf.tile(self.initial_state.attention_values, [self.config.beam_width,1,1])
enc_output = EncoderOutput(
outputs=outputs,
final_state=self.initial_state.final_state,
attention_values=attention_values,
attention_values_length=self.initial_state.attention_values_length)
return finished, first_inputs, enc_output
def initialize(self, name=None):
finished = tf.tile([False], [self.config.beam_width])
start_tokens_batch = tf.fill([self.config.beam_width], self.start_tokens)
first_inputs = tf.nn.embedding_lookup(self.target_embedding, start_tokens_batch)
first_inputs = tf.expand_dims(first_inputs, 1)
zeros_padding = tf.zeros([self.config.beam_width, self.params['max_decode_length']-1, self.target_embedding.get_shape().as_list()[-1]])
first_inputs = tf.concat([first_inputs, zeros_padding], axis=1)
beam_state = beam_search.create_initial_beam_state(self.config)
outputs = tf.tile(self.initial_state.outputs, [self.config.beam_width,1,1])
attention_values = tf.tile(self.initial_state.attention_values, [self.config.beam_width,1,1])
enc_output = EncoderOutput(
outputs=outputs,
final_state=self.initial_state.final_state,
attention_values=attention_values,
attention_values_length=self.initial_state.attention_values_length)
return finished, first_inputs, (enc_output, beam_state)
def SampleRandomFrames(model_input, num_frames, num_samples):
"""Samples a random set of frames of size num_samples.
Args:
model_input: A tensor of size batch_size x max_frames x feature_size
num_frames: A tensor of size batch_size x 1
num_samples: A scalar
Returns:
`model_input`: A tensor of size batch_size x num_samples x feature_size
"""
batch_size = tf.shape(model_input)[0]
frame_index = tf.cast(
tf.multiply(
tf.random_uniform([batch_size, num_samples]),
tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
batch_index = tf.tile(
tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
index = tf.stack([batch_index, frame_index], 2)
return tf.gather_nd(model_input, index)
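The tiled batch_index and the random frame_index are stacked into (batch, frame) coordinate pairs for tf.gather_nd; a deterministic miniature with fixed indices instead of random ones (TF 1.x session assumed):

import tensorflow as tf

model_input = tf.reshape(tf.range(2 * 4 * 3), [2, 4, 3])        # batch=2, max_frames=4, feature_size=3
frame_index = tf.constant([[0, 2], [1, 3]])                     # 2 sampled frames per batch element
batch_index = tf.tile(tf.expand_dims(tf.range(2), 1), [1, 2])   # [[0, 0], [1, 1]]
index = tf.stack([batch_index, frame_index], 2)                 # (batch, frame) pairs, shape [2, 2, 2]

with tf.Session() as sess:
    print(sess.run(tf.gather_nd(model_input, index)))
    # [[[ 0  1  2]
    #   [ 6  7  8]]
    #  [[15 16 17]
    #   [21 22 23]]]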
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N*B, I + B]
:param state: [N*B, d]
:param scope:
:return: [N*B, d]
"""
with tf.variable_scope(scope or self.__class__.__name__):
d = self.state_size
x = tf.slice(inputs, [0, 0], [-1, self._input_size]) # [N*B, I]
mask = tf.slice(inputs, [0, self._input_size], [-1, -1]) # [N*B, B]
B = tf.shape(mask)[1]
prev_state = tf.expand_dims(tf.reshape(state, [-1, B, d]), 1) # [N, B, d] -> [N, 1, B, d]
mask = tf.tile(tf.expand_dims(tf.reshape(mask, [-1, B, B]), -1), [1, 1, 1, d]) # [N, B, B, d]
# prev_state = self._reduce_func(tf.tile(prev_state, [1, B, 1, 1]), 2)
prev_state = self._reduce_func(exp_mask(prev_state, mask), 2) # [N, B, d]
prev_state = tf.reshape(prev_state, [-1, d]) # [N*B, d]
return self._cell(x, prev_state)
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N, d + JQ + JQ * d]
:param state: [N, d]
:param scope:
:return:
"""
with tf.variable_scope(scope or self.__class__.__name__):
c_prev, h_prev = state
x = tf.slice(inputs, [0, 0], [-1, self._input_size])
q_mask = tf.slice(inputs, [0, self._input_size], [-1, self._q_len]) # [N, JQ]
qs = tf.slice(inputs, [0, self._input_size + self._q_len], [-1, -1])
qs = tf.reshape(qs, [-1, self._q_len, self._input_size]) # [N, JQ, d]
x_tiled = tf.tile(tf.expand_dims(x, 1), [1, self._q_len, 1]) # [N, JQ, d]
h_prev_tiled = tf.tile(tf.expand_dims(h_prev, 1), [1, self._q_len, 1]) # [N, JQ, d]
f = tf.tanh(linear([qs, x_tiled, h_prev_tiled], self._input_size, True, scope='f')) # [N, JQ, d]
a = tf.nn.softmax(exp_mask(linear(f, 1, True, squeeze=True, scope='a'), q_mask)) # [N, JQ]
q = tf.reduce_sum(qs * tf.expand_dims(a, -1), 1)
z = tf.concat(1, [x, q]) # [N, 2d]
return self._cell(z, state)
def get_double_linear_controller(size, bias, input_keep_prob=1.0, is_train=None):
def double_linear_controller(inputs, state, memory):
"""
:param inputs: [N, i]
:param state: [N, d]
:param memory: [N, M, m]
:return: [N, M]
"""
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = double_linear_logits(in_, size, bias, input_keep_prob=input_keep_prob,
is_train=is_train)
return out
return double_linear_controller
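Both controllers tile the query (inputs plus state) across the M memory slots so it can be concatenated with the memory slot-wise. The tiling and concat in isolation, with made-up sizes; note the snippet above uses the pre-1.0 tf.concat(concat_dim, values) argument order, while this sketch uses the current values-first order:

import tensorflow as tf

N, M, i, d, m = 2, 5, 3, 4, 6
inputs = tf.random_uniform([N, i])
state = tf.random_uniform([N, d])
memory = tf.random_uniform([N, M, m])

memory_size = tf.shape(memory)[1]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, memory_size, 1])   # [N, M, i]
tiled_state = tf.tile(tf.expand_dims(state, 1), [1, memory_size, 1])     # [N, M, d]
in_ = tf.concat([tiled_inputs, tiled_state, memory], axis=2)             # [N, M, i + d + m]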
def get_linear_controller(bias, input_keep_prob=1.0, is_train=None):
def linear_controller(inputs, state, memory):
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = linear(in_, 1, bias, squeeze=True, input_keep_prob=input_keep_prob, is_train=is_train)
return out
return linear_controller
def get_output_for(self, input, **kwargs):
input_shape = tf.shape(input)
n_batches = input_shape[0]
n_steps = input_shape[1]
input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
if 'recurrent_state' in kwargs and self in kwargs['recurrent_state']:
h0s = kwargs['recurrent_state'][self]
else:
h0s = tf.tile(
tf.reshape(self.h0, (1, self.num_units)),
(n_batches, 1)
)
# flatten extra dimensions
shuffled_input = tf.transpose(input, (1, 0, 2))
hs = tf.scan(
self.step,
elems=shuffled_input,
initializer=h0s
)
shuffled_hs = tf.transpose(hs, (1, 0, 2))
if 'recurrent_state_output' in kwargs:
kwargs['recurrent_state_output'][self] = shuffled_hs
return shuffled_hs
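The single h0 vector is broadcast to one initial state per sequence before tf.scan, using a dynamic batch size read from tf.shape; that replication on its own, with toy sizes and mirroring the tuple-of-multiples style used above:

import tensorflow as tf

num_units = 4
h0 = tf.zeros([num_units])                       # one shared initial state
inputs = tf.random_uniform([3, 7, 5])            # (batch, steps, features)
n_batches = tf.shape(inputs)[0]

h0s = tf.tile(tf.reshape(h0, (1, num_units)), (n_batches, 1))   # (batch, num_units)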