def log_variable(variable, gradient=None):
    r'''
    We introduce a function for logging a tensor variable's current state.
    It logs scalar values for the mean, standard deviation, minimum and maximum.
    Furthermore it logs a histogram of its state and (if given) of an optimization gradient.
    '''
    name = variable.name
    mean = tf.reduce_mean(variable)
    tf.summary.scalar(name='%s/mean' % name, tensor=mean)
    tf.summary.scalar(name='%s/stddev' % name, tensor=tf.sqrt(tf.reduce_mean(tf.square(variable - mean))))
    tf.summary.scalar(name='%s/max' % name, tensor=tf.reduce_max(variable))
    tf.summary.scalar(name='%s/min' % name, tensor=tf.reduce_min(variable))
    tf.summary.histogram(name=name, values=variable)
    if gradient is not None:
        if isinstance(gradient, tf.IndexedSlices):
            grad_values = gradient.values
        else:
            grad_values = gradient
        if grad_values is not None:
            tf.summary.histogram(name='%s/gradients' % name, values=grad_values)
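# A minimal usage sketch (not part of the original snippet): attach the summaries above to
# every trainable variable and its gradient. The loss and optimizer are illustrative.
import tensorflow as tf

W = tf.Variable(tf.truncated_normal([128, 10]), name='softmax_W')
loss = tf.reduce_sum(tf.square(W))  # stand-in loss, for illustration only
optimizer = tf.train.GradientDescentOptimizer(0.01)
for grad, var in optimizer.compute_gradients(loss):
    log_variable(var, gradient=grad)
merged_summaries = tf.summary.merge_all()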
def _activation_summary(self, x, layer_name):
    """Helper to create summaries for activations.

    Args:
        x: layer output tensor
        layer_name: name of the layer
    Returns:
        nothing
    """
    with tf.variable_scope('activation_summary') as scope:
        tf.summary.histogram(
            'activation_summary/' + layer_name, x)
        tf.summary.scalar(
            'activation_summary/' + layer_name + '/sparsity', tf.nn.zero_fraction(x))
        tf.summary.scalar(
            'activation_summary/' + layer_name + '/average', tf.reduce_mean(x))
        tf.summary.scalar(
            'activation_summary/' + layer_name + '/max', tf.reduce_max(x))
        tf.summary.scalar(
            'activation_summary/' + layer_name + '/min', tf.reduce_min(x))
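# Hedged usage sketch (assumes `model` is an instance of the class that defines
# _activation_summary; the activation tensor is illustrative).
conv1_out = tf.nn.relu(tf.random_uniform([8, 32, 32, 64]))
model._activation_summary(conv1_out, layer_name='conv1')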
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
        epsilon = 10e-6
        vocab_size = predictions.get_shape().as_list()[1]
        float_labels = tf.cast(labels, tf.float32)
        cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
            1 - float_labels) * tf.log(1 - predictions + epsilon)
        cross_entropy_loss = tf.negative(cross_entropy_loss)
        neg_labels = 1 - float_labels
        # Lowest-scored positive label and highest-scored negative label per example.
        predictions_pos = predictions * float_labels + 10 * neg_labels
        predictions_minpos = tf.reduce_min(predictions_pos, axis=1, keep_dims=True)
        predictions_neg = predictions * neg_labels - 10 * float_labels
        predictions_maxneg = tf.reduce_max(predictions_neg, axis=1, keep_dims=True)
        # Up-weight (x11) entries where a negative label outscores the lowest positive,
        # or a positive label is outscored by the highest negative.
        mask_1 = tf.cast(tf.greater_equal(predictions_neg, predictions_minpos), dtype=tf.float32)
        mask_2 = tf.cast(tf.less_equal(predictions_pos, predictions_maxneg), dtype=tf.float32)
        cross_entropy_loss = cross_entropy_loss * (mask_1 + mask_2) * 10 + cross_entropy_loss
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
def get_image_summary(img, idx=0):
    """
    Make an image summary for 4d tensor image with index idx
    """
    V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    V -= tf.reduce_min(V)
    V /= tf.reduce_max(V)
    V *= 255

    img_w = tf.shape(img)[1]
    img_h = tf.shape(img)[2]
    V = tf.reshape(V, tf.stack((img_w, img_h, 1)))
    V = tf.transpose(V, (2, 0, 1))
    V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))
    return V
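# Hedged usage sketch: write the first feature map of a 4-D NHWC activation to TensorBoard
# (the tensor below is illustrative).
features = tf.random_uniform([4, 28, 28, 16])
tf.summary.image('features/map_0', get_image_summary(features, idx=0))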
def bag_hinge_loss(config, preds, sent_mask, flip_sent_mask, hete_mask,
                   sent_trgt, sent_num):
    """ HINGE LOSS:
    DEFINED AS: MAX(0, M - MIN(SENT+) + MAX(SENT-))
    THIS ONLY APPLIES TO HETE BAGS.
    """
    flip_sent_trgt = \
        tf.constant(1, shape=[config.batch_size, sent_num], dtype=config.data_type) - \
        sent_trgt
    pos_preds = preds + flip_sent_trgt + flip_sent_mask  # [batch_size, sent_num]
    neg_preds = preds * flip_sent_trgt * sent_mask  # [batch_size, sent_num]
    min_pos_pred = tf.reduce_min(pos_preds, 1)
    # min_pos_pred = tf.Print(min_pos_pred, [min_pos_pred], message='min_pos_pred')
    max_neg_pred = tf.reduce_max(neg_preds, 1)
    # max_neg_pred = tf.Print(max_neg_pred, [max_neg_pred], message='max_neg_pred')
    hinge_loss = hete_mask * tf.reduce_max(tf.pack(
        [tf.constant(0, shape=[config.batch_size], dtype=config.data_type),
         (0.20 - min_pos_pred + max_neg_pred)], axis=1), 1)  # [batch_size]
    # hinge_loss = tf.Print(hinge_loss, [hinge_loss], message='hinge_loss', summarize=20)
    avg_hinge_loss = tf.reduce_sum(hinge_loss) / (tf.reduce_sum(hete_mask) + 1e-12)
    return avg_hinge_loss
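# Worked example of the margin term (values illustrative): with margin M = 0.20, if the
# lowest positive-sentence score in a bag is 0.7 and the highest negative-sentence score
# is 0.6, the bag's hinge loss is max(0, 0.20 - 0.7 + 0.6) = 0.10; hete_mask then zeroes
# the loss for bags that are not heterogeneous.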
def variable_summaries(var, name, collections=None):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Args:
        - var: Tensor for variable from which we want to log.
        - name: Variable name.
        - collections: List of collections to save the summary to.
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections)
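# Hedged usage sketch: send a weight's summaries to a dedicated collection so they can be
# merged separately later (the variable and collection names are illustrative).
fc_weights = tf.Variable(tf.random_normal([256, 64]), name='fc1_weights')
variable_summaries(fc_weights, name='fc1_weights', collections=['train_summaries'])
train_summary_op = tf.summary.merge_all(key='train_summaries')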
def _conform_kernel_to_tensor(kernel, tensor, shape):
    """ Re-shape a convolution kernel to match the given tensor's color dimensions. """
    l = len(kernel)
    channels = shape[-1]
    temp = np.repeat(kernel, channels)
    temp = tf.reshape(temp, (l, l, channels, 1))
    temp = tf.cast(temp, tf.float32)
    temp /= tf.maximum(tf.reduce_max(temp), tf.reduce_min(temp) * -1)
    return temp
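# Hedged usage sketch (the depthwise convolution is an assumption, not from the original
# snippet): broadcast a 3x3 Laplacian kernel across the channels of an RGB tensor.
import numpy as np
import tensorflow as tf

laplacian = np.array([[0., 1., 0.], [1., -4., 1.], [0., 1., 0.]])
rgb = tf.random_uniform([1, 64, 64, 3])
kernel4d = _conform_kernel_to_tensor(laplacian, rgb, shape=[1, 64, 64, 3])
edges = tf.nn.depthwise_conv2d(rgb, kernel4d, strides=[1, 1, 1, 1], padding='SAME')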
Source: tensorflow_backend.py, project deep-learning-keras-projects (author: jasmeetsb)
def min(x, axis=None, keepdims=False):
    """Minimum value in a tensor.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to find minimum values.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    # Returns
        A tensor with minimum values of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
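# Hedged usage sketch of the backend wrapper above (assumes the surrounding helpers
# `_normalize_axis` and `ndim` are in scope; values are illustrative).
x = tf.constant([[1.0, 4.0], [2.0, 0.5]])
col_min = min(x, axis=0)                 # -> [1.0, 0.5]
row_min = min(x, axis=1, keepdims=True)  # shape (2, 1): [[1.0], [0.5]]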
def seg_prediction(self):
    outputs, size, batch_size = self.outputs
    num_class = self.config.num_class
    output_w = weight_variable([size, num_class])
    output_b = bias_variable([num_class])
    # outputs = tf.transpose(outputs,[1,0,2])
    tag_trans = weight_variable([num_class, num_class])

    def transition(p, x):
        res = tf.matmul(x, output_w) + output_b
        # deviation = tf.tile(tf.expand_dims(tf.reduce_min(previous_pred, reduction_indices=1), 1),
        #                     [1, num_class])
        # previous_pred -= deviation
        focus = 1.
        res += tf.matmul(p, tag_trans) * focus
        prediction = tf.nn.softmax(res)
        return prediction

    # Recurrent network.
    pred = tf.scan(transition, outputs, initializer=tf.zeros([batch_size, num_class]), parallel_iterations=100)
    pred = tf.transpose(pred, [1, 0, 2])
    return pred
def pos_prediction(self):
    outputs, size, batch_size = self.outputs
    num_class = len(POS_tagging['P'])
    output_w = weight_variable([size, num_class])
    output_b = bias_variable([num_class])
    # outputs = tf.transpose(outputs,[1,0,2])
    tag_trans = weight_variable([num_class, num_class])
    outputs = tf.reverse(outputs, [True, False, False])

    def transition(previous_pred, x):
        res = tf.matmul(x, output_w) + output_b
        deviation = tf.tile(tf.expand_dims(tf.reduce_min(previous_pred, reduction_indices=1), 1),
                            [1, num_class])
        previous_pred -= deviation
        focus = 0.5
        res += tf.matmul(previous_pred, tag_trans) * focus
        prediction = tf.nn.softmax(res)
        return prediction

    # Recurrent network.
    pred = tf.scan(transition, outputs, initializer=tf.zeros([batch_size, num_class]), parallel_iterations=100)
    pred = tf.reverse(pred, [True, False, False])
    pred = tf.transpose(pred, [1, 0, 2])
    return pred
def summary_param(op, tensor, ndims, name, collections=None):
    """
    Add summary as per the ops mentioned

    Args:
        op: name of the summary op; e.g. 'stddev'
            available ops: ['scalar', 'histogram', 'sparsity', 'mean', 'rms', 'stddev', 'norm', 'max', 'min']
        tensor: the tensor to add summary
        ndims: dimension of the tensor
        name: name of the op
        collections: training or validation collections
    """
    return {
        'scalar': tf.summary.scalar(name, tensor, collections=collections) if ndims == 0 else tf.summary.scalar(name + '/mean', tf.reduce_mean(tensor), collections=collections),
        'histogram': tf.summary.histogram(name, tensor, collections=collections) if ndims >= 2 else None,
        'sparsity': tf.summary.scalar(name + '/sparsity', tf.nn.zero_fraction(tensor), collections=collections),
        'mean': tf.summary.scalar(name + '/mean', tf.reduce_mean(tensor), collections=collections),
        'rms': tf.summary.scalar(name + '/rms', rms(tensor), collections=collections),
        'stddev': tf.summary.scalar(name + '/stddev', tf.sqrt(tf.reduce_sum(tf.square(tensor - tf.reduce_mean(tensor, name='mean_op'))), name='stddev_op'), collections=collections),
        'max': tf.summary.scalar(name + '/max', tf.reduce_max(tensor), collections=collections),
        'min': tf.summary.scalar(name + '/min', tf.reduce_min(tensor), collections=collections),
        'norm': tf.summary.scalar(name + '/norm', tf.sqrt(tf.reduce_sum(tensor * tensor)), collections=collections),
    }[op]
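# Hedged usage sketch (assumes the module's `rms` helper is in scope, since the dict literal
# above constructs every op in the table on each call and only returns the requested one).
weights = tf.Variable(tf.random_normal([256, 128]), name='fc1_W')
summary_param('stddev', weights, ndims=2, name='fc1_W', collections=['params'])
summary_param('min', weights, ndims=2, name='fc1_W', collections=['params'])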
def _reduce_max(self, input_tensor, reduction_indices, c):
    """
    a constrainable version of tf.reduce_max

    Parameters:
    -----------
    input_tensor: Tensor
    reduction_indices: Tensor
    c: Tensor
        The constraints tensor
        A tensor of 0s and 1s where 1s represent the elements the reduction
        should be made on, and 0s represent discarded elements
    """
    min_values = tf.reduce_min(input_tensor, reduction_indices, keep_dims=True)
    not_c = tf.abs(c - 1)
    return tf.reduce_max(input_tensor * c + not_c * min_values, reduction_indices)
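# Hedged usage sketch: row-wise max over only the unmasked elements
# (assumes `obj` is an instance of the class defining _reduce_max; values are illustrative).
scores = tf.constant([[3.0, 9.0, 5.0]])
mask = tf.constant([[1.0, 0.0, 1.0]])  # exclude the middle element from the reduction
masked_max = obj._reduce_max(scores, reduction_indices=[1], c=mask)  # -> [5.0]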
def _reduce_max(self, input_tensor, reduction_indices, c):
    """
    a constrainable version of tf.reduce_max

    Parameters:
    -----------
    input_tensor: Tensor
    reduction_indices: Tensor
    c: Tensor
        The constraints tensor
        A tensor of 0s and 1s where 1s represent the elements the reduction
        should be made on, and 0s represent discarded elements
    """
    with self.session.graph.as_default():
        min_values = tf.reduce_min(input_tensor, reduction_indices, keep_dims=True)
        not_c = tf.abs(c - 1)
        return tf.reduce_max(input_tensor * c + not_c * min_values, reduction_indices)
def _argmax(self, input_tensor, dimension, c):
    """
    a constrainable version of tf.argmax

    Parameters:
    -----------
    input_tensor: Tensor
    dimension: Tensor
    c: Tensor
        The constraints tensor
        A tensor of 0s and 1s where 1s represent the elements the reduction
        should be made on, and 0s represent discarded elements
    """
    with self.session.graph.as_default():
        min_values = tf.reduce_min(input_tensor, reduction_indices=[dimension, ], keep_dims=True)
        not_c = tf.abs(c - 1)
        return tf.argmax(input_tensor * c + not_c * min_values, dimension)
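# Hedged usage sketch: argmax restricted to allowed positions (assumes `obj` is an instance
# of the class defining _argmax, with a tf.Session available as obj.session).
logits = tf.constant([[0.2, 0.9, 0.4]])
allowed = tf.constant([[1.0, 0.0, 1.0]])  # position 1 may not be selected
best = obj._argmax(logits, dimension=1, c=allowed)  # -> [2]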
def curvature_range(self):
    # set up the curvature window
    self._curv_win = \
        tf.Variable(np.zeros([self._curv_win_width, ]), dtype=tf.float32, name="curv_win", trainable=False)
    self._curv_win = tf.scatter_update(self._curv_win,
                                       self._global_step % self._curv_win_width, self._grad_norm_squared)
    # note here the iterations start from iteration 0
    valid_window = tf.slice(self._curv_win, tf.constant([0, ]),
                            tf.expand_dims(tf.minimum(tf.constant(self._curv_win_width), self._global_step + 1), dim=0))
    self._h_min_t = tf.reduce_min(valid_window)
    self._h_max_t = tf.reduce_max(valid_window)

    curv_range_ops = []
    with tf.control_dependencies([self._h_min_t, self._h_max_t]):
        avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
        with tf.control_dependencies([avg_op]):
            self._h_min = tf.identity(self._moving_averager.average(self._h_min_t))
            self._h_max = tf.identity(self._moving_averager.average(self._h_max_t))
    curv_range_ops.append(avg_op)
    return curv_range_ops
def variable_summaries(var):
    """Attach summaries of a variable to a Tensor for TensorBoard.

    Args:
        var (tf.Tensor): Tensor variable.
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def add_summaries(self):
    self.min_Df = tf.reduce_min(self.Df)
    self.max_Df = tf.reduce_max(self.Df)
    self.min_Dr = tf.reduce_min(self.Dr)
    self.max_Dr = tf.reduce_max(self.Dr)
    tf.summary.scalar('D_0_z', tf.reduce_mean(self.Df[0]))
    tf.summary.scalar('min_D_z', self.min_Df)
    tf.summary.scalar('max_D_z', self.max_Df)
    tf.summary.scalar('D_0_x', tf.reduce_mean(self.Dr[0]))
    tf.summary.scalar('min_D_x', self.min_Dr)
    tf.summary.scalar('max_D_x', self.max_Dr)
    tf.summary.histogram('D_f', self.Df)
    tf.summary.histogram('D_r', self.Dr)
    for ind in range(len(self.D_losses)):
        tf.summary.scalar('D_%d_Loss' % ind, self.D_losses[ind])
    tf.summary.scalar('G_loss', self.G_loss)
    for ind in range(len(self.V_D)):
        tf.summary.scalar('V_D_%d' % ind, self.V_D[ind])
    tf.summary.scalar('V_G', self.V_G)
def variable_summaries(var, name):
    """
    Attach a lot of summaries to a Tensor for Tensorboard visualization.
    Ref: https://www.tensorflow.org/versions/r0.11/how_tos/summaries_and_tensorboard/index.html
    :param var: Variable to summarize
    :param name: Summary name
    """
    # Note: tf.scalar_summary / tf.histogram_summary are the pre-1.0 summary ops;
    # in TensorFlow 1.x they were renamed to tf.summary.scalar / tf.summary.histogram.
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
def quantParam():  # pass saved n/w * suffix
    paramDict = {}
    minMaxDict = {}
    suffix = ["conv", "_w:0"]
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph('./LenetParam.meta')
        saver.restore(sess, './LenetParam')
        conv_wts = [v.name for v in tf.trainable_variables() if (v.name.startswith(suffix[0]) & v.name.endswith(suffix[1]))]
        lay_name = [v.name for v in tf.trainable_variables() if (v.name.endswith("_w:0") | v.name.endswith("_b:0"))]
        for v in lay_name:
            curLay = [a for a in tf.trainable_variables() if (a.name == v)]
            curWt = curLay[0].eval()
            if v in conv_wts:
                quantWt = tf.quantize_v2(curWt, tf.reduce_min(curWt), tf.reduce_max(curWt), tf.qint16,
                                         mode="MIN_FIRST", name="quant32to16")
                chk = sess.run(quantWt)
                paramDict.update({v: chk.output})
                minMaxDict.update({v: [chk.output_min, chk.output_max]})
            else:
                chk = curWt
                paramDict.update({v: chk})
    print(paramDict.keys())
    print(minMaxDict.keys())
    return paramDict, minMaxDict
def learn_comb(poses, dm_shape, batch_size, max_length, n_dims, reuse=None, _float_type=tf.float32):
    with tf.variable_scope("learn_comb", reuse=reuse):
        comb_matrix = tf.get_variable(
            "matrix", [dm_shape[0], dm_shape[1]],
            initializer=identity_initializer(0.01),
            dtype=_float_type, trainable=True
        )
        norm_comb_matrix = comb_matrix / tf.reduce_sum(comb_matrix, axis=0, keep_dims=True)

        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size * max_length * n_dims, dm_shape[0]])
        poses = tf.matmul(poses, norm_comb_matrix)
        poses = tf.reshape(poses, [batch_size, max_length, n_dims, dm_shape[0]])
        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size, max_length, dm_shape[0], n_dims])

        cb_min = tf.reduce_min(norm_comb_matrix)
        cb_max = tf.reduce_max(norm_comb_matrix)
        comb_matrix_image = (norm_comb_matrix - cb_min) / (cb_max - cb_min) * 255.0
        comb_matrix_image = tf.cast(comb_matrix_image, tf.uint8)
        comb_matrix_image = tf.reshape(comb_matrix_image, [1, dm_shape[0], dm_shape[1], 1])
    return poses, comb_matrix_image
def learn_comb_unc(poses, dm_shape, batch_size, max_length, n_dims, reuse=None, _float_type=tf.float32):
    with tf.variable_scope("learn_comb", reuse=reuse):
        comb_matrix = tf.get_variable(
            "matrix", [dm_shape[0], dm_shape[1]],
            initializer=identity_initializer(0.01),
            dtype=_float_type, trainable=True
        )
        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size * max_length * n_dims, dm_shape[0]])
        poses = tf.matmul(poses, comb_matrix)
        poses = tf.reshape(poses, [batch_size, max_length, n_dims, dm_shape[0]])
        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size, max_length, dm_shape[0], n_dims])

        cb_min = tf.reduce_min(comb_matrix)
        cb_max = tf.reduce_max(comb_matrix)
        comb_matrix_image = (comb_matrix - cb_min) / (cb_max - cb_min) * 255.0
        comb_matrix_image = tf.cast(comb_matrix_image, tf.uint8)
        comb_matrix_image = tf.reshape(comb_matrix_image, [1, dm_shape[0], dm_shape[1], 1])
    return poses, comb_matrix_image
def learn_comb_centered(poses, dm_shape, batch_size, max_length, n_dims, reuse=None, _float_type=tf.float32):
    with tf.variable_scope("learn_comb", reuse=reuse):
        comb_matrix = tf.get_variable(
            "matrix", [dm_shape[0], dm_shape[1]],
            initializer=identity_initializer(0.01),
            dtype=_float_type, trainable=True
        )
        pcenter = tf.reduce_mean(poses, axis=2, keep_dims=True)
        poses = poses - pcenter

        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size * max_length * n_dims, dm_shape[0]])
        poses = tf.matmul(poses, comb_matrix)
        poses = tf.reshape(poses, [batch_size, max_length, n_dims, dm_shape[0]])
        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size, max_length, dm_shape[0], n_dims])

        cb_min = tf.reduce_min(comb_matrix)
        cb_max = tf.reduce_max(comb_matrix)
        comb_matrix_image = (comb_matrix - cb_min) / (cb_max - cb_min) * 255.0
        comb_matrix_image = tf.cast(comb_matrix_image, tf.uint8)
        comb_matrix_image = tf.reshape(comb_matrix_image, [1, dm_shape[0], dm_shape[1], 1])
    return poses, comb_matrix_image
def create_model(self,
                 model_input,
                 vocab_size,
                 num_frames,
                 **unused_params):
    shape = model_input.get_shape().as_list()
    frames_sum = tf.reduce_sum(tf.abs(model_input), axis=2)
    frames_true = tf.ones(tf.shape(frames_sum))
    frames_false = tf.zeros(tf.shape(frames_sum))
    frames_bool = tf.reshape(tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false), [-1, shape[1], 1])

    activation_1 = tf.reduce_max(model_input, axis=1)
    activation_2 = tf.reduce_sum(model_input * frames_bool, axis=1) / (tf.reduce_sum(frames_bool, axis=1) + 1e-6)
    activation_3 = tf.reduce_min(model_input, axis=1)

    model_input_1, final_probilities_1 = self.sub_moe(activation_1, vocab_size, scopename="_max")
    model_input_2, final_probilities_2 = self.sub_moe(activation_2, vocab_size, scopename="_mean")
    model_input_3, final_probilities_3 = self.sub_moe(activation_3, vocab_size, scopename="_min")
    final_probilities = tf.stack((final_probilities_1, final_probilities_2, final_probilities_3), axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[shape[2], 3, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
    weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)
    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result