def _attention(self, prev_decoder_state, prev_embedding):
    with tf.variable_scope('attention') as scope:
        # e = score of shape [batch_size, output_seq_length, input_seq_length], e_{ij} = score(s_{i-1}, h_j)
        # e_i = score of shape [batch_size, input_seq_length], e_ij = score(prev_decoder_state, h_j)
        e_i = self._score(prev_decoder_state, prev_embedding)
        # alpha_i = softmax(e_i) of shape [batch_size, input_seq_length]
        alpha_i = tf.nn.softmax(e_i)
        # expand to [batch_size, input_seq_length, 1] so the attention weights
        # broadcast across the encoder state dimension
        resized_alpha_i = tf.expand_dims(alpha_i, axis=2)
        # c_i = sum_j alpha_ij * h_j of shape [batch_size, encoder_output_size]
        if self.mode == 'decode':
            c_i = tf.reduce_sum(tf.multiply(resized_alpha_i, self.pre_computed_encoder_states_placeholder), axis=1)
        else:
            c_i = tf.reduce_sum(tf.multiply(resized_alpha_i, self.encoder_outputs), axis=1)
        return c_i, e_i
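# A minimal numpy sanity check of the context-vector computation above;
# shapes and values are illustrative, not from the original model:
import numpy as np

batch_size, input_seq_length, encoder_output_size = 2, 5, 8
scores = np.random.randn(batch_size, input_seq_length)              # e_i
alpha = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)  # softmax
h = np.random.randn(batch_size, input_seq_length, encoder_output_size)
c = (alpha[:, :, None] * h).sum(axis=1)  # c_i: [batch_size, encoder_output_size]
assert c.shape == (batch_size, encoder_output_size)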
def actor_loss(self):
    if self.config.mode == 'discrete':
        log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.action_input, self.action_dim, dtype=tf.float32),
                                 axis=1, keep_dims=True)
        # use entropy to encourage exploration
        exp_v = log_prob * self.TD_loss
        entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1, keep_dims=True)  # encourage exploration
        exp_v = self.config.ENTROPY_BETA * entropy + exp_v
        # minimize -exp_v, i.e. maximize the TD-weighted log-probability plus the entropy bonus
        return tf.reduce_mean(-exp_v)
    elif self.config.mode == 'continuous':
        log_prob = self.action_normal_dist.log_prob(self.action_input)
        exp_v = log_prob * self.TD_loss
        # use entropy to encourage exploration
        exp_v = self.config.ENTROPY_BETA * self.action_normal_dist.entropy() + exp_v
        return tf.reduce_mean(-exp_v)
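# A small numpy check of the discrete branch (names and shapes are
# illustrative only): the one-hot mask picks out log pi(a|s) per sample.
import numpy as np

a_prob = np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])  # [batch, action_dim]
actions = np.array([0, 1])
one_hot = np.eye(3)[actions]
log_prob = (np.log(a_prob) * one_hot).sum(axis=1, keepdims=True)  # log pi(a|s)
entropy = -(a_prob * np.log(a_prob)).sum(axis=1, keepdims=True)   # exploration bonus
assert np.allclose(log_prob[:, 0], np.log([0.7, 0.8]))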
def encode(self, inputs, _input_length, _parses):
    with tf.variable_scope('BagOfWordsEncoder'):
        W = tf.get_variable('W', (self.embed_size, self.output_size))
        b = tf.get_variable('b', shape=(self.output_size,), initializer=tf.constant_initializer(0, tf.float32))
        enc_hidden_states = tf.tanh(tf.tensordot(inputs, W, [[2], [0]]) + b)
        enc_final_state = tf.reduce_sum(enc_hidden_states, axis=1)
        #assert enc_hidden_states.get_shape()[1:] == (self.config.max_length, self.config.hidden_size)
        if self._cell_type == 'lstm':
            enc_final_state = (tf.contrib.rnn.LSTMStateTuple(enc_final_state, enc_final_state),)
        enc_output = tf.nn.dropout(enc_hidden_states, keep_prob=self._dropout, seed=12345)
        return enc_output, enc_final_state
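# tf.tensordot(inputs, W, [[2], [0]]) contracts the embedding axis; the numpy
# equivalent below uses illustrative shapes, not the original config:
import numpy as np

batch, seq_len, embed_size, output_size = 2, 4, 16, 32
inputs = np.random.randn(batch, seq_len, embed_size)
W = np.random.randn(embed_size, output_size)
hidden = np.tanh(np.einsum('ble,eo->blo', inputs, W))  # [batch, seq_len, output_size]
bag_of_words = hidden.sum(axis=1)                      # order-invariant sentence encoding
assert bag_of_words.shape == (batch, output_size)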
def decov_loss(xs):
    """DeCov loss as described in https://arxiv.org/pdf/1511.06068.pdf
    'Reducing Overfitting in Deep Networks by Decorrelating Representations'
    """
    x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
    m = tf.reduce_mean(x, 0, True)
    z = tf.expand_dims(x - m, 2)
    corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)  # covariance matrix
    corr_frob_sqr = tf.reduce_sum(tf.square(corr))
    corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
    loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)
    return loss
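# Numpy sanity check: the loss is half the squared Frobenius norm of the
# off-diagonal covariance entries, so it is always non-negative
# (data below is illustrative):
import numpy as np

x = np.random.randn(64, 10)                         # [batch, features]
z = x - x.mean(axis=0, keepdims=True)
cov = (z[:, :, None] * z[:, None, :]).mean(axis=0)  # [features, features]
loss = 0.5 * ((cov ** 2).sum() - (np.diag(cov) ** 2).sum())
assert loss >= 0.0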
def build_model(self):
    self.x = tf.placeholder(tf.float32, [self.reader.vocab_size], name="input")
    self.x_idx = tf.placeholder(tf.int32, [None], name="x_idx")
    self.build_encoder()
    self.build_generator()
    # Kullback-Leibler divergence between q(z|x) and the standard normal prior
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq))
    # Negative log likelihood of the observed word indices
    self.g_loss = -tf.reduce_sum(tf.log(tf.gather(self.p_x_i, self.x_idx) + 1e-10))
    self.loss = self.e_loss + self.g_loss
    self.encoder_var_list, self.generator_var_list = [], []
    for var in tf.trainable_variables():
        if "encoder" in var.name:
            self.encoder_var_list.append(var)
        elif "generator" in var.name:
            self.generator_var_list.append(var)
    # optimizers for alternating updates of encoder and generator
    self.optim_e = tf.train.AdamOptimizer(learning_rate=self.lr) \
        .minimize(self.e_loss, global_step=self.step, var_list=self.encoder_var_list)
    self.optim_g = tf.train.AdamOptimizer(learning_rate=self.lr) \
        .minimize(self.g_loss, global_step=self.step, var_list=self.generator_var_list)
    # optimizer for one-shot (joint) updates
    self.optim = tf.train.AdamOptimizer(learning_rate=self.lr) \
        .minimize(self.loss, global_step=self.step)
    _ = tf.summary.scalar("encoder_loss", self.e_loss)
    _ = tf.summary.scalar("generator_loss", self.g_loss)
    _ = tf.summary.scalar("total_loss", self.loss)
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a logistic classifier over the average of the
    frame-level features.

    This class is intended to be an example for implementors of frame level
    models. If you want to train a model over averaged features it is more
    efficient to average them beforehand rather than on the fly.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    feature_size = model_input.get_shape().as_list()[2]
    max_frames = model_input.get_shape().as_list()[1]
    denominators = tf.reshape(
        tf.tile(num_frames, [1, feature_size]), [-1, feature_size])
    avg_pooled = tf.reduce_sum(model_input,
                               axis=[1]) / denominators
    output = slim.fully_connected(
        avg_pooled, vocab_size, activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(1e-8))
    return {"predictions": output}
def sub_moe(self,
            model_input,
            vocab_size,
            num_mixtures=None,
            l2_penalty=1e-8,
            scopename="",
            **unused_params):
    num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates" + scopename)
    expert_activations = slim.fully_connected(
        model_input,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts" + scopename)
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return model_input, final_probabilities
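# The gate has num_mixtures + 1 logits: the extra gate acts as a "no expert"
# option whose probability mass is simply dropped, pulling predictions toward
# zero. A numpy sketch of the mixture step (sizes are illustrative):
import numpy as np

num_mixtures = 2
gate_logits = np.random.randn(6, num_mixtures + 1)  # (batch*labels, mixtures+1)
gates = np.exp(gate_logits) / np.exp(gate_logits).sum(axis=1, keepdims=True)
experts = 1.0 / (1.0 + np.exp(-np.random.randn(6, num_mixtures)))  # sigmoid
probs = (gates[:, :num_mixtures] * experts).sum(axis=1)
assert np.all((probs >= 0) & (probs <= 1))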
def create_model(self,
                 model_input,
                 vocab_size,
                 num_frames,
                 **unused_params):
    shape = model_input.get_shape().as_list()
    # mask: 1 for real frames (non-zero feature vectors), 0 for padding
    frames_sum = tf.reduce_sum(tf.abs(model_input), axis=2)
    frames_true = tf.ones(tf.shape(frames_sum))
    frames_false = tf.zeros(tf.shape(frames_sum))
    frames_bool = tf.reshape(tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false),
                             [-1, shape[1], 1])
    activation_1 = tf.reduce_max(model_input, axis=1)
    activation_2 = tf.reduce_sum(model_input * frames_bool, axis=1) / (tf.reduce_sum(frames_bool, axis=1) + 1e-6)
    activation_3 = tf.reduce_min(model_input, axis=1)
    model_input_1, final_probilities_1 = self.sub_moe(activation_1, vocab_size, scopename="_max")
    model_input_2, final_probilities_2 = self.sub_moe(activation_2, vocab_size, scopename="_mean")
    model_input_3, final_probilities_3 = self.sub_moe(activation_3, vocab_size, scopename="_min")
    final_probilities = tf.stack((final_probilities_1, final_probilities_2, final_probilities_3), axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[shape[2], 3, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
    weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)
    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result
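# The einsum "aij,ijk->ajk" turns the three pooled activations into per-class
# ensemble logits; softmax over the pooling axis then weights the three MoE
# predictions. A shape-only numpy sketch (sizes are illustrative):
import numpy as np

batch, feat, n_pool, vocab = 2, 8, 3, 5
activations = np.random.randn(batch, feat, n_pool)         # a i j
weight2d = np.random.randn(feat, n_pool, vocab)            # i j k
logits = np.einsum('aij,ijk->ajk', activations, weight2d)  # [batch, n_pool, vocab]
weight = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
assert weight.shape == (batch, n_pool, vocab)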
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_frames"):
        epsilon = 10e-6
        float_labels = tf.cast(labels, tf.float32)
        # per-frame log-likelihood; note it is returned unnegated
        cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
            1 - float_labels) * tf.log(1 - predictions + epsilon)
        return tf.reduce_sum(cross_entropy_loss, axis=2)
def create_model(self, model_input, vocab_size, num_frames, l2_penalty=1e-8, **unused_params):
    num_extend = FLAGS.moe_num_extend
    num_layers = num_extend
    lstm_size = FLAGS.lstm_cells
    pool_size = 2
    cnn_input = model_input
    num_filters = [256, 256, 512]
    filter_sizes = [1, 2, 3]
    features_size = sum(num_filters)
    final_probilities = []
    moe_inputs = []
    for layer in range(num_layers):
        cnn_output, num_t = self.cnn(cnn_input, num_filters=num_filters, filter_sizes=filter_sizes, sub_scope="cnn%d" % (layer + 1))
        cnn_output = tf.nn.relu(cnn_output)
        cnn_multiscale = self.rnn(cnn_output, lstm_size, num_frames, sub_scope="rnn%d" % (layer + 1))
        moe_inputs.append(cnn_multiscale)
        final_probility = self.sub_moe(cnn_multiscale, vocab_size, scopename="moe%d" % (layer + 1))
        final_probilities.append(final_probility)
        # halve the temporal resolution with max pooling before the next layer
        num_t = pool_size * (num_t // pool_size)
        cnn_output = tf.reshape(cnn_output[:, :num_t, :], [-1, num_t // pool_size, pool_size, features_size])
        cnn_input = tf.reduce_max(cnn_output, axis=2)
        num_frames = tf.maximum(num_frames // pool_size, 1)
    final_probilities = tf.stack(final_probilities, axis=1)
    moe_inputs = tf.stack(moe_inputs, axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[num_extend, features_size, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    weight = tf.nn.softmax(tf.einsum("aij,ijk->aik", moe_inputs, weight2d), dim=1)
    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result
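# The pooling step truncates the time axis to a multiple of pool_size, then
# reshapes so tf.reduce_max pools adjacent pairs of frames. In numpy
# (sizes are illustrative):
import numpy as np

pool_size, features_size = 2, 4
x = np.random.randn(1, 7, features_size)  # [batch, time, features]
num_t = pool_size * (7 // pool_size)      # -> 6, dropping the tail frame
pooled = x[:, :num_t, :].reshape(1, num_t // pool_size, pool_size, features_size).max(axis=2)
assert pooled.shape == (1, 3, features_size)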
def create_model(self, model_input, vocab_size, num_frames, distill_labels=None, l2_penalty=1e-8, **unused_params):
    num_extend = FLAGS.moe_num_extend
    num_layers = num_extend
    lstm_size = FLAGS.lstm_cells
    pool_size = 2
    cnn_input = model_input
    cnn_size = FLAGS.cnn_cells
    num_filters = [cnn_size, cnn_size, cnn_size * 2]
    filter_sizes = [1, 2, 3]
    features_size = sum(num_filters)
    final_probilities = []
    moe_inputs = []
    for layer in range(num_layers):
        cnn_output, num_t = self.cnn(cnn_input, num_filters=num_filters, filter_sizes=filter_sizes, sub_scope="cnn%d" % (layer + 1))
        cnn_output = tf.nn.relu(cnn_output)
        cnn_multiscale = self.rnn(cnn_output, lstm_size, num_frames, sub_scope="rnn%d" % (layer + 1))
        moe_inputs.append(cnn_multiscale)
        final_probility = self.sub_moe(cnn_multiscale, vocab_size, distill_labels=distill_labels, scopename="moe%d" % (layer + 1))
        final_probilities.append(final_probility)
        num_t = pool_size * (num_t // pool_size)
        cnn_output = tf.reshape(cnn_output[:, :num_t, :], [-1, num_t // pool_size, pool_size, features_size])
        cnn_input = tf.reduce_max(cnn_output, axis=2)
        num_frames = tf.maximum(num_frames // pool_size, 1)
    final_probilities = tf.stack(final_probilities, axis=1)
    moe_inputs = tf.stack(moe_inputs, axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[num_extend, lstm_size, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    weight = tf.nn.softmax(tf.einsum("aij,ijk->aik", moe_inputs, weight2d), dim=1)
    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result
def create_model(self, model_input, vocab_size, num_frames, l2_penalty=1e-8, **unused_params):
    num_extend = FLAGS.moe_num_extend
    num_layers = 10
    pool_size = 2
    cnn_input = model_input
    num_filters = [256, 256, 512]
    filter_sizes = [1, 2, 3]
    features_size = sum(num_filters)
    for layer in range(num_layers):
        cnn_output, num_t = self.cnn(cnn_input, num_filters=num_filters, filter_sizes=filter_sizes, sub_scope="cnn%d" % (layer + 1))
        if layer < 3:
            num_t = pool_size * (num_t // pool_size)
            cnn_output = tf.reshape(cnn_output[:, :num_t, :], [-1, num_t // pool_size, pool_size, features_size])
            cnn_input = tf.reduce_max(cnn_output, axis=2)
        else:
            cnn_input = cnn_output
    cnn_output, num_t = self.kmax(cnn_input, num_filters=features_size, filter_sizes=num_extend, sub_scope="kmax")
    cnn_input = tf.reshape(cnn_output, [-1, features_size])
    final_probilities = self.sub_moe(cnn_input, vocab_size)
    final_probilities = tf.reshape(final_probilities, [-1, num_extend, vocab_size])
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[num_extend, features_size, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    weight = tf.nn.softmax(tf.einsum("aij,ijk->aik", cnn_output, weight2d), dim=1)
    result = {}
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result
def create_model(self, model_input, vocab_size, num_frames, l2_penalty=1e-8, **unused_params):
    num_extend = FLAGS.moe_num_extend
    num_layers = num_extend
    pool_size = 2
    cnn_input = model_input
    num_filters = [256, 256, 512]
    filter_sizes = [1, 2, 3]
    features_size = sum(num_filters)
    final_probilities = []
    moe_inputs = []
    for layer in range(num_layers):
        cnn_output, num_t = CnnKmaxModel().cnn(cnn_input, num_filters=num_filters, filter_sizes=filter_sizes, sub_scope="cnn%d" % (layer + 1), l2_penalty=0.0)
        cnn_output = tf.nn.relu(cnn_output)
        cnn_multiscale = tf.reduce_max(cnn_output, axis=1)
        moe_inputs.append(cnn_multiscale)
        final_probility = CnnKmaxModel().sub_moe(cnn_multiscale, vocab_size, scopename="moe%d" % (layer + 1), l2_penalty=0.0)
        final_probilities.append(final_probility)
        num_t = pool_size * (num_t // pool_size)
        cnn_output = tf.reshape(cnn_output[:, :num_t, :], [-1, num_t // pool_size, pool_size, features_size])
        cnn_input = tf.reduce_max(cnn_output, axis=2)
    final_probilities = tf.stack(final_probilities, axis=1)
    moe_inputs = tf.stack(moe_inputs, axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[num_extend, features_size, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    weight = tf.nn.softmax(tf.einsum("aij,ijk->aik", tf.stop_gradient(moe_inputs), weight2d), dim=1)
    result = {}
    result["predictions"] = tf.reduce_sum(tf.stop_gradient(final_probilities) * weight, axis=1)
    return result
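# tf.stop_gradient above freezes both the stacked predictions and the MoE
# inputs, so backprop through result["predictions"] only updates
# ensemble_weight2d. A minimal TF 1.x illustration with toy values:
import tensorflow as tf

x = tf.constant(2.0)
w = tf.Variable(3.0)
y = tf.stop_gradient(x * w) * w  # gradient flows only through the outer w
grad = tf.gradients(y, w)[0]     # equals stop_gradient(x*w) = 6.0, not 2*x*w = 12.0
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grad))  # 6.0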
def create_model(self, model_input, vocab_size, num_frames, l2_penalty=1e-8, **unused_params):
    num_extend = FLAGS.moe_num_extend
    num_layers = num_extend
    pool_size = 2
    cnn_input = model_input
    num_filters = [256, 256, 512]
    filter_sizes = [1, 2, 3]
    features_size = sum(num_filters)
    final_probilities = []
    moe_inputs = []
    for layer in range(num_layers):
        cnn_output, num_t = self.cnn(cnn_input, num_filters=num_filters, filter_sizes=filter_sizes, sub_scope="cnn%d" % (layer + 1))
        cnn_output = tf.nn.relu(cnn_output)
        cnn_multiscale = tf.reduce_max(cnn_output, axis=1)
        moe_inputs.append(cnn_multiscale)
        final_probility = self.sub_moe(cnn_multiscale, vocab_size, scopename="moe%d" % (layer + 1))
        final_probilities.append(final_probility)
        num_t = pool_size * (num_t // pool_size)
        cnn_output = tf.reshape(cnn_output[:, :num_t, :], [-1, num_t // pool_size, pool_size, features_size])
        cnn_input = tf.reduce_max(cnn_output, axis=2)
    final_probilities = tf.stack(final_probilities, axis=1)
    moe_inputs = tf.stack(moe_inputs, axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[num_extend, features_size, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    weight = tf.nn.softmax(tf.einsum("aij,ijk->aik", moe_inputs, weight2d), dim=1)
    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result
def sub_model(self, model_input, vocab_size, num_mixtures=None,
              l2_penalty=1e-8, sub_scope="", distill_labels=None, **unused_params):
    num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
    class_size = 256
    if distill_labels is not None:
        class_input = slim.fully_connected(
            distill_labels,
            class_size,
            activation_fn=tf.nn.relu,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="class_inputs")
        class_input = tf.nn.l2_normalize(class_input, dim=1)
        model_input = tf.concat((model_input, class_input), axis=1)
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates-" + sub_scope)
    expert_activations = slim.fully_connected(
        model_input,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts-" + sub_scope)
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return final_probabilities
def calculate_loss_distill(self, predictions, labels_distill, labels, **unused_params):
    with tf.name_scope("loss_distill"):
        print("loss_distill")
        epsilon = 10e-6
        float_labels = tf.cast(labels, tf.float32)
        float_labels_distill = tf.cast(labels_distill, tf.float32)
        embedding_mat = np.loadtxt("./resources/embedding_matrix.model")
        vocab_size = embedding_mat.shape[1]
        labels_size = float_labels.get_shape().as_list()[1]
        embedding_mat = tf.cast(embedding_mat, dtype=tf.float32)
        cross_entropy_loss_1 = float_labels * tf.log(predictions + epsilon) + (
            1 - float_labels) * tf.log(1 - predictions + epsilon)
        # smooth the ground-truth labels with the label-embedding matrix
        float_labels_1 = float_labels[:, :vocab_size]
        labels_smooth = tf.matmul(float_labels_1, embedding_mat) / tf.reduce_sum(float_labels_1, axis=1, keep_dims=True)
        float_classes = labels_smooth
        for i in range(labels_size // vocab_size - 1):
            float_classes = tf.concat((float_classes, labels_smooth), axis=1)
        cross_entropy_loss_2 = float_classes * tf.log(predictions + epsilon) + (
            1 - float_classes) * tf.log(1 - predictions + epsilon)
        cross_entropy_loss_3 = float_labels_distill * tf.log(predictions + epsilon) + (
            1 - float_labels_distill) * tf.log(1 - predictions + epsilon)
        cross_entropy_loss = cross_entropy_loss_1 * 0.5 + cross_entropy_loss_2 * 0.5 + cross_entropy_loss_3 * 0.5
        cross_entropy_loss = tf.negative(cross_entropy_loss)
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
def calculate_loss_negative(self, predictions_pos, predictions_neg, labels, **unused_params):
    with tf.name_scope("loss_negative"):
        epsilon = 10e-6
        float_labels = tf.cast(labels, tf.float32)
        # reweight positive terms by inverse label frequency
        weight_pos = np.loadtxt(FLAGS.autoencoder_dir + "labels_uni.out")
        weight_pos = tf.reshape(tf.cast(weight_pos, dtype=tf.float32), [1, -1])
        weight_pos = tf.log(tf.reduce_max(weight_pos) / weight_pos) + 1
        cross_entropy_loss_1 = float_labels * tf.log(predictions_pos + epsilon) * weight_pos + (
            1 - float_labels) * tf.log(1 - predictions_pos + epsilon)
        # the negative head is trained to predict the complement of the labels
        cross_entropy_loss_2 = (1 - float_labels) * tf.log(predictions_neg + epsilon) + \
            float_labels * tf.log(1 - predictions_neg + epsilon)
        cross_entropy_loss = tf.negative(cross_entropy_loss_1 + cross_entropy_loss_2)
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
def calculate_mseloss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_mse"):
        float_labels = tf.cast(labels, tf.float32)
        mse_loss = tf.square(predictions - float_labels)
        return tf.reduce_mean(tf.reduce_sum(mse_loss, 1))
def calculate_loss_postprocess(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_postprocess"):
        float_labels = tf.cast(labels, tf.float32)
        predictions_pos = predictions * float_labels + (1 - float_labels)
        predictions_neg = predictions * (1 - float_labels)
        min_pos = tf.stop_gradient(tf.reduce_min(predictions_pos))
        max_neg = tf.stop_gradient(tf.reduce_max(predictions_neg))
        # penalize positives scored below the highest negative, and negatives
        # scored above the lowest positive
        predictions_pos_mistake = tf.nn.relu(max_neg - predictions_pos) - 0.01 * tf.nn.relu(predictions_pos - max_neg)
        predictions_neg_mistake = tf.nn.relu(predictions_neg - min_pos) - 0.01 * tf.nn.relu(min_pos - predictions_neg)
        postprocess_loss = predictions_pos_mistake + predictions_neg_mistake
        return tf.reduce_mean(tf.reduce_sum(postprocess_loss, 1))
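# Numpy illustration of the ranking-style penalty above: the positive scored
# 0.6 sits below the negative scored 0.7, so both hinge terms fire
# (values are illustrative):
import numpy as np

preds = np.array([[0.9, 0.7, 0.6]])
labels = np.array([[1, 0, 1]], dtype=np.float32)
pred_pos = preds * labels + (1 - labels)  # negatives masked to 1.0
pred_neg = preds * (1 - labels)           # positives masked to 0.0
max_neg, min_pos = pred_neg.max(), pred_pos.min()
pos_mistake = np.maximum(max_neg - pred_pos, 0) - 0.01 * np.maximum(pred_pos - max_neg, 0)
neg_mistake = np.maximum(pred_neg - min_pos, 0) - 0.01 * np.maximum(min_pos - pred_neg, 0)
assert (pos_mistake + neg_mistake).sum() > 0  # the ranking mistake is penalized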