def inference(self):
    """Building blocks:
    encoder: 6 layers. Each layer has two sub-layers: the first is a multi-head self-attention
    mechanism; the second is a position-wise fully connected feed-forward network.
    Each sub-layer is wrapped as LayerNorm(x + Sublayer(x)). All dimensions are 512.
    decoder: 6 layers. Each layer has three sub-layers; the second performs multi-head attention
    over the output of the encoder stack.
    Each sub-layer is wrapped as LayerNorm(x + Sublayer(x)).
    """
    # 1. embedding for encoder input & decoder input
    # 1.1 token embedding plus a learned per-position bias for the encoder input
    input_x_embeded = tf.nn.embedding_lookup(self.Embedding, self.input_x)  # [None, sequence_length, embed_size]
    input_x_embeded = tf.multiply(input_x_embeded, tf.sqrt(tf.cast(self.d_model, dtype=tf.float32)))
    input_mask = tf.get_variable("input_mask", [self.sequence_length, 1], initializer=self.initializer)
    input_x_embeded = tf.add(input_x_embeded, input_mask)  # [None, sequence_length, embed_size]; position embedding broadcast over embed_size
    # 2. encoder
    encoder_class = Encoder(self.d_model, self.d_k, self.d_v, self.sequence_length, self.h, self.batch_size,
                            self.num_layer, input_x_embeded, input_x_embeded,
                            dropout_keep_prob=self.dropout_keep_prob, use_residual_conn=self.use_residual_conn)
    Q_encoded, K_encoded = encoder_class.encoder_fn()  # K_v_encoder
    Q_encoded = tf.reshape(Q_encoded, shape=(self.batch_size, -1))  # [batch_size, sequence_length*d_model]
    with tf.variable_scope("output"):
        logits = tf.matmul(Q_encoded, self.W_projection) + self.b_projection  # logits shape: [batch_size, num_classes]
    print("logits:", logits)
    return logits
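The snippet above injects position information as a learned per-position bias (input_mask). The "Attention Is All You Need" architecture that the docstring describes instead uses fixed sinusoidal position encodings; a minimal sketch of that variant is below (the helper name and its use are illustrative, not part of this project):

import numpy as np
import tensorflow as tf

def sinusoidal_position_encoding(sequence_length, d_model):
    # PE(pos, 2i) = sin(pos / 10000^(2i/d_model)), PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))
    positions = np.arange(sequence_length)[:, np.newaxis]              # [sequence_length, 1]
    dims = np.arange(d_model)[np.newaxis, :]                           # [1, d_model]
    angle_rates = 1.0 / np.power(10000.0, (2 * (dims // 2)) / np.float32(d_model))
    angles = positions * angle_rates                                   # [sequence_length, d_model]
    pe = np.zeros((sequence_length, d_model), dtype=np.float32)
    pe[:, 0::2] = np.sin(angles[:, 0::2])
    pe[:, 1::2] = np.cos(angles[:, 1::2])
    return tf.constant(pe)

# hypothetical use in place of the learned input_mask above:
# input_x_embeded = input_x_embeded + sinusoidal_position_encoding(self.sequence_length, self.d_model)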
Python get_variable() example source code
Source file: a2_transformer_classification.py, project: text_classification, author: brightmart
def init():
    # 1. assign values to fields
    vocab_size = 1000
    d_model = 512
    d_k = 64
    d_v = 64
    sequence_length = 5 * 10
    h = 8
    batch_size = 4 * 32
    initializer = tf.random_normal_initializer(stddev=0.1)
    # 2. set values for Q, K, V
    vocab_size = 1000
    embed_size = d_model
    Embedding = tf.get_variable("Embedding_E", shape=[vocab_size, embed_size], initializer=initializer)
    input_x = tf.placeholder(tf.int32, [batch_size, sequence_length], name="input_x")  # [batch_size, sequence_length]
    print("input_x:", input_x)
    embedded_words = tf.nn.embedding_lookup(Embedding, input_x)  # [batch_size, sequence_length, embed_size]
    Q = embedded_words    # [batch_size, sequence_length, embed_size]
    K_s = embedded_words  # [batch_size, sequence_length, embed_size]
    num_layer = 6
    mask = get_mask(batch_size, sequence_length)
    # 3. construct the Encoder object
    encoder_class = Encoder(d_model, d_k, d_v, sequence_length, h, batch_size, num_layer, Q, K_s, mask=mask)
    return encoder_class, Q, K_s
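A minimal driver for init() might look like the sketch below; the session setup, the random feed values, and the assumption that Encoder.encoder_fn() returns the encoded Q and K tensors (as in inference() above) are illustrative rather than part of the original file:

import numpy as np
import tensorflow as tf

encoder_class, Q, K_s = init()
Q_encoded, K_encoded = encoder_class.encoder_fn()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    input_x = tf.get_default_graph().get_tensor_by_name("input_x:0")
    # random token ids matching the placeholder shape [batch_size=128, sequence_length=50]
    fake_ids = np.random.randint(0, 1000, size=(128, 50))
    print(sess.run(Q_encoded, feed_dict={input_x: fake_ids}).shape)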
def weightVariable(shape, std=1.0, name=None):
    # Create a set of weights initialized with truncated normal random values
    name = 'weights' if name is None else name
    return tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=std / math.sqrt(shape[0])))
def biasVariable(shape, bias=0.1, name=None):
    # Create a set of bias nodes initialized with a constant 0.1
    name = 'biases' if name is None else name
    return tf.get_variable(name, shape, initializer=tf.constant_initializer(bias))
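Because both helpers call tf.get_variable with fixed default names ('weights' and 'biases'), each layer built from them needs its own variable scope. A small hypothetical fully connected layer using them:

import math
import tensorflow as tf

def dense_layer(x, out_dim, scope_name):
    # x: [batch, in_dim]; reuses weightVariable/biasVariable defined above
    in_dim = int(x.get_shape()[1])
    with tf.variable_scope(scope_name):
        W = weightVariable([in_dim, out_dim], std=1.0)
        b = biasVariable([out_dim])
        return tf.nn.relu(tf.matmul(x, W) + b)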
def conv(inputs, kernel_shape, bias_shape, strides, w_i, b_i=None, activation=tf.nn.relu):
    # Equivalent layer built with tf.layers:
    # relu1 = tf.layers.conv2d(input_imgs, filters=24, kernel_size=[5, 5], strides=[2, 2],
    #                          padding='SAME', activation=tf.nn.relu,
    #                          kernel_initializer=w_i, bias_initializer=b_i)
    weights = tf.get_variable('weights', shape=kernel_shape, initializer=w_i)
    conv = tf.nn.conv2d(inputs, weights, strides=strides, padding='SAME')
    if bias_shape is not None:
        biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
        return activation(conv + biases) if activation is not None else conv + biases
    # Without a bias, the activation (if any) is applied directly to the convolution output.
    return activation(conv) if activation is not None else conv
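Here too the kernel is created as tf.get_variable('weights', ...), so two calls in the same scope would collide. A hypothetical call site (the input shape, kernel shapes, and initializers are illustrative) wraps each layer in its own scope:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 84, 84, 4])
w_init = tf.truncated_normal_initializer(stddev=0.02)
b_init = tf.constant_initializer(0.0)

with tf.variable_scope('conv1'):
    h1 = conv(images, kernel_shape=[8, 8, 4, 32], bias_shape=[32],
              strides=[1, 4, 4, 1], w_i=w_init, b_i=b_init)
with tf.variable_scope('conv2'):
    h2 = conv(h1, kernel_shape=[4, 4, 32, 64], bias_shape=[64],
              strides=[1, 2, 2, 1], w_i=w_init, b_i=b_init)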
def noisy_dense(inputs, units, bias_shape, c_names, w_i, b_i=None, activation=tf.nn.relu, noisy_distribution='factorised'):
    def f(e_list):
        return tf.multiply(tf.sign(e_list), tf.pow(tf.abs(e_list), 0.5))
    # tf.layers.dense would flatten the input automatically:
    # dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50)
    if not isinstance(inputs, ops.Tensor):
        inputs = ops.convert_to_tensor(inputs, dtype='float')
    # dim_list = inputs.get_shape().as_list()
    # flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:])
    # reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape])
    if len(inputs.shape) > 2:
        inputs = tf.contrib.layers.flatten(inputs)
    flatten_shape = inputs.shape[1]
    weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
    w_noise = tf.get_variable('w_noise', [flatten_shape, units], initializer=w_i, collections=c_names)
    if noisy_distribution == 'independent':
        weights += tf.multiply(tf.random_normal(shape=w_noise.shape), w_noise)
    elif noisy_distribution == 'factorised':
        # factorised Gaussian noise: one noise vector per input unit and one per output unit
        noise_1 = f(tf.random_normal(tf.TensorShape([flatten_shape, 1]), dtype=tf.float32))
        noise_2 = f(tf.random_normal(tf.TensorShape([1, units]), dtype=tf.float32))
        weights += tf.multiply(noise_1 * noise_2, w_noise)
    dense = tf.matmul(inputs, weights)
    if bias_shape is not None:
        assert bias_shape[0] == units
        biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
        b_noise = tf.get_variable('b_noise', [1, units], initializer=b_i, collections=c_names)
        if noisy_distribution == 'independent':
            biases += tf.multiply(tf.random_normal(shape=b_noise.shape), b_noise)
        elif noisy_distribution == 'factorised':
            biases += tf.multiply(noise_2, b_noise)
        return activation(dense + biases) if activation is not None else dense + biases
    # Without a bias, the activation (if any) is applied directly to the matmul output.
    return activation(dense) if activation is not None else dense
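noisy_dense implements a NoisyNet-style noisy linear layer: the effective weights are weights + noise * w_noise, with either independent or factorised Gaussian noise. A hypothetical call site (the collection names, shapes, and scopes are illustrative) mirrors the conv usage above:

import tensorflow as tf

c_names = ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
w_init = tf.truncated_normal_initializer(stddev=0.02)
b_init = tf.constant_initializer(0.0)

state = tf.placeholder(tf.float32, [None, 84, 84, 4])
with tf.variable_scope('noisy_fc1'):
    fc1 = noisy_dense(state, units=512, bias_shape=[512], c_names=c_names,
                      w_i=w_init, b_i=b_init, noisy_distribution='factorised')
with tf.variable_scope('q_values'):
    q_values = noisy_dense(fc1, units=6, bias_shape=[6], c_names=c_names,
                           w_i=w_init, b_i=b_init, activation=None)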
Source file: gen_embeddings.py, project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab
def make_skipgram_softmax_loss(embeddings_matrix, vocabulary_size, vector_size):
    vectors = tf.get_variable('vectors', (vocabulary_size, vector_size), dtype=tf.float32,
                              initializer=tf.constant_initializer(embeddings_matrix))
    minibatch = tf.placeholder(shape=(None, 2), dtype=tf.int32)
    center_word_vector = tf.nn.embedding_lookup(vectors, minibatch[:, 0])
    yhat = tf.matmul(center_word_vector, vectors, transpose_b=True)
    predict_word = minibatch[:, 1]
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=predict_word, logits=yhat)
    loss = tf.reduce_mean(loss)
    return vectors, minibatch, loss
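The returned minibatch placeholder expects rows of (center word id, predicted/context word id). A hypothetical training step around this loss (the optimizer, learning rate, and the stand-in embedding matrix are illustrative):

import numpy as np
import tensorflow as tf

pretrained_matrix = np.random.randn(10000, 300).astype(np.float32)  # stand-in for real pretrained embeddings
vectors, minibatch, loss = make_skipgram_softmax_loss(pretrained_matrix, vocabulary_size=10000, vector_size=300)
train_op = tf.train.AdagradOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    pairs = np.array([[12, 40], [12, 7], [85, 3]], dtype=np.int32)   # [center_id, context_id] per row
    _, batch_loss = sess.run([train_op, loss], feed_dict={minibatch: pairs})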
def encode(self, inputs, _input_length, _parses):
    with tf.variable_scope('BagOfWordsEncoder'):
        W = tf.get_variable('W', (self.embed_size, self.output_size))
        b = tf.get_variable('b', shape=(self.output_size,), initializer=tf.constant_initializer(0, tf.float32))
        enc_hidden_states = tf.tanh(tf.tensordot(inputs, W, [[2], [0]]) + b)
        enc_final_state = tf.reduce_sum(enc_hidden_states, axis=1)
        #assert enc_hidden_states.get_shape()[1:] == (self.config.max_length, self.config.hidden_size)
        if self._cell_type == 'lstm':
            enc_final_state = (tf.contrib.rnn.LSTMStateTuple(enc_final_state, enc_final_state),)
        enc_output = tf.nn.dropout(enc_hidden_states, keep_prob=self._dropout, seed=12345)
        return enc_output, enc_final_state
Source file: base_aligner.py, project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab
def add_input_op(self, xavier):
    with tf.variable_scope('embed'):
        # first embed the input
        if self.config.train_input_embeddings:
            if self.config.input_embedding_matrix:
                initializer = tf.constant_initializer(self.config.input_embedding_matrix)
            else:
                initializer = xavier
            input_embed_matrix = tf.get_variable('input_embedding',
                                                 shape=(self.config.dictionary_size, self.config.embed_size),
                                                 initializer=initializer)
        else:
            input_embed_matrix = tf.constant(self.config.input_embedding_matrix)
        # dictionary_size x embed_size
        assert input_embed_matrix.get_shape() == (self.config.dictionary_size, self.config.embed_size)
        # now embed the output
        if self.config.train_output_embeddings:
            output_embed_matrix = tf.get_variable('output_embedding',
                                                  shape=(self.config.output_size, self.config.output_embed_size),
                                                  initializer=xavier)
        else:
            output_embed_matrix = tf.constant(self.config.output_embedding_matrix)
        assert output_embed_matrix.get_shape() == (self.config.output_size, self.config.output_embed_size)
        inputs = tf.nn.embedding_lookup([input_embed_matrix], self.input_placeholder)
        # batch_size x max_length x embed_size
        assert inputs.get_shape()[1:] == (self.config.max_length, self.config.embed_size)
        return inputs, output_embed_matrix
Source file: seq2seq_aligner.py, project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab
def add_decoder_op(self, enc_final_state, enc_hidden_states, output_embed_matrix, training):
    cell_dec = tf.contrib.rnn.MultiRNNCell([self.make_rnn_cell(i, True) for i in range(self.config.rnn_layers)])
    encoder_hidden_size = int(enc_hidden_states.get_shape()[-1])
    decoder_hidden_size = int(cell_dec.output_size)
    # if encoder and decoder have different sizes, add a projection layer
    if encoder_hidden_size != decoder_hidden_size:
        assert False, (encoder_hidden_size, decoder_hidden_size)
        with tf.variable_scope('hidden_projection'):
            kernel = tf.get_variable('kernel', (encoder_hidden_size, decoder_hidden_size), dtype=tf.float32)
            # apply a relu to the projection for good measure
            enc_final_state = nest.map_structure(lambda x: tf.nn.relu(tf.matmul(x, kernel)), enc_final_state)
            enc_hidden_states = tf.nn.relu(tf.tensordot(enc_hidden_states, kernel, [[2], [1]]))
    else:
        # flatten and repack the state
        enc_final_state = nest.pack_sequence_as(cell_dec.state_size, nest.flatten(enc_final_state))
    if self.config.connect_output_decoder:
        cell_dec = ParentFeedingCellWrapper(cell_dec, enc_final_state)
    else:
        cell_dec = InputIgnoringCellWrapper(cell_dec, enc_final_state)
    if self.config.apply_attention:
        attention = LuongAttention(self.config.decoder_hidden_size, enc_hidden_states, self.input_length_placeholder,
                                   probability_fn=tf.nn.softmax)
        cell_dec = AttentionWrapper(cell_dec, attention,
                                    cell_input_fn=lambda inputs, _: inputs,
                                    attention_layer_size=self.config.decoder_hidden_size,
                                    initial_cell_state=enc_final_state)
        enc_final_state = cell_dec.zero_state(self.batch_size, dtype=tf.float32)
    decoder = Seq2SeqDecoder(self.config, self.input_placeholder, self.input_length_placeholder,
                             self.output_placeholder, self.output_length_placeholder, self.batch_number_placeholder)
    return decoder.decode(cell_dec, enc_final_state, self.config.grammar.output_size, output_embed_matrix, training)
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
    (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
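The second return value is the scatter_sub update op, so the class centers only move when that op is actually evaluated. A hypothetical way to wire center loss into a training objective (the placeholder shapes, the 0.5 weight, and the optimizer are illustrative):

import tensorflow as tf

num_classes = 10
embeddings = tf.placeholder(tf.float32, [None, 128])          # feature vectors from the network
logits = tf.placeholder(tf.float32, [None, num_classes])      # classification head output
labels = tf.placeholder(tf.int64, [None])

softmax_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
c_loss, centers_update_op = center_loss(embeddings, labels, alfa=0.95, nrof_classes=num_classes)
total_loss = softmax_loss + 0.5 * c_loss

# make sure the centers are updated together with each gradient step
with tf.control_dependencies([centers_update_op]):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)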
def build_decoder(self):
    """Inference Network. p(X|h)"""
    with tf.variable_scope("decoder"):
        R = tf.get_variable("R", [self.reader.vocab_size, self.h_dim])
        b = tf.get_variable("b", [self.reader.vocab_size])
        x_i = tf.diag([1.] * self.reader.vocab_size)
        e = -tf.matmul(tf.matmul(self.h, R, transpose_b=True), x_i) + b
        self.p_x_i = tf.squeeze(tf.nn.softmax(e))
def build_generator(self):
    """Inference Network. p(X|h)"""
    with tf.variable_scope("generator"):
        self.R = tf.get_variable("R", [self.reader.vocab_size, self.h_dim])
        self.b = tf.get_variable("b", [self.reader.vocab_size])
        self.e = -tf.matmul(self.h, self.R, transpose_b=True) + self.b
        self.p_x_i = tf.squeeze(tf.nn.softmax(self.e))
def create_model(self,
                 model_input,
                 vocab_size,
                 num_frames,
                 **unused_params):
    shape = model_input.get_shape().as_list()
    frames_sum = tf.reduce_sum(tf.abs(model_input), axis=2)
    frames_true = tf.ones(tf.shape(frames_sum))
    frames_false = tf.zeros(tf.shape(frames_sum))
    frames_bool = tf.reshape(tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false), [-1, shape[1], 1])
    activation_1 = tf.reduce_max(model_input, axis=1)
    activation_2 = tf.reduce_sum(model_input * frames_bool, axis=1) / (tf.reduce_sum(frames_bool, axis=1) + 1e-6)
    activation_3 = tf.reduce_min(model_input, axis=1)
    model_input_1, final_probilities_1 = self.sub_moe(activation_1, vocab_size, scopename="_max")
    model_input_2, final_probilities_2 = self.sub_moe(activation_2, vocab_size, scopename="_mean")
    model_input_3, final_probilities_3 = self.sub_moe(activation_3, vocab_size, scopename="_min")
    final_probilities = tf.stack((final_probilities_1, final_probilities_2, final_probilities_3), axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[shape[2], 3, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
    weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)
    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result
def __call__(self, x, train=True):
    shape = x.get_shape().as_list()
    with tf.variable_scope(self.name) as scope:
        self.beta = tf.get_variable("beta", shape[1:],
                                    initializer=tf.constant_initializer(0.))
        self.gamma = tf.get_variable("gamma", shape[1:],
                                     initializer=tf.random_normal_initializer(1., 0.02))
        self.mean = tf.get_variable("mean", shape[1:],
                                    initializer=tf.constant_initializer(0.), trainable=False)
        self.variance = tf.get_variable("variance", shape[1:],
                                        initializer=tf.constant_initializer(1.), trainable=False)
        if train:
            batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
            # write the batch statistics into the non-trainable variables before the EMA update runs
            assign_mean = self.mean.assign(batch_mean)
            assign_variance = self.variance.assign(batch_var)
            with tf.control_dependencies([assign_mean, assign_variance]):
                ema_apply_op = self.ema.apply([self.mean, self.variance])
            with tf.control_dependencies([ema_apply_op]):
                mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema.average(self.mean), self.ema.average(self.variance)
        normed = tf.nn.batch_normalization(x, mean, var, self.beta, self.gamma, self.epsilon)
    return normed
def cnn(self,
        model_input,
        l2_penalty=1e-8,
        num_filters=[1024, 1024, 1024],
        filter_sizes=[1, 2, 3],
        sub_scope="",
        **unused_params):
    max_frames = model_input.get_shape().as_list()[1]
    num_features = model_input.get_shape().as_list()[2]
    shift_inputs = []
    for i in range(max(filter_sizes)):
        if i == 0:
            shift_inputs.append(model_input)
        else:
            shift_inputs.append(tf.pad(model_input, paddings=[[0, 0], [i, 0], [0, 0]])[:, :max_frames, :])
    cnn_outputs = []
    for nf, fs in zip(num_filters, filter_sizes):
        sub_input = tf.concat(shift_inputs[:fs], axis=2)
        sub_filter = tf.get_variable(sub_scope + "cnn-filter-len%d" % fs,
                                     shape=[num_features * fs, nf], dtype=tf.float32,
                                     initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
                                     regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
        cnn_outputs.append(tf.einsum("ijk,kl->ijl", sub_input, sub_filter))
    cnn_output = tf.concat(cnn_outputs, axis=2)
    cnn_output = slim.batch_norm(
        cnn_output,
        center=True,
        scale=True,
        is_training=FLAGS.train,
        scope=sub_scope + "cluster_bn")
    return cnn_output, max_frames
def create_model(self, model_input, vocab_size, num_frames, l2_penalty=1e-8, **unused_params):
    num_extend = FLAGS.moe_num_extend
    num_layers = num_extend
    lstm_size = FLAGS.lstm_cells
    pool_size = 2
    cnn_input = model_input
    num_filters = [256, 256, 512]
    filter_sizes = [1, 2, 3]
    features_size = sum(num_filters)
    final_probilities = []
    moe_inputs = []
    for layer in range(num_layers):
        cnn_output, num_t = self.cnn(cnn_input, num_filters=num_filters, filter_sizes=filter_sizes, sub_scope="cnn%d" % (layer + 1))
        cnn_output = tf.nn.relu(cnn_output)
        cnn_multiscale = self.rnn(cnn_output, lstm_size, num_frames, sub_scope="rnn%d" % (layer + 1))
        moe_inputs.append(cnn_multiscale)
        final_probility = self.sub_moe(cnn_multiscale, vocab_size, scopename="moe%d" % (layer + 1))
        final_probilities.append(final_probility)
        num_t = pool_size * (num_t // pool_size)
        cnn_output = tf.reshape(cnn_output[:, :num_t, :], [-1, num_t // pool_size, pool_size, features_size])
        cnn_input = tf.reduce_max(cnn_output, axis=2)
        num_frames = tf.maximum(num_frames // pool_size, 1)
    final_probilities = tf.stack(final_probilities, axis=1)
    moe_inputs = tf.stack(moe_inputs, axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[num_extend, features_size, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    weight = tf.nn.softmax(tf.einsum("aij,ijk->aik", moe_inputs, weight2d), dim=1)
    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result