def DCGRU(self, mem, kernel_width, prefix):
    """Convolutional diagonal GRU cell.

    Computes a GRU-style update of the memory tensor `mem`: a reset gate
    masks the memory before the candidate projection, and an update gate
    interpolates between a depthwise-shifted copy of the memory and the
    (dropout-regularized) candidate.

    Args:
      mem: memory tensor to update (shape is whatever `conv_linear` /
        `depthwise_conv2d` expect here — presumably [batch, length, units];
        TODO confirm against callers).
      kernel_width: width of the 1-D convolution kernel used by the gates.
      prefix: variable-scope prefix; gate projections live under
        `prefix + "/r"`, `"/c"`, `"/g"`.

    Returns:
      The updated memory tensor, same shape as `mem`.
    """
    def gate_proj(tensor, suffix, bias_start):
        # Shared conv-linear projection; each gate gets its own scope.
        return self.conv_linear(tensor, kernel_width, self.num_units,
                                self.num_units, bias_start,
                                prefix + "/" + suffix)

    # Diagonal shift of the memory, implemented as a fixed depthwise
    # convolution over a temporarily-inserted height-1 axis.
    expanded = tf.expand_dims(mem, 1)
    shifted = tf.nn.depthwise_conv2d(expanded, self.shift_filter,
                                     [1, 1, 1, 1], 'SAME')
    mem_shifted = tf.squeeze(shifted, [1])

    # GRU gating with hard (piecewise-linear) nonlinearities. Bias starts
    # (0.5 / 0.7) bias the reset and update gates toward passing memory
    # through early in training.
    reset_gate = self.hard_sigmoid(gate_proj(mem, "r", 0.5))
    candidate = self.hard_tanh(gate_proj(reset_gate * mem, "c", 0.0))
    update_gate = self.hard_sigmoid(gate_proj(mem, "g", 0.7))
    candidate = self.dropout(candidate)

    # Interpolate between the shifted memory and the new candidate.
    return update_gate * mem_shifted + (1 - update_gate) * candidate
# NOTE(review): removed stray web-page residue that broke the file
# ("评论列表" = comment list, "文章目录" = article table of contents —
# artifacts from a blog scrape, not part of the source).