import numpy as np
import tensorflow as tf  # TF 1.x (uses tf.contrib)


# Method of the model class; relies on the class's _conv helper.
def _attention_iter(self, inputs, lrnSize, itersize, name='attention_iter'):
    with tf.name_scope(name):
        numIn = inputs.get_shape().as_list()[3]
        # tf.pad needs integer paddings; np.floor would return a float.
        padding = lrnSize // 2
        # Pad by 1 so the 3x3 projection below keeps the spatial size.
        pad = tf.pad(inputs, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]))
        # Single-channel map U that seeds the attention recurrence.
        U = self._conv(pad, filters=1, kernel_size=3, strides=1)
        pad_2 = tf.pad(U, np.array([[0, 0], [padding, padding],
                                    [padding, padding], [0, 0]]))
        # One lrnSize x lrnSize kernel shared across all iterations.
        sharedK = tf.Variable(
            tf.contrib.layers.xavier_initializer(uniform=False)(
                [lrnSize, lrnSize, 1, 1]),
            name='shared_weights')
        Q = []  # attention maps, one per iteration
        C = []  # raw convolution responses
        for i in range(itersize):
            if i == 0:
                # The first pass runs on the pre-padded U, so VALID
                # padding restores the original spatial size.
                conv = tf.nn.conv2d(pad_2, sharedK, [1, 1, 1, 1],
                                    padding='VALID', data_format='NHWC')
            else:
                # Later passes refine the previous attention map.
                conv = tf.nn.conv2d(Q[i - 1], sharedK, [1, 1, 1, 1],
                                    padding='SAME', data_format='NHWC')
            C.append(conv)
            # Residual link back to U, squashed to (0, 1) by the sigmoid.
            Q.append(tf.nn.sigmoid(tf.add_n([C[i], U])))
        # Broadcast the final single-channel map across all input
        # channels and gate the features with it.
        pfeat = tf.multiply(inputs, tf.concat([Q[-1]] * numIn, axis=3))
        return pfeat
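
# Minimal usage sketch under stated assumptions: the class name Model,
# the _conv body, and the 64x64x256 feature shape are all hypothetical,
# chosen only to make the example self-contained and runnable (TF 1.x).
class Model:
    def _conv(self, inputs, filters, kernel_size, strides):
        # Stand-in for the model's conv helper: a plain VALID convolution,
        # so the explicit tf.pad in _attention_iter restores the size.
        return tf.layers.conv2d(inputs, filters, kernel_size,
                                strides=strides, padding='valid')

    _attention_iter = _attention_iter  # bind the method defined above


feats = tf.placeholder(tf.float32, [None, 64, 64, 256])
attended = Model()._attention_iter(feats, lrnSize=3, itersize=3)
print(attended.get_shape())  # (?, 64, 64, 256): same shape, reweighted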