    def __gradient_ascent(self):
        # Gradient ascent step of contrastive divergence: the positive statistics come from
        # the data-driven pass (suffix 0), the negative statistics from the reconstruction (suffix 1).
        with tf.name_scope('gradient'):
            # Hidden-bias gradient: mean difference of the hidden probabilities.
            self.grad_bias = tf.mul(tf.reduce_mean(self.hid_prob0 - self.hid_prob1, [0, 1, 2]),
                                    self.learning_rate * self.batch_size, name='grad_bias')
            # Visible-bias gradient: mean difference between data and reconstruction.
            self.grad_cias = tf.mul(tf.reduce_mean(self.vis_0 - self.vis_1, [0, 1, 2]),
                                    self.learning_rate * self.batch_size, name='grad_cias')
            # TODO: Is there any method to calculate batch-elementwise convolution?
            temp_grad_weights = tf.zeros(self.weight_shape)
            # Reverse the spatial axes of the hidden maps: conv2d computes a cross-correlation,
            # so flipping the filter turns it into a true convolution.
            hid_filter0 = tf.reverse(self.hid_prob0, [False, True, True, False])
            hid_filter1 = tf.reverse(self.hid_prob1, [False, True, True, False])
            for idx in range(0, self.batch_size):
                hid0_ith = self.__get_ith_hid_4d(hid_filter0, idx)
                hid1_ith = self.__get_ith_hid_4d(hid_filter1, idx)
                positive = [0] * self.depth
                negative = [0] * self.depth
                one_ch_conv_shape = [self.width, self.height, 1, self.num_features]
                # Convolve each visible channel with the matching hidden map, one sample at a time.
                for jdx in range(0, self.depth):
                    positive[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_0, idx, jdx), hid0_ith),
                                               one_ch_conv_shape)
                    negative[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_1, idx, jdx), hid1_ith),
                                               one_ch_conv_shape)
                positive = tf.concat(2, positive)
                negative = tf.concat(2, negative)
                # Accumulate the per-sample (positive - negative) statistics, cropped to the filter size.
                temp_grad_weights = tf.add(temp_grad_weights,
                                           tf.slice(tf.sub(positive, negative), [0, 0, 0, 0], self.weight_shape))
            # Average over spatial positions and scale by the learning rate.
            self.grad_weights = tf.mul(temp_grad_weights, self.learning_rate / (self.width * self.height))
            # Apply the updates (ascent on the estimated log-likelihood gradient).
            self.gradient_ascent = [self.weights.assign_add(self.grad_weights),
                                    self.bias.assign_add(self.grad_bias),
                                    self.cias.assign_add(self.grad_cias)]
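    # Regarding the TODO above: a minimal sketch of one way to avoid the per-sample Python loop,
    # assuming __conv2d uses stride 1 with 'VALID' padding and that the hidden maps are already
    # spatially flipped (as hid_filter0 / hid_filter1 are). The idea is to fold the batch axis
    # into the channel slots of a single tf.nn.conv2d call, which then sums over the batch in
    # one shot. The name __batch_weight_statistics is hypothetical, not part of the original class.
    def __batch_weight_statistics(self, vis, hid_filter):
        # vis:        [batch, height, width, depth]        visible units
        # hid_filter: [batch, h_out, w_out, num_features]  flipped hidden probabilities
        # Move the batch axis into the channel positions:
        #   input  -> [depth, height, width, batch]
        #   filter -> [h_out, w_out, batch, num_features]
        vis_t = tf.transpose(vis, [3, 1, 2, 0])
        hid_t = tf.transpose(hid_filter, [1, 2, 0, 3])
        # Output: [depth, k_h, k_w, num_features]; the batch sum is implicit in the convolution.
        conv = tf.nn.conv2d(vis_t, hid_t, strides=[1, 1, 1, 1], padding='VALID')
        # Reorder to the weight layout [k_h, k_w, depth, num_features].
        return tf.transpose(conv, [1, 2, 0, 3])
    # With this sketch, temp_grad_weights could be formed as
    #   self.__batch_weight_statistics(self.vis_0, hid_filter0)
    #   - self.__batch_weight_statistics(self.vis_1, hid_filter1)
    # replacing both loops, provided the padding assumption matches __conv2d.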