# Fully connected layer. Method of a Layers-style builder class; assumes
# TensorFlow 1.x (`import tensorflow as tf`) and that `self.input` holds the
# output of the previous layer.
def fc(self, output_nodes, keep_prob=1, activation_fn=tf.nn.relu, b_value=0.0,
       s_value=None, bn=False, stoch=False, ladder=False, clean=False):
    self.count['fc'] += 1
    self._layer_count += 1
    scope = 'fc_' + str(self.count['fc'])
    with tf.variable_scope(scope):
        # Infer the fan-in from the incoming tensor and apply the weight matrix.
        input_nodes = self.input.get_shape()[1]
        output_shape = [input_nodes, output_nodes]
        w = self.weight_variable(name='weights', shape=output_shape)
        self.input = tf.matmul(self.input, w)
        if bn is True:
            # Batch-normalize the pre-activation; `clean` selects the
            # noise-free (clean-encoder) statistics path.
            self.input = self.batch_norm(self.input, clean=clean, count=self._layer_count)
        if ladder is True:
            # Ladder-network decoder step: drop bias and scale, fetch the
            # mirrored encoder layer's noisy activation, denoise it with the
            # g-function, and normalize z_hat with the stored clean-pass
            # batch statistics for the reconstruction cost.
            b_value = s_value = None
            noisy_z_ind = self.layer_num - self.count['deconv'] - self.count['fc']
            noisy_z = self._noisy_z_dict[noisy_z_ind]
            z_hat = self.ladder_g_function(noisy_z, self.input)
            self._z_hat_bn[noisy_z_ind] = ((z_hat - self.clean_batch_dict[noisy_z_ind][0])
                                           / self.clean_batch_dict[noisy_z_ind][1])
        if stoch is True:
            # Corrupted-encoder step: add unit-variance Gaussian noise and
            # remember the noisy activation for the decoder to denoise later.
            self.input = tf.random_normal(tf.shape(self.input)) + self.input
            self._noisy_z_dict[self._layer_count] = self.input
        if b_value is not None:
            b = self.const_variable(name='bias', shape=[output_nodes], value=b_value)
            self.input = tf.add(self.input, b)
        if s_value is not None:
            s = self.const_variable(name='scale', shape=[output_nodes], value=s_value)
            self.input = tf.multiply(self.input, s)
        if activation_fn is not None:
            self.input = activation_fn(self.input)
        if keep_prob != 1:
            self.input = tf.nn.dropout(self.input, keep_prob=keep_prob)
    self.print_log(scope + ' output: ' + str(self.input.get_shape()))
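The g-function called in the ladder branch is not part of this excerpt. For reference, a common per-element denoising combinator from the ladder-network literature (Rasmus et al., 2015) looks like the sketch below; the name ladder_g_function_sketch and the parameter initializations are illustrative, and the actual ladder_g_function in this codebase may differ.

# A generic ladder-network combinator sketch (Rasmus et al., 2015 style);
# not necessarily what ladder_g_function does in this codebase.
def ladder_g_function_sketch(noisy_z, u):
    # Ten trainable per-unit parameters define mu(u) and v(u); the last
    # dimension of the decoder signal u must be statically known.
    size = u.get_shape()[-1]
    def wi(init, name):
        return tf.Variable(init * tf.ones([size]), name=name)
    a1, a2, a3, a4, a5 = wi(0., 'a1'), wi(1., 'a2'), wi(0., 'a3'), wi(0., 'a4'), wi(0., 'a5')
    a6, a7, a8, a9, a10 = wi(0., 'a6'), wi(1., 'a7'), wi(0., 'a8'), wi(0., 'a9'), wi(0., 'a10')
    mu = a1 * tf.sigmoid(a2 * u + a3) + a4 * u + a5
    v = a6 * tf.sigmoid(a7 * u + a8) + a9 * u + a10
    # Blend the noisy encoder activation with the decoder estimate.
    return (noisy_z - mu) * v + mu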
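A minimal usage sketch, assuming the surrounding class follows a TensorBase-style Layers pattern: a constructor that stores the input tensor in self.input and a get_output() accessor that returns it (both assumed here, not shown in the excerpt).

# Hypothetical builder usage; Layers(x) and get_output() are assumptions.
x = tf.placeholder(tf.float32, shape=[None, 784])  # e.g. flattened MNIST images
net = Layers(x)
net.fc(512)                      # fc_1: 784 -> 512, ReLU, bias 0.0
net.fc(512, keep_prob=0.5)       # fc_2: dropout applied after the activation
net.fc(10, activation_fn=None)   # fc_3: linear logits, no activation
logits = net.get_output()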