def __call__(self, inputs, is_train=True, is_debug=False):
    """Build the encoder graph: three conv3d blocks, then an FC projection.

    Args:
        inputs: Tensor (or value convertible to one) of shape
            [batch_size] + configs.conv_info.input, values expected in [-1, 1].
        is_train: Python bool; enables dropout when True.
        is_debug: Python bool; prints each layer's output tensor when True.

    Returns:
        Latent-code tensor of shape [batch_size, latent_dimension]
        (ReLU-activated output of the final linear layer).

    Side effects:
        Populates self.net with per-layer weights/outputs, sets
        self.is_train / self.is_debug / self.variables, and registers
        range-check ops in the 'Assert' graph collection.
    """
    self.is_train = is_train
    self.is_debug = is_debug
    inputs = tf.convert_to_tensor(inputs)  # Check if necessary

    # Graph-mode assertions that the input is normalized to [-1, 1].
    encoder_max_assert_op = tf.Assert(
        tf.less_equal(tf.reduce_max(inputs), 1.), [inputs],
        summarize=0, name='assert/encoder_max')
    # BUG FIX: the lower-bound check must inspect the *minimum* value.
    # The original used tf.reduce_max here, so the assert could never
    # fire for inputs below -1 once the max check passed.
    encoder_min_assert_op = tf.Assert(
        tf.greater_equal(tf.reduce_min(inputs), -1.), [inputs],
        summarize=0, name='assert/encoder_min')
    tf.add_to_collection('Assert', encoder_max_assert_op)
    tf.add_to_collection('Assert', encoder_min_assert_op)

    # Static shape check on the Python side (graph-construction time only).
    assert inputs.get_shape().as_list() == [self.batch_size] + self.configs.conv_info.input

    with tf.variable_scope(self.name) as scope:
        print_message(scope.name)

        with tf.variable_scope('conv1') as vscope:
            outputs, self.net['w1'], self.net['b1'] = conv3d(
                inputs, [self.batch_size] + self.configs.conv_info.l1,
                is_train=self.is_train,
                k=self.configs.conv_info.k1, s=self.configs.conv_info.s1,
                with_w=True)
            if is_debug:
                print(vscope.name, outputs)
            outputs = tf.layers.dropout(
                outputs, rate=self.configs.dropout,
                training=self.is_train, name='outputs')
            assert outputs.get_shape().as_list() == [self.batch_size] + self.configs.conv_info.l1
            self.net['conv1_outputs'] = outputs

        with tf.variable_scope('conv2') as vscope:
            outputs, self.net['w2'], self.net['b2'] = conv3d(
                outputs, [self.batch_size] + self.configs.conv_info.l2,
                is_train=self.is_train,
                k=self.configs.conv_info.k2, s=self.configs.conv_info.s2,
                with_w=True)
            if is_debug:
                print(vscope.name, outputs)
            outputs = tf.layers.dropout(
                outputs, rate=self.configs.dropout,
                training=self.is_train, name='outputs')
            assert outputs.get_shape().as_list() == [self.batch_size] + self.configs.conv_info.l2
            self.net['conv2_outputs'] = outputs

        with tf.variable_scope('conv3') as vscope:
            outputs, self.net['w3'], self.net['b3'] = conv3d(
                outputs, [self.batch_size] + self.configs.conv_info.l3,
                is_train=self.is_train,
                k=self.configs.conv_info.k3, s=self.configs.conv_info.s3,
                with_w=True)
            if is_debug:
                print(vscope.name, outputs)
            outputs = tf.layers.dropout(
                outputs, rate=self.configs.dropout,
                training=self.is_train, name='outputs')
            assert outputs.get_shape().as_list() == [self.batch_size] + self.configs.conv_info.l3
            self.net['conv3_outputs'] = outputs

        with tf.variable_scope('fc') as vscope:
            # Flatten conv3's per-sample volume: product of its dimensions.
            fc_dim = reduce(mul, self.configs.conv_info.l3, 1)
            outputs = tf.reshape(outputs, [self.batch_size] + [fc_dim], name='reshape')
            outputs = linear(outputs, self.latent_dimension)
            outputs = tf.nn.relu(outputs)
            if is_debug:
                print(vscope.name, outputs)
            self.net['fc_outputs'] = outputs

    # Collect this encoder's trainable variables for the optimizer/saver.
    self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
    return outputs