import tensorflow as tf


def conv3d(input_, output_dim, f_size, is_training, scope='conv3d'):
    """Two stacked 3-D conv + batch-norm + ReLU layers (a VGG-style block)."""
    with tf.variable_scope(scope):
        # VGG networks stack two 3x3 conv layers to enlarge the effective
        # receptive field while keeping the parameter count low.
        w1 = tf.get_variable('w1', [f_size, f_size, f_size, input_.get_shape()[-1], output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1 = tf.nn.conv3d(input_, w1, strides=[1, 1, 1, 1, 1], padding='SAME')
        b1 = tf.get_variable('b1', [output_dim], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.bias_add(conv1, b1)
        bn1 = tf.contrib.layers.batch_norm(conv1, is_training=is_training, scope='bn1',
                                           variables_collections=['bn_collections'])
        r1 = tf.nn.relu(bn1)

        # Second conv layer maps output_dim -> output_dim channels.
        w2 = tf.get_variable('w2', [f_size, f_size, f_size, output_dim, output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2 = tf.nn.conv3d(r1, w2, strides=[1, 1, 1, 1, 1], padding='SAME')
        b2 = tf.get_variable('b2', [output_dim], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.bias_add(conv2, b2)
        bn2 = tf.contrib.layers.batch_norm(conv2, is_training=is_training, scope='bn2',
                                           variables_collections=['bn_collections'])
        r2 = tf.nn.relu(bn2)
        return r2
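
# A minimal usage sketch (an illustrative assumption, not part of the original
# article): it wires one conv3d block onto a dummy 5-D volume placeholder of
# shape [batch, depth, height, width, channels] and runs a single forward pass
# under TF 1.x, where tf.contrib is still available.
import numpy as np

if __name__ == '__main__':
    is_training = tf.placeholder(tf.bool, name='is_training')
    volume = tf.placeholder(tf.float32, [None, 32, 32, 32, 1], name='volume')
    features = conv3d(volume, output_dim=16, f_size=3, is_training=is_training)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(features,
                       feed_dict={volume: np.zeros((2, 32, 32, 32, 1), np.float32),
                                  is_training: True})
        print(out.shape)  # expected: (2, 32, 32, 32, 16)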