def densenet_block(inputs, layer_num, growth, bc_mode, scope, is_training, keep_prob):
    """Build one DenseNet dense block.

    Each of the ``layer_num`` internal layers produces ``growth`` new feature
    maps which are concatenated (channel axis) onto the running tensor, so the
    output has ``inputs.channels + layer_num * growth`` channels.

    Args:
        inputs: 4-D NHWC feature tensor. (Assumed NHWC — concat uses axis=3;
            TODO confirm against the callers' data format.)
        layer_num: Number of composite layers inside the block.
        growth: Growth rate k — feature maps added per layer.
        bc_mode: If True, use the DenseNet-BC variant: a 1x1 bottleneck conv
            producing 4*growth maps before each 3x3 conv.
        scope: Variable scope name for the block (defaults to 'block1').
        is_training: Bool/placeholder controlling dropout.
        keep_prob: Dropout keep probability.

    Returns:
        The concatenated feature tensor for the whole block.
    """
    with tf.variable_scope(scope, 'block1', [inputs]):
        currents = inputs
        for idx in range(layer_num):  # range: works on both Py2 and Py3 (was xrange)
            if not bc_mode:
                # Plain DenseNet layer: 3x3 conv -> dropout.
                new_feature = slim.conv2d(currents, growth,
                                          [3, 3], scope='conv_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_{:d}'.format(idx))
            else:
                # DenseNet-BC: 1x1 bottleneck (4k maps) -> dropout -> 3x3 conv -> dropout.
                new_feature = slim.conv2d(currents, growth * 4,
                                          [1, 1], scope='bottom_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_b_{:d}'.format(idx))
                new_feature = slim.conv2d(new_feature, growth,
                                          [3, 3], scope='conv_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_{:d}'.format(idx))
            # Dense connectivity: every layer sees all preceding feature maps.
            currents = tf.concat([currents, new_feature], axis=3)
        return currents