def _block_b_reduce_res(net, endpoints, ver=2, scope='BlockReduceB'):
    """Inception-ResNet reduction block B: 17 x 17 -> 8 x 8 spatial reduce.

    Four parallel branches (max-pool, 3x3 conv, 3x3 conv, double 3x3 conv)
    are computed on `net` and concatenated along the channel axis.

    Args:
        net: Input feature-map tensor; assumed NHWC with 17x17 spatial
            extent (per the block's role in the network) — confirm at caller.
        endpoints: Dict of named intermediate outputs; updated in place
            with the block output under key `scope`.
        ver: Inception-ResNet version, 1 or 2. Version 2 widens branches
            3 and 4 by +32 filters per successive conv.
        scope: Variable-scope name for the block.

    Returns:
        The concatenated output tensor (8 x 8 spatial).
    """
    # Base filter counts for branches 3 and 4; v2 grows each subsequent
    # conv in those branches by `*_inc` filters, v1 keeps them flat.
    br3_num = 256
    br4_num = 256
    if ver == 1:
        br3_inc = 0
        br4_inc = 0
    else:
        br3_inc = 32
        br4_inc = 32
    # Default to VALID padding so the stride-2 ops shrink the spatial dims;
    # 1x1 / non-strided convs override with SAME explicitly.
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                # Parameter-free pooling branch.
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 384, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_3x3'):
                br3 = layers.conv2d(net, br3_num, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, br3_num + br3_inc, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br4_3x3Dbl'):
                br4 = layers.conv2d(net, br4_num, [1, 1], padding='SAME', scope='Conv1_1x1')
                br4 = layers.conv2d(br4, br4_num + 1 * br4_inc, [3, 3], padding='SAME', scope='Conv2_3x3')
                br4 = layers.conv2d(br4, br4_num + 2 * br4_inc, [3, 3], stride=2, scope='Conv3_3x3/2')
            # TF<=1.x argument order: tf.concat(axis, values). Axis 3 is
            # the channel axis for NHWC tensors.
            net = tf.concat(3, [br1, br2, br3, br4], name='Concat1')
    # Output depth: 8 x 8 x 1792 for v1, 2144 for v2 (the paper states 2048,
    # which only matches if the v1 filter config is used for this block).
    endpoints[scope] = net
    print('%s output shape: %s' % (scope, net.get_shape()))
    return net
Source file: build_inception_v4.py
Language: Python
Reads: 20 | Favorites: 0 | Likes: 0 | Comments: 0
Comment list
Table of contents