# block17 is the 17x17-grid residual block (Inception-ResNet-B) of
# Inception-ResNet-v2. conv2d_bn (Conv2D + BatchNorm + optional activation)
# and do_scale are helpers defined elsewhere in inception_resnet_v2.py.
from keras import backend as K
from keras.layers import Activation, Lambda, merge


def block17(input, scale=1.0, activation_fn='relu'):
    # Channel axis depends on the backend's image dim ordering:
    # 1 for Theano ("th"), -1 for TensorFlow.
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1
    shortcut = input
    # Branch 1: 1x1 convolution
    tower_conv = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)
    # Branch 2: 1x1 -> 1x7 -> 7x1 factorized convolutions
    tower_conv1_0 = conv2d_bn(input, 128, 1, 1, activ_fn=activation_fn)
    tower_conv1_1 = conv2d_bn(tower_conv1_0, 160, 1, 7, activ_fn=activation_fn)
    tower_conv1_2 = conv2d_bn(tower_conv1_1, 192, 7, 1, activ_fn=activation_fn)
    # Concatenate the two branches and project back to 1088 channels
    # (linear 1x1 convolution, no batch norm).
    mixed = merge([tower_conv, tower_conv1_2], mode='concat', concat_axis=channel_axis)
    up = conv2d_bn(mixed, 1088, 1, 1, activ_fn=False, normalize=False)
    # Scale the residual branch before summing it with the shortcut.
    up = Lambda(do_scale, output_shape=K.int_shape(up)[1:], arguments={'scale': scale})(up)
    net = merge([shortcut, up], mode='sum')
    if activation_fn:
        net = Activation(activation_fn)(net)
    return net
Source file: inception_resnet_v2.py (Python)
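The snippet calls a do_scale helper that is not shown in this excerpt. The sketch below is an assumption, not code from this file: residual scaling in Inception-ResNet multiplies the residual branch by scale before it is added to the shortcut, and the reference architecture stacks this 17x17 block repeatedly (roughly 20 times with a scale near 0.1 in the TF-slim implementation). The variable net in the loop is a hypothetical placeholder for the feature map produced earlier in the full model.

# Assumed definition of the do_scale helper passed to the Lambda layer above:
# it multiplies the residual activations by the given scale factor.
def do_scale(x, scale):
    return x * scale

# Hypothetical usage: `net` stands for the 17x17 feature map produced by the
# preceding reduction block of the full Inception-ResNet-v2 model.
for _ in range(20):
    net = block17(net, scale=0.1, activation_fn='relu')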