def inception_resnet_v2_B(input, scale_residual=True):
    """Inception-ResNet-v2 type-B residual block (Keras 1.x functional API).

    Takes a relu-activated tensor, runs it through two parallel conv
    branches, projects the concatenation back to the input depth, and adds
    the result onto the identity shortcut, followed by BN and relu.

    Args:
        input: 4D Keras tensor; expected to be the output of a relu
            activation (see note below).
        scale_residual: if True, scale the residual branch by 0.1 before
            the element-wise sum (stabilizes training of deep stacks).

    Returns:
        Keras tensor of the same shape as ``input``.
    """
    # Channels-first ("th") ordering puts the feature axis at 1,
    # channels-last ("tf") at -1.
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    # Identity shortcut — input is assumed to already be relu-activated.
    shortcut = input

    # Branch A: plain 1x1 conv.
    branch_a = Convolution2D(192, 1, 1, activation='relu', border_mode='same')(input)

    # Branch B: 1x1 reduce, then a factorized 7x7 conv (1x7 followed by 7x1).
    branch_b = Convolution2D(128, 1, 1, activation='relu', border_mode='same')(input)
    branch_b = Convolution2D(160, 1, 7, activation='relu', border_mode='same')(branch_b)
    branch_b = Convolution2D(192, 7, 1, activation='relu', border_mode='same')(branch_b)

    # Concatenate the branches and project back to the residual depth.
    # The projection is linear: the nonlinearity comes after the sum.
    joined = merge([branch_a, branch_b], mode='concat', concat_axis=channel_axis)
    residual = Convolution2D(1152, 1, 1, activation='linear', border_mode='same')(joined)

    if scale_residual:
        # Down-weight the residual branch before adding it to the shortcut.
        residual = Lambda(lambda t: t * 0.1)(residual)

    out = merge([shortcut, residual], mode='sum')
    out = BatchNormalization(axis=channel_axis)(out)
    out = Activation("relu")(out)
    return out
评论列表
文章目录