def st_conv_inception_4(input_shape, weights_path=None, mode=0, nb_res_layer=5):
    """Build the "inception 4" style-transfer convnet (Keras 1.x API).

    Architecture: two downsampling stages (each a strided conv branch
    channel-concatenated with an average-pooled identity branch), a
    projection to 128 channels, ``nb_res_layer`` residual inception
    blocks summed onto the trunk, then a 4x transposed convolution back
    to a 3-channel image squashed into the [0, 255] pixel range.

    Args:
        input_shape: shape of one input image, excluding the batch axis
            (channel position follows ``K.image_dim_ordering()``).
        weights_path: optional path to an HDF5 weights file to load into
            the built model.
        mode: ``BatchNormalization`` ``mode`` argument (Keras 1.x
            semantics; 0 = feature-wise normalization).
        nb_res_layer: number of residual inception blocks in the trunk.

    Returns:
        A keras ``Model`` mapping the 'input_node' layer to the
        'output_node' layer.
    """
    # Hoist the invariant backend query; 'tf' ordering is channels-last.
    dim_ordering = K.image_dim_ordering()
    channel_axis = 3 if dim_ordering == 'tf' else 1

    # 'input' shadowed the builtin in the original; renamed locally.
    input_node = Input(shape=input_shape, name='input_node', dtype=K.floatx())

    # --- Downsampling stage 1: strided conv + pooled identity branch ---
    c = Convolution2D(13, 7, 7, dim_ordering=dim_ordering,
                      init='he_normal', subsample=(2, 2), border_mode='same',
                      activation='linear')(input_node)
    out = BatchNormalization(mode=mode, axis=channel_axis, momentum=0.9,
                             gamma_init='he_normal')(c)
    a = PReLU()(out)
    p = AveragePooling2D(pool_size=(2, 2), dim_ordering=dim_ordering,
                         border_mode='same')(input_node)
    m = merge([a, p], mode='concat', concat_axis=channel_axis)  # 16 channels

    # --- Downsampling stage 2 ---
    c = Convolution2D(48, 3, 3, dim_ordering=dim_ordering,
                      init='he_normal', subsample=(2, 2), border_mode='same',
                      activation='linear')(m)
    a = PReLU()(c)
    p = AveragePooling2D(pool_size=(2, 2), dim_ordering=dim_ordering,
                         border_mode='same')(m)
    m = merge([a, p], mode='concat', concat_axis=channel_axis)  # 64 channels

    # --- Trunk: project to 128 channels, then residual inception blocks ---
    out = Convolution2D(128, 3, 3, dim_ordering=dim_ordering,
                        init='he_normal', subsample=(1, 1), border_mode='same',
                        activation='linear')(m)
    last_out = BatchNormalization(mode=mode, axis=channel_axis, momentum=0.9,
                                  gamma_init='he_normal')(out)
    last_out = Activation('relu')(last_out)
    for _ in range(nb_res_layer):
        out = inception_layer(last_out, dim_ordering, channel_axis, mode)
        # Residual (identity) connection around each inception block.
        last_out = merge([last_out, out], mode='sum')

    # --- Upsampling head: 4x transposed conv back to a 3-channel image ---
    out = Convolution2D(128, 3, 3, dim_ordering=dim_ordering,
                        init='he_normal', subsample=(1, 1), border_mode='same',
                        activation='linear')(last_out)
    out = ConvolutionTranspose2D(3, 5, 5, dim_ordering=dim_ordering,
                                 init='he_normal', subsample=(4, 4),
                                 border_mode='same', activation='linear')(out)
    # Custom activation scaling a sigmoid into the [0, 255] pixel range.
    out = ScaledSigmoid(scaling=255., name="output_node")(out)

    model = Model(input=[input_node], output=[out])
    if weights_path:
        model.load_weights(weights_path)
    return model