from keras.layers import (Input, Convolution2D, BatchNormalization, Activation,
                          GlobalAveragePooling2D, Dense)
from keras.models import Model
from keras.regularizers import l2


def densenet_model(nb_blocks, nb_layers, growth_rate, dropout=0., l2_reg=1e-4,
                   init_channels=16):
    n_channels = init_channels
    inputs = Input(shape=(32, 32, 3))
    # Initial 3x3 convolution (Keras 1 API)
    x = Convolution2D(init_channels, 3, 3, border_mode='same',
                      init='he_normal', W_regularizer=l2(l2_reg),
                      bias=False)(inputs)
    for i in range(nb_blocks - 1):
        # Create a dense block
        x = dense_block(x, nb_layers, growth_rate,
                        dropout=dropout, l2_reg=l2_reg)
        # Update the number of channels
        n_channels += nb_layers * growth_rate
        # Transition layer
        x = transition_block(x, n_channels, dropout=dropout, l2_reg=l2_reg)
    # Add last dense block (no transition after it)
    x = dense_block(x, nb_layers, growth_rate, dropout=dropout, l2_reg=l2_reg)
    # Add final BN-ReLU
    x = BatchNormalization(gamma_regularizer=l2(l2_reg),
                           beta_regularizer=l2(l2_reg))(x)
    x = Activation('relu')(x)
    # Global average pooling followed by the classifier
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, W_regularizer=l2(l2_reg))(x)
    x = Activation('softmax')(x)
    model = Model(input=inputs, output=x)
    return model
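The dense_block and transition_block helpers are defined earlier in the post. For readers jumping straight to this section, here is a minimal sketch of what they can look like, following the BN-ReLU-Conv composition from the DenseNet paper and the same Keras 1 API; the dropout placement and concat_axis=-1 (TensorFlow dim ordering) are assumptions of this sketch, not necessarily the post's exact code.

from keras.layers import (Activation, AveragePooling2D, BatchNormalization,
                          Convolution2D, Dropout, merge)
from keras.regularizers import l2


def dense_block(x, nb_layers, growth_rate, dropout=0., l2_reg=1e-4):
    # Each layer sees the concatenation of all previous feature maps
    # and contributes growth_rate new channels.
    for i in range(nb_layers):
        out = BatchNormalization(gamma_regularizer=l2(l2_reg),
                                 beta_regularizer=l2(l2_reg))(x)
        out = Activation('relu')(out)
        out = Convolution2D(growth_rate, 3, 3, border_mode='same',
                            init='he_normal', W_regularizer=l2(l2_reg),
                            bias=False)(out)
        if dropout > 0.:
            out = Dropout(dropout)(out)
        # Concatenate along the channel axis (assumes tf dim ordering)
        x = merge([x, out], mode='concat', concat_axis=-1)
    return x


def transition_block(x, n_channels, dropout=0., l2_reg=1e-4):
    # BN-ReLU-1x1 Conv, then 2x2 average pooling to halve the spatial size
    x = BatchNormalization(gamma_regularizer=l2(l2_reg),
                           beta_regularizer=l2(l2_reg))(x)
    x = Activation('relu')(x)
    x = Convolution2D(n_channels, 1, 1, border_mode='same',
                      init='he_normal', W_regularizer=l2(l2_reg),
                      bias=False)(x)
    if dropout > 0.:
        x = Dropout(dropout)(x)
    x = AveragePooling2D(pool_size=(2, 2))(x)
    return x

With these helpers, densenet_model(nb_blocks=3, nb_layers=12, growth_rate=12) corresponds to the depth-40, k=12 DenseNet used for CIFAR-10 in the paper (one initial conv, 36 block convs, two transition convs, and the final dense layer).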
# Apply preprocessing as described in the paper: normalize each channel
# individually. We use the values from fb.resnet.torch, but computing them
# directly from the training set gives almost identical results.
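A minimal sketch of that normalization step, assuming CIFAR-10 arrays x_train and x_test of shape (N, 32, 32, 3); the per-channel means and standard deviations below are the fb.resnet.torch values as commonly quoted, so treat them as an assumption rather than the post's exact numbers.

import numpy as np

# Per-channel statistics from fb.resnet.torch (CIFAR-10, 0-255 scale);
# recomputing them from x_train gives nearly identical values.
mean = np.array([125.3, 123.0, 113.9])
std = np.array([63.0, 62.1, 66.7])

# Broadcasting applies the statistics over the last (channel) axis.
x_train = (x_train.astype('float32') - mean) / std
x_test = (x_test.astype('float32') - mean) / std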