def create_convnet(self, _input, dense_dim=1000, dy=10, nb_filters=None, kernel_size=(3, 3), pool_size=(2, 2),
                   dropout=0.5, bn=True, output_activation='softmax', opt='adam'):
    """
    Create convnet model / encoder of DRCN.

    Builds a stack of Conv2D(+ReLU, +MaxPool) layers followed by two
    Dense(+optional BatchNorm, +ReLU, +Dropout) layers and a final
    classification head. Stores the shared (reusable) layers in
    ``self.enc_functions`` so the conv. autoencoder can be built later
    from the same weights.

    Side effects:
        self.enc_functions : list of shared Conv2D/Dense layer objects
        self.convnet_model : compiled classifier Model (_input -> _y)
        self.feat_model    : feature extractor Model (_input -> _feat,
                             the pre-dropout activation of the last
                             dense layer)

    Args:
        _input (Tensor) : input layer
        dense_dim (int) : dimensionality of the final dense layers
        dy (int) : output dimensionality (number of classes)
        nb_filters (list) : list of #Conv2D filters; defaults to [64, 128]
        kernel_size (tuple) : Conv2D kernel size
        pool_size (tuple) : MaxPool kernel size
        dropout (float) : dropout rate
        bn (boolean) : batch normalization mode
        output_activation (string) : act. function for output layer
        opt (string) : optimizer
    """
    # Avoid a mutable default argument: the list default is created once
    # at definition time and shared across all calls.
    if nb_filters is None:
        nb_filters = [64, 128]

    _h = _input
    # Shared layers, reused later when constructing the conv. autoencoder.
    self.enc_functions = []
    for i, nf in enumerate(nb_filters):
        enc_f = Conv2D(nf, kernel_size, padding='same')
        _h = enc_f(_h)
        self.enc_functions.append(enc_f)
        _h = Activation('relu')(_h)
        # NOTE(review): pooling is applied only after the first two conv
        # layers; with the default two-filter list this pools every layer.
        # Presumably a cap on spatial downsampling — confirm if nb_filters
        # is ever longer than 2.
        if i < 2:
            _h = MaxPooling2D(pool_size=pool_size, padding='same')(_h)

    _h = Flatten()(_h)

    # First dense block: Dense -> (BatchNorm) -> ReLU -> Dropout.
    enc_f = Dense(dense_dim)
    _h = enc_f(_h)
    self.enc_functions.append(enc_f)
    if bn:
        _h = BatchNormalization()(_h)
    _h = Activation('relu')(_h)
    _h = Dropout(dropout)(_h)

    # Second dense block; its ReLU output (pre-dropout) is the feature
    # representation exposed through self.feat_model.
    enc_f = Dense(dense_dim)
    _h = enc_f(_h)
    self.enc_functions.append(enc_f)
    if bn:
        _h = BatchNormalization()(_h)
    _feat = Activation('relu')(_h)
    _h = Dropout(dropout)(_feat)

    # Classification head.
    _y = Dense(dy, activation=output_activation)(_h)

    # convnet classifier
    # NOTE(review): `input=`/`output=` is the Keras 1.x keyword spelling;
    # Keras 2.x renamed these to `inputs=`/`outputs=` — confirm against the
    # installed Keras version before changing.
    self.convnet_model = Model(input=_input, output=_y)
    self.convnet_model.compile(loss='categorical_crossentropy', optimizer=opt)
    # summary() prints the table itself and returns None; wrapping it in
    # print() emitted a spurious trailing "None" line.
    self.convnet_model.summary()

    # Feature extractor sharing all layers up to _feat.
    self.feat_model = Model(input=_input, output=_feat)