# Imports needed to run this snippet standalone (Keras 2.x functional API).
import numpy as np
import keras
from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization
from keras.regularizers import l1_l2

def build(self):
self.input_layer = Input(shape=(self.input_size,), name='count')
self.sf_layer = Input(shape=(1,), name='size_factors')
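    # Size factors come in through a second input; they are consumed later,
    # when build_output() assembles the output layers.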
last_hidden = self.input_layer
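    # Stack the hidden layers symmetrically: encoder layers first, then the
    # 'center' bottleneck, then decoder layers. Names reflect each stage.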
    center_idx = int(np.floor(len(self.hidden_size) / 2.0))  # bottleneck index
    for i, (hid_size, hid_drop) in enumerate(zip(self.hidden_size, self.hidden_dropout)):
        if i == center_idx:
            layer_name = 'center'
            stage = 'center'  # let downstream know where we are
        elif i < center_idx:
            layer_name = 'enc%s' % i
            stage = 'encoder'
        else:
            layer_name = 'dec%s' % (i - center_idx)
            stage = 'decoder'
        # Use encoder-specific L1/L2 regularization coefficients if given;
        # the center (bottleneck) layer is treated as part of the encoder.
if self.l1_enc_coef != 0. and stage in ('center', 'encoder'):
l1 = self.l1_enc_coef
else:
l1 = self.l1_coef
if self.l2_enc_coef != 0. and stage in ('center', 'encoder'):
l2 = self.l2_enc_coef
else:
l2 = self.l2_coef
last_hidden = Dense(hid_size, activation=None, kernel_initializer=self.init,
kernel_regularizer=l1_l2(l1, l2),
name=layer_name)(last_hidden)
if self.batchnorm:
last_hidden = BatchNormalization(center=True, scale=False)(last_hidden)
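            # scale=False: the learnable scale is redundant for ReLU-family
            # activations, since the next layer's weights can absorb it.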
        # Use separate activation layers so that pre-activation outputs of
        # each layer remain accessible when requested.
        try:
            last_hidden = Activation(self.activation, name='%s_act' % layer_name)(last_hidden)
        except ValueError:
            # fall back to advanced activation layers (e.g. PReLU, LeakyReLU)
            last_hidden = getattr(keras.layers, self.activation)(name='%s_act' % layer_name)(last_hidden)
if hid_drop > 0.0:
last_hidden = Dropout(hid_drop, name='%s_drop'%layer_name)(last_hidden)
self.decoder_output = last_hidden
self.build_output()
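
# For context, a minimal standalone sketch of the layer-naming scheme used in
# build() above. Plain Python; the helper name `stage_names` is an assumption
# for illustration, not part of the original code.
def stage_names(hidden_size):
    """Return (layer_name, stage) pairs using the same center-index rule as build()."""
    center_idx = int(np.floor(len(hidden_size) / 2.0))
    names = []
    for i in range(len(hidden_size)):
        if i == center_idx:
            names.append(('center', 'center'))
        elif i < center_idx:
            names.append(('enc%s' % i, 'encoder'))
        else:
            names.append(('dec%s' % (i - center_idx), 'decoder'))
    return names

# e.g. stage_names([64, 32, 64])
# -> [('enc0', 'encoder'), ('center', 'center'), ('dec1', 'decoder')]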