def build(self):
    """Build a DSSM-style matching model.

    Two bag-of-words inputs (``query`` and ``doc``) are projected through a
    single shared MLP, and the pair is scored with a normalized dot product
    (cosine similarity).  In ``'classification'`` target mode a 2-way softmax
    layer is stacked on top of that score.

    Returns:
        A Keras ``Model`` taking ``[query, doc]`` and outputting the match
        score (or the 2-class softmax in classification mode).

    NOTE(review): indentation of this function was mangled in the source it
    was copied from; the nesting below is reconstructed to match the standard
    MatchZoo DSSM implementation — confirm against the original repository.
    """
    # One input slot per vocabulary entry (bag-of-words representation).
    # Sparse inputs were apparently tried and disabled (see trailing comment).
    query = Input(name='query', shape=(self.config['vocab_size'],))#, sparse=True)
    show_layer_info('Input', query)
    doc = Input(name='doc', shape=(self.config['vocab_size'],))#, sparse=True)
    show_layer_info('Input', doc)

    def mlp_work(input_dim):
        # Build the projection MLP: tanh hidden layers with L2 activity
        # regularization and dropout; the final (output) layer is linear.
        seq = Sequential()
        #seq.add(SparseFullyConnectedLayer(self.config['hidden_sizes'][0], input_dim=input_dim, activation='relu'))
        num_hidden_layers = len(self.config['hidden_sizes'])
        if num_hidden_layers == 1:
            # Single layer: linear (no activation argument means linear).
            seq.add(Dense(self.config['hidden_sizes'][0], input_shape=(input_dim,), activity_regularizer=regularizers.l2(self.config['reg_rate'])))
        else:
            # First hidden layer carries the input shape.
            seq.add(Dense(self.config['hidden_sizes'][0], activation='tanh', input_shape=(input_dim,), activity_regularizer=regularizers.l2(self.config['reg_rate'])))
            # Middle hidden layers: tanh + dropout after each.
            for i in range(num_hidden_layers-2):
                seq.add(Dense(self.config['hidden_sizes'][i+1], activation='tanh', activity_regularizer=regularizers.l2(self.config['reg_rate'])))
                seq.add(Dropout(rate=self.config['dropout_rate']))
            # Final layer is linear, followed by dropout.
            seq.add(Dense(self.config['hidden_sizes'][num_hidden_layers-1], activity_regularizer=regularizers.l2(self.config['reg_rate'])))
            seq.add(Dropout(rate=self.config['dropout_rate']))
        return seq

    # A single MLP instance applied to both inputs => query and doc towers
    # share weights (siamese architecture).
    mlp = mlp_work(self.config['vocab_size'])
    rq = mlp(query)
    show_layer_info('MLP', rq)
    rd = mlp(doc)
    show_layer_info('MLP', rd)
    # Dead alternative head kept by the original author (concatenate + dense
    # prediction layer); disabled in favor of the cosine score below.
    '''
    rep = Concatenate(axis=1) ([rq, rd])
    show_layer_info('Concatenate', rep)
    rep = Dropout(rate=self.config['dropout_rate'])(rep)
    show_layer_info('Dropout', rep)
    if self.config['target_mode'] == 'classification':
        out_ = Dense(2, activation='softmax')(rep)
    elif self.config['target_mode'] in ['regression', 'ranking']:
        out_ = Dense(1)(rep)
    show_layer_info('Dense', out_)
    '''
    # normalize=True makes the dot product a cosine similarity between the
    # two tower outputs.
    out_ = Dot( axes= [1, 1], normalize=True)([rq, rd])
    show_layer_info('Dot', out_)
    if self.config['target_mode'] == 'classification':
        # Map the scalar similarity to a 2-class softmax.
        out_ = Dense(2, activation='softmax')(out_)
        show_layer_info('Dense', out_)
    model = Model(inputs=[query, doc], outputs=[out_])
    return model
# (stray blog-page residue, not code: "评论列表" = comment list, "文章目录" = article table of contents)