def cnn_word_model(self):
    # Multi-kernel-size CNN over pre-computed word embeddings: one Conv1D
    # branch per kernel size, each globally max-pooled, then concatenated.
    embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

    outputs = []
    for i in range(len(self.kernel_sizes)):
        output_i = Conv1D(self.opt['filters_cnn'], kernel_size=self.kernel_sizes[i], activation=None,
                          kernel_regularizer=l2(self.opt['regul_coef_conv']), padding='same')(embed_input)
        output_i = BatchNormalization()(output_i)
        output_i = Activation('relu')(output_i)
        output_i = GlobalMaxPooling1D()(output_i)
        outputs.append(output_i)

    output = concatenate(outputs, axis=1)
    output = Dropout(rate=self.opt['dropout_rate'])(output)
    output = Dense(self.opt['dense_dim'], activation=None,
                   kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = Dropout(rate=self.opt['dropout_rate'])(output)
    output = Dense(1, activation=None, kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
    output = BatchNormalization()(output)
    act_output = Activation('sigmoid')(output)
    model = Model(inputs=embed_input, outputs=act_output)
    return model
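The method above assumes a host object carrying `opt` and `kernel_sizes`. A minimal sketch of driving it standalone; the option values and the `SimpleNamespace` stand-in for the real class are both assumptions made for illustration:

```python
from types import SimpleNamespace
from keras.layers import (Input, Conv1D, BatchNormalization, Activation,
                          GlobalMaxPooling1D, Dropout, Dense, concatenate)
from keras.models import Model
from keras.regularizers import l2

fake_self = SimpleNamespace(
    kernel_sizes=[1, 2, 3],          # one Conv1D branch per kernel size
    opt={'max_sequence_length': 100, 'embedding_dim': 300,
         'filters_cnn': 128, 'dense_dim': 100, 'dropout_rate': 0.5,
         'regul_coef_conv': 1e-4, 'regul_coef_dense': 1e-4})

model = cnn_word_model(fake_self)    # works: self is an ordinary argument here
model.compile(loss='binary_crossentropy', optimizer='adam')
```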
def fhan2_max(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs)
    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)
    Si = GlobalMaxPooling1D()(hij)
    wordEncoder = Model(wordInputs, Si)

    # -----------------------------------------------------------------------------------------------
    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs', dtype='float32')
    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)
    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs)
    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)
    Vb = GlobalMaxPooling1D()(hi)
    v6 = Dense(1, activation="sigmoid", kernel_initializer='glorot_uniform', name="dense")(Vb)

    model = Model(inputs=[docInputs], outputs=[v6])
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
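A smoke test for the hierarchical model above; every size here is invented for illustration. Documents enter as an integer tensor of shape (batch, n_sentences, MAX_WORDS), and TimeDistributed runs the word encoder over each sentence:

```python
import numpy as np

V, D = 20000, 100                                    # assumed vocab and embedding sizes
model, word_encoder = fhan2_max(MAX_NB_WORDS=V, MAX_WORDS=50, MAX_SENTS=30,
                                EMBEDDING_DIM=D, WORDGRU=64,
                                embedding_matrix=np.random.rand(V, D),
                                DROPOUTPER=0.3)
docs = np.random.randint(0, V, size=(4, 30, 50))     # 4 docs, 30 sents, 50 words each
model.train_on_batch(docs, np.random.randint(0, 2, size=(4, 1)))
```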
def test_globalpooling_1d():
    layer_test(pooling.GlobalMaxPooling1D,
               input_shape=(3, 4, 5))
    layer_test(pooling.GlobalAveragePooling1D,
               input_shape=(3, 4, 5))
def __call__(self, inputs):
    x = self.model(inputs)
    avg_x = GlobalAveragePooling1D()(x)
    max_x = GlobalMaxPooling1D()(x)
    x = concatenate([avg_x, max_x])
    x = BatchNormalization()(x)
    return x
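This `__call__` is a "concat pooling" head: average pooling keeps the overall activation level while max pooling keeps the strongest feature, and both views are concatenated. The enclosing class is not shown in the source, so the wrapper below is a hypothetical stand-in:

```python
from keras.layers import (Input, Conv1D, GlobalAveragePooling1D,
                          GlobalMaxPooling1D, BatchNormalization, concatenate)
from keras.models import Model

class ConcatPoolHead:
    """Hypothetical stand-in for the snippet's enclosing class."""
    def __init__(self, model):
        self.model = model           # any layer/model producing (batch, steps, feats)

    def __call__(self, inputs):
        x = self.model(inputs)
        avg_x = GlobalAveragePooling1D()(x)
        max_x = GlobalMaxPooling1D()(x)
        return BatchNormalization()(concatenate([avg_x, max_x]))

inp = Input(shape=(50, 16))
out = ConcatPoolHead(Conv1D(32, 3, padding='same'))(inp)
model = Model(inp, out)              # output shape: (None, 64) = 32 avg + 32 max
```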
def __call__(self, inputs):
    x = Conv1D(self.filters, 3, activation='relu')(inputs)
    return GlobalMaxPooling1D()(x)
def Model1(dim, max_ques_len, max_ans_len, vocab_lim, embedding):
    # Question tower: frozen pre-trained embeddings -> Conv1D -> global max pool.
    inp_q = Input(shape=(max_ques_len,))
    embedding_q = Embedding(vocab_lim, dim, input_length=max_ques_len, weights=[embedding], trainable=False)(inp_q)
    conv_q = Conv1D(100, 5, padding='same', activation='relu')(embedding_q)
    conv_q = Dropout(0.25)(conv_q)
    pool_q = GlobalMaxPooling1D()(conv_q)

    # Answer tower with the same structure.
    inp_a = Input(shape=(max_ans_len,))
    embedding_a = Embedding(vocab_lim, dim, input_length=max_ans_len, weights=[embedding], trainable=False)(inp_a)
    conv_a = Conv1D(100, 5, padding='same', activation='relu')(embedding_a)
    conv_a = Dropout(0.25)(conv_a)
    pool_a = GlobalMaxPooling1D()(conv_a)

    # Bilinear similarity sim = (W q) . a, then concatenate [q, a, sim].
    #sim = SimLayer(1)([pool_q, pool_a])
    sim = dot([Dense(100, use_bias=False)(pool_q), pool_a], axes=-1)
    model_sim = concatenate([pool_q, pool_a, sim])

    model_final = Dropout(0.5)(model_sim)
    model_final = Dense(201)(model_final)    # 201 = 100 + 100 + 1 concat width
    model_final = Dropout(0.5)(model_final)
    model_final = Dense(1, activation='sigmoid')(model_final)

    model = Model(inputs=[inp_q, inp_a], outputs=[model_final])
    print(model.output_shape)
    model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
    print(model.summary())
    return model
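Exercising Model1 on random data; all sizes and the embedding matrix below are made up. Note how the concatenation width (100 + 100 + 1) matches the Dense(201) layer:

```python
import numpy as np

emb = np.random.rand(1000, 50)                       # (vocab_lim, dim)
model = Model1(dim=50, max_ques_len=20, max_ans_len=40,
               vocab_lim=1000, embedding=emb)
q = np.random.randint(0, 1000, size=(8, 20))         # question token ids
a = np.random.randint(0, 1000, size=(8, 40))         # answer token ids
model.train_on_batch([q, a], np.random.randint(0, 2, size=(8, 1)))
```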
def text_cnn(max_seq_index, max_seq_length):
    text_input = Input(shape=(max_seq_length,), name='text_input')
    x = Embedding(output_dim=15,
                  input_dim=max_seq_index,
                  input_length=max_seq_length)(text_input)

    conv_a = Conv1D(15, 2, activation='relu')(x)
    conv_b = Conv1D(15, 4, activation='relu')(x)
    conv_c = Conv1D(15, 6, activation='relu')(x)
    pool_a = GlobalMaxPooling1D()(conv_a)
    pool_b = GlobalMaxPooling1D()(conv_b)
    pool_c = GlobalMaxPooling1D()(conv_c)

    flattened = concatenate([pool_a, pool_b, pool_c])
    drop = Dropout(.2)(flattened)
    dense = Dense(1)(drop)
    out = Activation("sigmoid")(dense)

    model = Model(inputs=text_input, outputs=out)
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
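A quick end-to-end check of text_cnn on random data; the vocabulary size and sequence length are arbitrary here. The snippet itself needs the usual Keras imports:

```python
import numpy as np
from keras.layers import (Input, Embedding, Conv1D, GlobalMaxPooling1D,
                          concatenate, Dropout, Dense, Activation)
from keras.models import Model

model = text_cnn(max_seq_index=5000, max_seq_length=40)
X = np.random.randint(1, 5000, size=(8, 40))   # 8 padded index sequences
y = np.random.randint(0, 2, size=(8, 1))       # binary labels
model.fit(X, y, epochs=1, batch_size=4)
```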
def fhan3_max(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs)
    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)
    #alpha_its, Si = AttentionLayer(name='att1')(hij)
    wordDrop = Dropout(DROPOUTPER, name='wordDrop')(hij)
    word_max = GlobalMaxPooling1D()(wordDrop)
    wordEncoder = Model(wordInputs, word_max)

    # -----------------------------------------------------------------------------------------------
    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs', dtype='float32')
    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)
    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs)
    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)
    #alpha_s, Vb = AttentionLayer(name='att2')(hi)
    sentDrop = Dropout(DROPOUTPER, name='sentDrop')(hi)
    sent_max = GlobalMaxPooling1D()(sentDrop)
    # Reshape the pooled body vector to (1, features) so it can be appended to
    # the headline's embedding sequence as one extra pseudo-token. Note this
    # requires 2*WORDGRU == EMBEDDING_DIM for the feature dimensions to match.
    Vb = Reshape((1, sent_max._keras_shape[1]))(sent_max)

    #-----------------------------------------------------------------------------------------------
    headlineInput = Input(shape=(MAX_WORDS,), name='headlineInput', dtype='float32')
    headlineEmb = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, mask_zero=False, name='headlineEmb')(headlineInput)
    #Vb = Masking(mask_value=0.0, name='Vb')(Vb)
    headlineBodyEmb = concatenate([headlineEmb, Vb], axis=1, name='headlineBodyEmb')
    h3 = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru3')(headlineBodyEmb)
    #a3, Vn = AttentionLayer(name='att3')(h3)
    headDrop = Dropout(DROPOUTPER, name='3Drop')(h3)
    head_max = GlobalMaxPooling1D()(headDrop)

    v6 = Dense(1, activation="sigmoid", kernel_initializer='he_normal', name="dense")(head_max)
    model = Model(inputs=[docInputs, headlineInput], outputs=[v6])
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
def cnn_melspect_1D(input_shape):
    kernel_size = 3
    #activation_func = LeakyReLU()
    activation_func = Activation('relu')
    inputs = Input(input_shape)

    # Convolutional block_1
    conv1 = Conv1D(32, kernel_size)(inputs)
    act1 = activation_func(conv1)
    bn1 = BatchNormalization()(act1)
    pool1 = MaxPooling1D(pool_size=2, strides=2)(bn1)

    # Convolutional block_2
    conv2 = Conv1D(64, kernel_size)(pool1)
    act2 = activation_func(conv2)
    bn2 = BatchNormalization()(act2)
    pool2 = MaxPooling1D(pool_size=2, strides=2)(bn2)

    # Convolutional block_3
    conv3 = Conv1D(128, kernel_size)(pool2)
    act3 = activation_func(conv3)
    bn3 = BatchNormalization()(act3)

    # Global Layers
    gmaxpl = GlobalMaxPooling1D()(bn3)
    gmeanpl = GlobalAveragePooling1D()(bn3)
    mergedlayer = concatenate([gmaxpl, gmeanpl], axis=1)

    # Regular MLP
    dense1 = Dense(512,
                   kernel_initializer='glorot_normal',
                   bias_initializer='glorot_normal')(mergedlayer)
    actmlp = activation_func(dense1)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(512,
                   kernel_initializer='glorot_normal',
                   bias_initializer='glorot_normal')(reg)
    actmlp = activation_func(dense2)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(10, activation='softmax')(reg)
    model = Model(inputs=[inputs], outputs=[dense2])
    return model
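Finally, a hedged usage sketch for the audio model above. The input shape (time frames x mel bands) and the 10-way softmax target (e.g. a 10-genre dataset) are assumptions, not given in the source:

```python
import numpy as np

model = cnn_melspect_1D((128, 96))               # 128 frames x 96 mel bands (assumed)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
probs = model.predict(np.random.rand(2, 128, 96))
print(probs.shape)                               # (2, 10) class probabilities
```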