def __init__(self, word_index, embedding_matrix):
    """Two-input (question, answer) CNN classifier with a 2-way softmax.

    Each input passes through its own frozen pretrained-embedding lookup,
    then several parallel Conv1D -> MaxPool -> Flatten blocks whose outputs
    are concatenated and fed to a dense head.  Compiled with Nadam and
    categorical crossentropy on ``self.model``.
    """
    vocab_size = len(word_index) + 1

    def frozen_embedding(seq_len):
        # Non-trainable lookup initialised from the pretrained matrix.
        return Embedding(vocab_size,
                         EMBEDDING_DIM,
                         weights=[embedding_matrix],
                         input_length=seq_len,
                         trainable=False)

    embedding_layer_q = frozen_embedding(MAX_SEQUENCE_LENGTH_Q)
    embedding_layer_a = frozen_embedding(MAX_SEQUENCE_LENGTH_A)

    question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
    answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
    embedded_question = embedding_layer_q(question)
    embedded_answer = embedding_layer_a(answer)

    def conv_block(tensor, kernel_size, pool_size):
        # One parallel feature extractor: valid conv + temporal max pooling.
        features = Convolution1D(filters=20,
                                 kernel_size=kernel_size,
                                 padding="valid",
                                 activation="relu",
                                 strides=1)(tensor)
        features = MaxPooling1D(pool_size=pool_size)(features)
        return Flatten()(features)

    # Answer branch uses kernels 3/5 with pool 2; question branch 5/7/9 with pool 3.
    conv_blocksA = [conv_block(embedded_answer, sz, 2) for sz in (3, 5)]
    conv_blocksQ = [conv_block(embedded_question, sz, 3) for sz in (5, 7, 9)]

    z = Concatenate()(conv_blocksA + conv_blocksQ)
    z = Dropout(0.5)(z)
    z = Dense(100, activation="relu")(z)
    softmax_c_q = Dense(2, activation='softmax')(z)

    self.model = Model([question, answer], softmax_c_q)
    self.model.compile(loss='categorical_crossentropy',
                       optimizer=Nadam(),
                       metrics=['acc'])
# Python Convolution1D() usage examples
def __init__(self, word_index, embedding_matrix):
    """Context/question/answer classifier with a 2-way softmax.

    Context and question are encoded by BiLSTMs whose sequence outputs feed
    parallel Conv1D blocks; the answer is summarised by a BiLSTM final
    state.  All features are concatenated into a dense head.  Compiled with
    Nadam and categorical crossentropy on ``self.model``.
    """
    vocab_size = len(word_index) + 1

    def frozen_embedding(seq_len):
        # Non-trainable lookup initialised from the pretrained matrix.
        return Embedding(vocab_size,
                         EMBEDDING_DIM,
                         weights=[embedding_matrix],
                         input_length=seq_len,
                         trainable=False)

    embedding_layer_c = frozen_embedding(MAX_SEQUENCE_LENGTH_C)
    embedding_layer_q = frozen_embedding(MAX_SEQUENCE_LENGTH_Q)
    embedding_layer_a = frozen_embedding(MAX_SEQUENCE_LENGTH_A)

    context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
    question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
    answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
    embedded_context = embedding_layer_c(context)
    embedded_question = embedding_layer_q(question)
    embedded_answer = embedding_layer_a(answer)

    def conv_block(tensor, kernel_size):
        # Valid conv + pool-2 max pooling + flatten over an encoded sequence.
        features = Convolution1D(filters=20,
                                 kernel_size=kernel_size,
                                 padding="valid",
                                 activation="relu",
                                 strides=1)(tensor)
        features = MaxPooling1D(pool_size=2)(features)
        return Flatten()(features)

    # Context branch: BiLSTM sequence encoding, then parallel conv blocks.
    l_lstm_c = Bidirectional(LSTM(60, return_sequences=True))(embedded_context)
    conv_blocksC = [conv_block(l_lstm_c, sz) for sz in (5, 7)]

    # Question branch: same recipe with smaller kernels.
    l_lstm_q = Bidirectional(LSTM(60, return_sequences=True))(embedded_question)
    conv_blocksQ = [conv_block(l_lstm_q, sz) for sz in (3, 5)]

    # Answer branch: final BiLSTM state only (no sequence output).
    l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

    concat_c_q = concatenate([l_lstm_a] + conv_blocksQ + conv_blocksC, axis=1)
    relu_c_q_a = Dense(100, activation='relu')(concat_c_q)
    relu_c_q_a = Dropout(0.25)(relu_c_q_a)
    softmax_c_q_a = Dense(2, activation='softmax')(relu_c_q_a)

    self.model = Model([question, answer, context], softmax_c_q_a)
    self.model.compile(loss='categorical_crossentropy',
                       optimizer=Nadam(),
                       metrics=['acc'])
def get_model(self, num_features):
    """Build a CNN binary classifier over index sequences (Keras 1 API).

    Embedding -> Conv1D -> max-over-time -> two BatchNorm'd dense blocks ->
    single sigmoid unit.  ``num_features[0]`` is the input sequence length;
    the vocabulary size comes from ``self.vocab_size``.  Returns the
    uncompiled Sequential model.
    """
    embedding_dims = 128
    nb_filter = 250
    filter_length = 8
    drop = 0.2

    def max_1d(X):
        # Global max pooling over the time axis, applied via a Lambda layer.
        return K.max(X, axis=1)

    model = Sequential()
    # Efficient embedding layer mapping vocab indices into embedding_dims dims.
    model.add(Embedding(self.vocab_size,
                        embedding_dims,
                        input_length=num_features[0],
                        dropout=0.2))
    # Learn nb_filter word-group filters of width filter_length.
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(Lambda(max_1d, output_shape=(nb_filter,)))
    # Two identical fully-connected blocks: Dropout -> Dense -> BN -> ReLU.
    for _ in range(2):
        model.add(Dropout(drop))
        model.add(Dense(1024, init='glorot_uniform'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # Project onto a single unit and squash with a sigmoid.
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
def create_network(**kwargs):
    """Conv1D + LSTM binary classifier over fixed-length sequences.

    Keyword arguments override the defaults below (timesteps, data_dim,
    nb_filter, filter_length, pool_length); unknown keys are simply kept in
    the params dict.  Returns the compiled model (rmsprop, binary
    crossentropy).  Uses the Keras 1 convolution argument names.
    """
    params = {
        'timesteps': 128,
        'data_dim': 14,
        'nb_filter': 64,
        'filter_length': 3,
        'pool_length': 2,
    }
    params.update(**kwargs)

    network = Sequential()
    network.add(Convolution1D(nb_filter=params['nb_filter'],
                              filter_length=params['filter_length'],
                              border_mode='valid',
                              activation='relu',
                              subsample_length=1,
                              input_shape=(params['timesteps'],
                                           params['data_dim'])))
    network.add(MaxPooling1D(pool_length=params['pool_length']))
    network.add(Dropout(0.5))
    # Recurrent head: summarise the pooled conv features with a single LSTM.
    network.add(LSTM(64))
    network.add(Dropout(0.15))
    network.add(Dense(1))
    network.add(Activation('sigmoid'))
    network.compile(optimizer='rmsprop',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])
    return network
def build_cnn_char_threeModels(input_dim, output_dim, nb_filter, filter_size=3):
    """Three-branch character CNN classifier (Keras 1 API).

    Builds identical Embedding -> Conv1D -> global-max-pool -> tanh(Dense)
    branches for the left/center/right windows (module-level lengths L, M, R),
    merges them by concatenation, and finishes with a softmax over
    ``output_dim`` classes.  Returns the compiled model (adagrad,
    categorical crossentropy).
    """

    def char_branch(seq_len):
        # One window branch; seq_len is the branch's fixed input length.
        branch = Sequential()
        branch.add(Embedding(input_dim,
                             32,  # character embedding size
                             input_length=seq_len,
                             dropout=0.2))
        branch.add(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_size,
                                 border_mode="valid",
                                 activation="relu",
                                 subsample_length=1))
        branch.add(GlobalMaxPooling1D())
        branch.add(Dense(100))
        branch.add(Dropout(0.2))
        branch.add(Activation("tanh"))
        return branch

    left = char_branch(L)
    center = char_branch(M)
    right = char_branch(R)

    clf = Sequential()
    clf.add(Merge([left, center, right], mode="concat"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
def block_deepFlavourConvolutions(charged, neutrals, vertices, dropoutRate,
                                  active=True, batchnorm=False,
                                  batchmomentum=0.6):
    '''
    Deep Flavour convolution part: per-branch stacks of 1x1 convolutions
    over the charged-candidate, neutral-candidate, and secondary-vertex
    inputs.  Returns the three processed tensors (cpf, npf, vtx).
    '''

    def branch(x, prefix, filters):
        # Stack of 1x1 convolutions; BatchNorm (optional) and Dropout are
        # inserted between layers, never after the last convolution.
        if not active:
            # Frozen zero-weight conv keeps the graph wired while
            # contributing nothing trainable.
            return Convolution1D(1, 1, kernel_initializer='zeros',
                                 trainable=False)(x)
        last = len(filters) - 1
        for i, nfilt in enumerate(filters):
            x = Convolution1D(nfilt, 1, kernel_initializer='lecun_uniform',
                              activation='relu',
                              name='%s_conv%d' % (prefix, i))(x)
            if i < last:
                if batchnorm:
                    x = BatchNormalization(
                        momentum=batchmomentum,
                        name='%s_batchnorm%d' % (prefix, i))(x)
                x = Dropout(dropoutRate,
                            name='%s_dropout%d' % (prefix, i))(x)
        return x

    cpf = branch(charged, 'cpf', [64, 32, 32, 8])
    npf = branch(neutrals, 'npf', [32, 16, 4])
    vtx = branch(vertices, 'vtx', [64, 32, 32, 8])
    return cpf, npf, vtx
def convolutional_model_lessbroad(Inputs, nclasses, nregclasses, dropoutRate=-1):
    """Smaller convolutional flavour-tagging model.

    Runs 1x1-conv stacks over Inputs[1..3], concatenates the flattened
    branches with the global features Inputs[0], applies a dense trunk, and
    returns a Model with a softmax over ``nclasses``.

    NOTE(original author): the inputs are really not working as they are;
    they need a reshaping well before this point.
    """

    def conv_branch(x, filters):
        # 1x1 convolution stack with dropout after every layer, then flatten.
        for nfilt in filters:
            x = Convolution1D(nfilt, 1, kernel_initializer='lecun_uniform',
                              activation='relu')(x)
            x = Dropout(dropoutRate)(x)
        return Flatten()(x)

    cpf = conv_branch(Inputs[1], [32, 32, 16])
    npf = conv_branch(Inputs[2], [16, 8])
    vtx = conv_branch(Inputs[3], [32, 32, 16])

    x = Concatenate()([Inputs[0], cpf, npf, vtx])
    x = Dropout(dropoutRate)(x)
    x = Dense(600, activation='relu', kernel_initializer='lecun_uniform')(x)
    # Five Dense(100) blocks, each preceded by dropout.
    for _ in range(5):
        x = Dropout(dropoutRate)(x)
        x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax',
                        kernel_initializer='lecun_uniform')(x)
    return Model(inputs=Inputs, outputs=predictions)
def Dense_model_broad_reg(Inputs,nclasses,Inputshapes,dropoutRate=-1, npred = 1):
    """Broad regression model: 1x1-conv stacks over the charged/neutral/
    vertex inputs, a dense trunk, and a linear output of ``npred`` values.

    Fix: replaced the Keras 1 ``merge(..., mode='concat')`` calls with the
    Keras 2 ``Concatenate()`` layer.  This function already uses Keras 2-only
    API (``Model(inputs=..., outputs=...)``, ``kernel_initializer``), under
    which ``merge`` no longer exists, so the original raised at runtime.
    Matches the ``Concatenate()`` usage elsewhere in this file.

    NOTE(original author): the inputs are really not working as they are;
    they need a reshaping well before this point.
    """
    # Charged-candidate branch: 64 -> 32 -> 32 -> 8 (no dropout after last conv).
    cpf = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Flatten()(cpf)
    # Neutral-candidate branch: 32 -> 16 -> 4.
    npf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu',input_shape=Inputshapes[2])(Inputs[2])
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(4, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    npf = Flatten()(npf)
    # Secondary-vertex branch: 64 -> 32 -> 32 -> 8.
    vtx = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu',input_shape=Inputshapes[3])(Inputs[3])
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Flatten()(vtx)
    # Keras 2 Concatenate replaces the removed Keras 1 merge(..., mode='concat').
    x = Concatenate()([Inputs[0], cpf, npf, vtx])
    x = Dense(350, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    # Late fusion of the extra regression inputs before the linear output.
    x = Concatenate()([Inputs[4], x])
    predictions = Dense(npred, activation='linear', kernel_initializer='he_normal')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model
def Dense_model_lessbroad(Inputs,nclasses,Inputshapes,dropoutRate=-1):
    """Smaller classification model: 1x1-conv stacks over the charged/
    neutral/vertex inputs, a dense trunk, and a softmax over ``nclasses``.

    Fix: replaced the Keras 1 ``merge(..., mode='concat')`` call with the
    Keras 2 ``Concatenate()`` layer.  This function already uses Keras 2-only
    API (``Model(inputs=..., outputs=...)``, ``kernel_initializer``), under
    which ``merge`` no longer exists, so the original raised at runtime.
    Matches the ``Concatenate()`` usage elsewhere in this file.

    NOTE(original author): the inputs are really not working as they are;
    they need a reshaping well before this point.
    """
    # Charged-candidate branch: 32 -> 32 -> 16, dropout after every conv.
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Flatten()(cpf)
    # Neutral-candidate branch: 16 -> 8.
    npf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu',input_shape=Inputshapes[2])(Inputs[2])
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    npf = Dropout(dropoutRate)(npf)
    npf = Flatten()(npf)
    # Secondary-vertex branch: 32 -> 32 -> 16.
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu',input_shape=Inputshapes[3])(Inputs[3])
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Flatten()(vtx)
    # Keras 2 Concatenate replaces the removed Keras 1 merge(..., mode='concat').
    x = Concatenate()([Inputs[0], cpf, npf, vtx])
    x = Dropout(dropoutRate)(x)
    x = Dense(600, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model