from keras.layers import (Input, Conv1D, LeakyReLU, AveragePooling1D,
                          Flatten, Dense, Activation, concatenate)
from keras.models import Model


def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxiliary_input")
    # Conv Layer 1
    x = Conv1D(filters=100, kernel_size=13, padding='same')(disc_input)
    x = LeakyReLU(0.2)(x)                    # output shape is (400, 100)
    x = AveragePooling1D(pool_size=20)(x)    # output shape is (20, 100)
    # Conv Layer 2
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x)                    # output shape is (20, 250)
    x = AveragePooling1D(pool_size=5)(x)     # output shape is (4, 250)
    # Conv Layer 3
    x = Conv1D(filters=300, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x)                    # output shape is (4, 300)
    x = Flatten()(x)                         # output shape is 1200
    x = concatenate([x, aux_input], axis=-1)  # shape is 1247
    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x)                    # output shape is 200
    # Dense Layer 2
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    discriminator_model = Model(
        outputs=[x], inputs=[disc_input, aux_input], name=model_name)
    return discriminator_model
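# A minimal smoke test for the Keras 2 discriminator above. The random inputs,
# batch size, and optimizer/loss choice are illustrative assumptions, not part
# of the original source.
import numpy as np

disc = discriminator_model()
disc.compile(loss='binary_crossentropy', optimizer='adam')
fake_signal = np.random.randn(8, 400, 1)  # batch of 8 length-400 signals
fake_aux = np.random.randn(8, 47)         # matching auxiliary feature vectors
print(disc.predict([fake_signal, fake_aux]).shape)  # -> (8, 1)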
Python AveragePooling1D() example source code
# From the Keras test suite (Keras 1.x API: `stride` / `border_mode`).
from keras.layers import convolutional
from keras.utils.test_utils import layer_test


def test_averagepooling_1d():
    for stride in [1, 2]:
        layer_test(convolutional.AveragePooling1D,
                   kwargs={'stride': stride,
                           'border_mode': 'valid'},
                   input_shape=(3, 5, 4))
import time
import logging

from keras.layers import (Input, Embedding, Dropout, Reshape, Dense,
                          TimeDistributed, Convolution1D, AveragePooling1D,
                          GlobalAveragePooling1D)
from keras.models import Model

logger = logging.getLogger(__name__)


def build_hcnn_model(opts, vocab_size=0, maxnum=50, maxlen=50, embedd_dim=50,
                     embedding_weights=None, verbose=False):
    N = maxnum
    L = maxlen
    logger.info("Model parameters: max_sentnum = %d, max_sentlen = %d, "
                "embedding dim = %s, nbfilters = %s, filter1_len = %s, "
                "filter2_len = %s, drop rate = %s, l2 = %s"
                % (N, L, embedd_dim, opts.nbfilters, opts.filter1_len,
                   opts.filter2_len, opts.dropout, opts.l2_value))
    word_input = Input(shape=(N*L,), dtype='int32', name='word_input')
    x = Embedding(output_dim=embedd_dim, input_dim=vocab_size,
                  input_length=N*L, weights=embedding_weights,
                  name='x')(word_input)
    drop_x = Dropout(opts.dropout, name='drop_x')(x)
    resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)
    # Word-level convolution within each sentence (Keras 1.x API).
    z = TimeDistributed(Convolution1D(opts.nbfilters, opts.filter1_len,
                                      border_mode='valid'), name='z')(resh_W)
    avg_z = TimeDistributed(AveragePooling1D(pool_length=L-opts.filter1_len+1),
                            name='avg_z')(z)    # shape = (N, 1, nbfilters)
    resh_z = Reshape((N, opts.nbfilters), name='resh_z')(avg_z)  # (N, nbfilters)
    # Sentence-level convolution.
    hz = Convolution1D(opts.nbfilters, opts.filter2_len, border_mode='valid',
                       name='hz')(resh_z)
    # avg_h = MeanOverTime(mask_zero=True, name='avg_h')(hz)
    avg_hz = GlobalAveragePooling1D(name='avg_hz')(hz)
    y = Dense(output_dim=1, activation='sigmoid', name='output')(avg_hz)
    model = Model(input=word_input, output=y)
    if verbose:
        model.summary()
    start_time = time.time()
    model.compile(loss='mse', optimizer='rmsprop')
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f s" % total_time)
    return model
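# Hypothetical usage sketch for build_hcnn_model. The SimpleNamespace stand-in
# for `opts` and every hyperparameter value below are assumptions; the function
# itself requires the Keras 1.x API it is written against.
from types import SimpleNamespace

opts = SimpleNamespace(nbfilters=100, filter1_len=5, filter2_len=3,
                       dropout=0.5, l2_value=0.001)
hcnn = build_hcnn_model(opts, vocab_size=4000, maxnum=50, maxlen=50,
                        embedd_dim=50, verbose=True)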
from keras.preprocessing import sequence
from keras.layers import (merge, LSTM, Dense, Flatten, Activation,
                          RepeatVector, Permute, TimeDistributed,
                          AveragePooling1D)
from keras.models import Model


def lstm_attention_combine_train(X_train_list, y_train, vocab_size):
    N = len(X_train_list)
    X_train_list = [sequence.pad_sequences(x_train, maxlen=MAX_LEN)
                    for x_train in X_train_list]
    input_list = []
    out_list = []
    for i in range(N):
        input, out = get_embedding_input_output('f%d' % i, vocab_size)
        input_list.append(input)
        out_list.append(out)
    x = merge(out_list, mode='concat')
    lstm_out = LSTM(HIDDEN_SIZE, return_sequences=True)(x)
    x = lstm_out
    # Re-apply the soft-attention block ten times over its own output.
    for i in range(10):
        att = TimeDistributed(Dense(1))(x)
        att = Flatten()(att)
        att = Activation(activation="softmax")(att)
        att = RepeatVector(HIDDEN_SIZE)(att)
        att = Permute((2, 1))(att)
        x = att
    mer = merge([att, lstm_out], "mul")
    mer = merge([mer, out_list[-1]], 'mul')
    hid = AveragePooling1D(pool_length=2)(mer)
    hid = Flatten()(hid)
    # hid = merge([hid, out_list[-1]], mode='concat')
    main_loss = Dense(1, activation='sigmoid', name='main_output')(hid)
    model = Model(input=input_list, output=main_loss)
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train_list, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)
    return model
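# Both LSTM training functions in this collection rely on a helper and some
# module-level constants that are not shown here. A plausible sketch follows;
# every name and value in it is an assumption made for illustration.
from keras.layers import Input, Embedding

MAX_LEN = 100      # assumed padding length
HIDDEN_SIZE = 128  # assumed recurrent width
BATCH_SIZE = 32    # assumed batch size
EPOCHS = 10        # assumed epoch count

def get_embedding_input_output(prefix, vocab_size):
    inp = Input(shape=(MAX_LEN,), dtype='int32', name='%s_input' % prefix)
    # output_dim matches HIDDEN_SIZE so the element-wise 'mul' merges in the
    # surrounding functions line up with the LSTM output.
    out = Embedding(input_dim=vocab_size, output_dim=HIDDEN_SIZE,
                    input_length=MAX_LEN)(inp)
    return inp, out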
from keras.layers import (Input, Convolution1D, LeakyReLU, AveragePooling1D,
                          Flatten, Dense, Activation, merge)
from keras.models import Model


def discriminator_model(model_name="discriminator"):
    # Keras 1.x version of the discriminator above; note the linear
    # (unbounded) output in place of the sigmoid.
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxiliary_input")
    # Conv Layer 1
    x = Convolution1D(nb_filter=100,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(disc_input)
    x = LeakyReLU(0.2)(x)                    # output shape is (400, 100)
    x = AveragePooling1D(pool_length=20)(x)  # output shape is (20, 100)
    # Conv Layer 2
    x = Convolution1D(nb_filter=250,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = LeakyReLU(0.2)(x)                    # output shape is (20, 250)
    x = AveragePooling1D(pool_length=5)(x)   # output shape is (4, 250)
    # Conv Layer 3
    x = Convolution1D(nb_filter=300,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = LeakyReLU(0.2)(x)                    # output shape is (4, 300)
    x = Flatten()(x)                         # output shape is 1200
    x = merge([x, aux_input], mode="concat", concat_axis=-1)  # shape is 1247
    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x)                    # output shape is 200
    # Dense Layer 2
    x = Dense(1)(x)
    # x = Activation('sigmoid')(x)
    x = Activation('linear')(x)              # output shape is 1
    discriminator_model = Model(
        input=[disc_input, aux_input], output=[x], name=model_name)
    return discriminator_model
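# The linear output above suggests a critic-style objective rather than a
# binary classifier. If a Wasserstein-style loss is intended (an assumption;
# the source only shows the commented-out sigmoid), compilation might look like:
from keras import backend as K

def wasserstein_loss(y_true, y_pred):
    return K.mean(y_true * y_pred)

critic = discriminator_model()
critic.compile(loss=wasserstein_loss, optimizer='rmsprop')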
from keras.layers import GRU


def lstm_memory_train(X_train_list, y_train, vocab_size):
    N = len(X_train_list)
    X_train_list = [sequence.pad_sequences(x_train, maxlen=MAX_LEN)
                    for x_train in X_train_list]
    input_list = []
    out_list = []
    for i in range(N):
        input, out = get_embedding_input_output('f%d' % i, vocab_size)
        input_list.append(input)
        out_list.append(out)
    x = merge(out_list, mode='concat')
    lstm_out = LSTM(HIDDEN_SIZE, return_sequences=True)(x)
    lstm_share = GRU(HIDDEN_SIZE, return_sequences=True)
    x = lstm_out
    # Two memory hops: attention, element-wise merges, residual sum, shared GRU.
    for i in range(2):
        att = TimeDistributed(Dense(1))(x)
        att = Flatten()(att)
        att = Activation(activation="softmax")(att)
        att = RepeatVector(HIDDEN_SIZE)(att)
        att = Permute((2, 1))(att)
        mer = merge([att, lstm_out], "mul")
        mer = merge([mer, out_list[-1]], 'mul')
        z = merge([lstm_out, mer], 'sum')
        z = lstm_share(z)
        x = z
    hid = AveragePooling1D(pool_length=2)(x)
    hid = Flatten()(hid)
    # hid = merge([hid, out_list[-1]], mode='concat')
    main_loss = Dense(1, activation='sigmoid', name='main_output')(hid)
    model = Model(input=input_list, output=main_loss)
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train_list, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)
    return model
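# Hypothetical call for either LSTM training function above; the toy data
# shapes and vocabulary size are assumptions for illustration only.
import numpy as np

X_train_list = [np.random.randint(1, 4000, size=(64, MAX_LEN))
                for _ in range(3)]
y_train = np.random.randint(0, 2, size=(64,))
model = lstm_memory_train(X_train_list, y_train, vocab_size=4000)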