def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
        # Reparameterization trick: z = mu + sigma * epsilon, with epsilon ~ N(0, epsilon_std^2)
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
        # Reconstruction term (scaled by sequence length) plus KL divergence to N(0, I)
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss

    return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
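The encoder returns the loss closure together with the sampled latent tensor, so a caller is expected to wire both into a model. A minimal hypothetical sketch of that wiring (the decoder, the enclosing class, and the sizes here are assumptions, not part of the snippet):

# Hypothetical usage sketch; _buildDecoder, max_length and charset_size are assumed names.
x = Input(shape=(max_length, charset_size))
vae_loss, z = self._buildEncoder(x, latent_rep_size=196, max_length=max_length)
# x_decoded = self._buildDecoder(z, latent_rep_size=196, max_length=max_length)
# vae = Model(x, x_decoded)
# vae.compile(optimizer='Adam', loss=vae_loss)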
Python Convolution1D() usage examples (source code)
def getconvmodel(filter_length, nb_filter):
    model = Sequential()
    model.add(Convolution1D(nb_filter=nb_filter,
                            input_shape=(100, 32),
                            filter_length=filter_length,
                            border_mode='same',
                            activation='relu',
                            subsample_length=1))
    model.add(Lambda(sum_1d, output_shape=(nb_filter,)))
    # model.add(BatchNormalization(mode=0))
    model.add(Dropout(0.5))
    return model
def generator_model_bpsk(no_bits_in_a_frame):
    """
    BPSK outputs are generated by a CNN.
    The CNN should learn 1 - 2x, since x is binary and the output must be bipolar.
    It is also 1-tap processing; for 16-QAM it will be more complicated.
    I should consider how to optimize stride or oversampling/max pooling
    in a network. For GANs, hyperparameters can be optimized better than in
    conventional feedforward networks.
    While studying RNN-LSTM, I realized that many hyperparameters, such as
    gating variables, are optimized by the network itself. Those values used to be
    tuned by grid search or other external techniques, but an RNN can do it by
    itself online. Similarly, many hyperparameters may be easy to optimize in GANs.
    """
    model = Sequential()
    model.add(Convolution1D(
        1, 1,
        input_shape=(no_bits_in_a_frame, 1)))
    return model
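Since the docstring claims the 1-tap layer should realize 1 - 2x, one can verify it directly by pinning the kernel and bias to -2 and +1. This is an illustrative check added here, not part of the original project:

import numpy as np

model = generator_model_bpsk(no_bits_in_a_frame=8)
conv = model.layers[0]
w, b = conv.get_weights()
# Force the single tap to compute 1 - 2x (weight -2, bias +1), whatever the
# version-specific weight shape happens to be.
conv.set_weights([np.full_like(w, -2.0), np.full_like(b, 1.0)])

bits = np.random.randint(0, 2, size=(1, 8, 1)).astype('float32')
symbols = model.predict(bits)  # 0 -> +1, 1 -> -1
assert np.allclose(symbols, 1.0 - 2.0 * bits)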
def generator_model():  # CDNN Model
    print(INPUT_LN, N_GEN_l, CODE_LN)
    model = Sequential()
    model.add(Convolution1D(16, 5, border_mode='same', input_shape=(CODE_LN, 1)))
    model.add(Activation('relu'))
    model.add(UpSampling1D(length=N_GEN_l[0]))
    model.add(Convolution1D(32, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(UpSampling1D(length=N_GEN_l[1]))
    model.add(Convolution1D(1, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
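Each UpSampling1D multiplies the time axis while the 'same' convolutions preserve it, so the output length is CODE_LN * N_GEN_l[0] * N_GEN_l[1]. A quick shape check, with assumed values for the module-level constants the snippet reads from globals:

import numpy as np

CODE_LN = 8
N_GEN_l = [4, 4]  # assumed; defined elsewhere in the original module
INPUT_LN = CODE_LN * N_GEN_l[0] * N_GEN_l[1]

gen = generator_model()
noise = np.random.normal(size=(2, CODE_LN, 1))
assert gen.predict(noise).shape == (2, INPUT_LN, 1)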
def discriminator_model():
    model = Sequential()
    model.add(Convolution1D(
        12, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[0]))
    model.add(Convolution1D(12, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[1]))
    # model.add(Reshape((128*7,)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
def discriminator_model():
    model = Sequential()
    model.add(Convolution1D(
        12, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=4))
    model.add(Convolution1D(12, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=4))
    # model.add(Reshape((128*7,)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
test_separable_RNN.py (project: New_Layers-Keras-Tensorflow, author: WeidiXie)
def BuildModel(dim):
    '''
    :param dim: input shape.
    The Separable_SimpleRNN can be added after a linear convolution.
    Make sure nb_filter in the convolution layer matches output_dim in the RNN.
    '''
    inp = Input(shape=dim)
    x_conv = Convolution1D(nb_filter=16, filter_length=5, border_mode='same')(inp)
    x_rnn = Separable_SimpleRNN(output_dim=16, activation='relu')(x_conv)
    model = Model(input=inp, output=x_rnn)
    return model
# Load an example image.
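A hypothetical call, assuming sequences of length 100 with 8 features; note that Separable_SimpleRNN is a custom layer from this project, not stock Keras:

model = BuildModel(dim=(100, 8))
model.compile(optimizer='rmsprop', loss='mse')
model.summary()  # conv output and RNN output both have width 16, as required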
test_layer_normalization_RNN.py (project: New_Layers-Keras-Tensorflow, author: WeidiXie)
def BuildModel(dim):
    '''
    :param dim: input shape.
    The Separable_SimpleRNN can be added after a linear convolution.
    Make sure nb_filter in the convolution layer matches output_dim in the RNN.
    '''
    inp = Input(shape=dim)
    x_conv = Convolution1D(nb_filter=16, filter_length=5, border_mode='same')(inp)
    x_rnn = LN_SimpleRNN(output_dim=16, activation='tanh')(x_conv)
    model = Model(input=inp, output=x_rnn)
    return model
# Load an example image.
model_zoo.py (project: visual_turing_test-tutorial, author: mateuszmalinowski)
def create(self):
    self.textual_embedding(self, mask_zero=False)
    self.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=self._config.language_cnn_filter_length,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    # self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=False))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
model_zoo.py (project: visual_turing_test-tutorial, author: mateuszmalinowski)
def create(self):
    assert self._config.merge_mode in ['max', 'ave', 'sum'], \
        'Merge mode of this model is either max, ave or sum'
    model_list = [None] * self._config.language_cnn_views
    for j in xrange(1, self._config.language_cnn_views + 1):
        current_view = Sequential()
        self.textual_embedding(current_view, mask_zero=True)
        current_view.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=j,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(current_view)
        model_list[j - 1] = current_view
    self.add(Merge(model_list, mode='concat'))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
def test_convolution_1d():
    nb_samples = 2
    nb_steps = 8
    input_dim = 2
    filter_length = 3
    nb_filter = 3

    for border_mode in _convolution_border_modes:
        for subsample_length in [1, 2]:
            if border_mode == 'same' and subsample_length != 1:
                continue
            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))
def build_hcnn_model(opts, vocab_size=0, maxnum=50, maxlen=50, embedd_dim=50, embedding_weights=None, verbose=False):
    N = maxnum
    L = maxlen
    logger.info("Model parameters: max_sentnum = %d, max_sentlen = %d, embedding dim = %s, "
                "nbfilters = %s, filter1_len = %s, filter2_len = %s, drop rate = %s, l2 = %s"
                % (N, L, embedd_dim, opts.nbfilters, opts.filter1_len, opts.filter2_len,
                   opts.dropout, opts.l2_value))
    word_input = Input(shape=(N * L,), dtype='int32', name='word_input')
    x = Embedding(output_dim=embedd_dim, input_dim=vocab_size, input_length=N * L,
                  weights=embedding_weights, name='x')(word_input)
    drop_x = Dropout(opts.dropout, name='drop_x')(x)
    resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)
    # Word-level convolution applied within each sentence, then averaged per sentence
    z = TimeDistributed(Convolution1D(opts.nbfilters, opts.filter1_len, border_mode='valid'), name='z')(resh_W)
    avg_z = TimeDistributed(AveragePooling1D(pool_length=L - opts.filter1_len + 1), name='avg_z')(z)  # shape = (N, 1, nbfilters)
    resh_z = Reshape((N, opts.nbfilters), name='resh_z')(avg_z)  # shape = (N, nbfilters)
    # Sentence-level convolution over the sequence of sentence vectors
    hz = Convolution1D(opts.nbfilters, opts.filter2_len, border_mode='valid', name='hz')(resh_z)
    # avg_h = MeanOverTime(mask_zero=True, name='avg_h')(hz)
    avg_hz = GlobalAveragePooling1D(name='avg_hz')(hz)
    y = Dense(output_dim=1, activation='sigmoid', name='output')(avg_hz)
    model = Model(input=word_input, output=y)
    if verbose:
        model.summary()
    start_time = time.time()
    model.compile(loss='mse', optimizer='rmsprop')
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f s" % total_time)
    return model
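The opts object only needs the handful of attributes read above, so a hypothetical smoke test can fake it with a namespace (all values here are assumptions, and the module's logger and imports are presumed to be in place):

from argparse import Namespace

opts = Namespace(nbfilters=100, filter1_len=5, filter2_len=3, dropout=0.5, l2_value=0.001)
model = build_hcnn_model(opts, vocab_size=4000, maxnum=50, maxlen=50,
                         embedd_dim=50, embedding_weights=None, verbose=True)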
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
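The double-brace expressions are hyperas search-space templates, so this function is meant to be handed to hyperas rather than called directly. A hypothetical driver, assuming a data() function whose return values match the model function's six parameters:

from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=model,
                                      data=data,  # assumed: returns X_train, X_test, y_train, y_test, maxlen, max_features
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())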
def buildConvolution(self, name):
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    assert filters
    assert nb_filter
    convs = []
    for fsz in filters:
        layer_name = '%s-conv-%d' % (name, fsz)
        conv = Convolution1D(
            nb_filter=nb_filter,
            filter_length=fsz,
            border_mode='valid',
            # activation='relu',
            subsample_length=1,
            init='glorot_uniform',
            # init=init,
            # init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
            W_constraint=maxnorm(self.params.get('w_maxnorm')),
            b_constraint=maxnorm(self.params.get('b_maxnorm')),
            # W_regularizer=regularizers.l2(self.params.get('w_l2')),
            # b_regularizer=regularizers.l2(self.params.get('b_l2')),
            # input_shape=(self.q_length, self.wdim),
            name=layer_name
        )
        convs.append(conv)
    self.layers['%s-convolution' % name] = convs
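The method only registers one convolution branch per filter width; a sibling method presumably applies every branch to the same embedded sequence and concatenates the pooled results (a Kim-style multi-window CNN). A hypothetical sketch of that consumption, inside the same class (applyConvolution is an assumed name, not from the source):

def applyConvolution(self, name, embedded):
    # Hypothetical: run each stored branch over one embedded input and merge.
    feats = []
    for conv in self.layers['%s-convolution' % name]:
        h = conv(embedded)                     # (batch, steps - fsz + 1, nb_filter)
        feats.append(GlobalMaxPooling1D()(h))  # (batch, nb_filter)
    return merge(feats, mode='concat')         # one pooled vector per filter width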
def build_small_chrom_label(args):
    model = Sequential()
    model.add(Convolution1D(input_dim=len(args.inputs),
                            input_length=args.window_size,
                            nb_filter=40,
                            filter_length=16,
                            border_mode='valid',
                            activation="relu",
                            init='normal'))
    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Convolution1D(nb_filter=64, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Flatten())
    model.add(Dense(output_dim=32, init='normal'))
    model.add(Activation('relu'))
    model.add(Dense(output_dim=len(args.labels), init='normal'))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=0.5)
    adamo = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    classes = args.labels.keys()
    my_metrics = [metrics.categorical_accuracy, precision, recall]
    model.compile(loss='categorical_crossentropy', optimizer=adamo, metrics=my_metrics)
    print('model summary:\n', model.summary())
    return model
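Only three attributes of args are used, so a hypothetical invocation can stub them out with a namespace (the values below are assumptions, and the precision and recall metrics referenced in compile are custom functions defined elsewhere in the original script):

from argparse import Namespace

args = Namespace(inputs={'A': 0, 'C': 1, 'G': 2, 'T': 3},  # assumed channel mapping
                 window_size=128,
                 labels={'NONE': 0, 'GENE': 1})            # assumed label set
model = build_small_chrom_label(args)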
def build_sequential_chrom_label(args):
    model = Sequential()
    model.add(Convolution1D(input_dim=len(args.inputs),
                            input_length=args.window_size,
                            nb_filter=128,
                            filter_length=16,
                            border_mode='valid',
                            activation="relu",
                            init='normal'))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=192, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=192, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=256, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Flatten())
    model.add(Dense(output_dim=50, init='normal'))
    model.add(Activation('relu'))
    model.add(Dense(output_dim=len(args.labels), init='normal'))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=0.5)
    adamo = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    classes = args.labels.keys()
    my_metrics = [metrics.categorical_accuracy, precision, recall]
    model.compile(loss='categorical_crossentropy', optimizer=adamo, metrics=my_metrics)
    print('model summary:\n', model.summary())
    return model
def baseModel(self, nb_filter=250, filter_length=3, hidden_dims=125):
    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(self.max_words + self.index_from, self.embedding_dims,
                        input_length=self.max_length))
    model.add(Dropout(0.25))
    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    # filter_length is the filter size; strides plays the role of the step in a 2D CNN.
    model.add(Convolution1D(filters=nb_filter,
                            kernel_size=filter_length,
                            padding='valid',
                            activation='relu',
                            strides=1))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_size=2))
    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())
    # We add a vanilla hidden layer:
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))
    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop')
    return model
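Unlike most snippets on this page, this one uses the Keras 2 argument names. The mapping to the older spellings seen elsewhere here is mechanical; as far as I know, Convolution1D remained available in Keras 2 as an alias of Conv1D:

from keras.layers import Conv1D

# Keras 2 spelling:
conv = Conv1D(filters=250, kernel_size=3, padding='valid', strides=1)
# Keras 1 equivalent of the same layer:
# Convolution1D(nb_filter=250, filter_length=3, border_mode='valid', subsample_length=1)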
sarcasm_detection_model_CNN_LSTM_DNN_word2vec.py (project: SarcasmDetection, author: AniSkywalker)
def _build_network(self, vocab_size, maxlen, emb_weights=[], hidden_units=256, trainable=False):
    print('Build model...')
    model = Sequential()
    model.add(Embedding(vocab_size, emb_weights.shape[1], input_length=maxlen, weights=[emb_weights],
                        trainable=trainable))
    # model.add(Reshape((maxlen, emb_weights.shape[1], 1)))
    model.add(Convolution1D(emb_weights.shape[1], 3, kernel_initializer='he_normal', padding='valid',
                            activation='sigmoid',
                            input_shape=(1, maxlen)))
    # model.add(MaxPooling1D(pool_size=3))
    model.add(Convolution1D(emb_weights.shape[1], 3, kernel_initializer='he_normal', padding='valid',
                            activation='sigmoid',
                            input_shape=(1, maxlen - 2)))
    # model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.25))
    model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5,
                   return_sequences=True))
    model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5))
    model.add(Dense(hidden_units, kernel_initializer='he_normal', activation='sigmoid'))
    model.add(Dense(2, activation='softmax'))
    adam = Adam(lr=0.0001)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    print('No of parameters:', model.count_params())
    print(model.summary())
    return model
sarcasm_detection_model_CNN_LSTM_DNN.py (project: SarcasmDetection, author: AniSkywalker)
def _build_network(self, vocab_size, maxlen, embedding_dimension=256, hidden_units=256, trainable=False):
    print('Build model...')
    model = Sequential()
    model.add(
        Embedding(vocab_size, embedding_dimension, input_length=maxlen, embeddings_initializer='glorot_normal'))
    model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                            input_shape=(1, maxlen)))
    # model.add(MaxPooling1D(pool_size=3))
    model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                            input_shape=(1, maxlen - 2)))
    # model.add(MaxPooling1D(pool_size=3))
    # model.add(Dropout(0.25))
    model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5,
                   return_sequences=True))
    model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5))
    model.add(Dense(hidden_units, kernel_initializer='he_normal', activation='sigmoid'))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    adam = Adam(lr=0.0001)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    print('No of parameters:', model.count_params())
    print(model.summary())
    return model
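The maxlen to maxlen - 2 progression in the commented input shapes follows the standard 'valid' convolution length formula, out = floor((in - kernel) / stride) + 1. A quick sanity check:

def valid_conv_len(in_len, kernel_size, stride=1):
    # Output length of an unpadded ('valid') 1D convolution.
    return (in_len - kernel_size) // stride + 1

assert valid_conv_len(40, 3) == 38  # kernel 3 trims 2 steps, matching maxlen - 2 above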
def _generate_model(self, lembedding, num_classes=2, num_features=128, train_vectors=True):
    model = Sequential()
    if lembedding.vector_box.W is None:
        emb = Embedding(lembedding.vector_box.size,
                        lembedding.vector_box.vector_dim,
                        W_constraint=None,
                        input_length=lembedding.size)
    else:
        emb = Embedding(lembedding.vector_box.size,
                        lembedding.vector_box.vector_dim,
                        weights=[lembedding.vector_box.W], W_constraint=None,
                        input_length=lembedding.size)
    emb.trainable = train_vectors
    model.add(emb)
    model.add(Convolution1D(num_features, 3, init='uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(2))
    model.add(Dropout(0.25))
    model.add(Convolution1D(num_features, 3, init='uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    if num_classes == 2:
        model.add(Dense(1, activation='sigmoid'))
        if self.optimizer is None:
            self.optimizer = 'rmsprop'
        model.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])
    else:
        if self.optimizer is None:
            self.optimizer = 'adam'
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])
    return model
def cnn_train(X_train, y_train, vocab_size):
    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)
    print('Build model...')
    model = Sequential()
    model.add(Embedding(vocab_size, EMBED_SIZE, input_length=MAX_LEN))
    model.add(Dropout(0.25))
    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_length=2))
    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())
    # We add a vanilla hidden layer:
    model.add(Dense(HIDDEN_SIZE))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))
    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS, show_accuracy=True)
    return model
def Model1(dim, max_ques_len, max_ans_len, vocab_lim, embedding):
    inp_q = Input(shape=(max_ques_len,))
    embedding_q = Embedding(vocab_lim, dim, input_length=max_ques_len, weights=[embedding], trainable=False)(inp_q)
    conv_q = Convolution1D(100, 5, border_mode='same', activation='relu')(embedding_q)
    conv_q = Dropout(0.25)(conv_q)
    pool_q = GlobalMaxPooling1D()(conv_q)

    inp_a = Input(shape=(max_ans_len,))
    embedding_a = Embedding(vocab_lim, dim, input_length=max_ans_len, weights=[embedding], trainable=False)(inp_a)
    conv_a = Convolution1D(100, 5, border_mode='same', activation='relu')(embedding_a)
    conv_a = Dropout(0.25)(conv_a)
    pool_a = GlobalMaxPooling1D()(conv_a)

    # sim = SimLayer(1)([pool_q, pool_a])
    sim = merge([Dense(100, bias=False)(pool_q), pool_a], mode='dot')
    # print pool_a, pool_q
    # model1 = merge([pool_q, pool_a, sim], mode='concat')
    # model = Model(input=[inp_q, inp_a], output=[model1])
    # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # print model.summary()
    # return model

    model_sim = merge([pool_q, pool_a, sim], mode='concat')
    print(model_sim)
    # model_final = Flatten()(model_sim)
    model_final = Dropout(0.5)(model_sim)
    model_final = Dense(201)(model_final)
    model_final = Dropout(0.5)(model_final)
    model_final = Dense(1, activation='sigmoid')(model_final)

    model = Model(input=[inp_q, inp_a], output=[model_final])
    print(model.output_shape)
    model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
    print(model.summary())
    return model
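The dot merge of Dense(100, bias=False)(pool_q) with pool_a implements a bilinear similarity between the pooled question and answer vectors: with W the (100, 100) kernel of the bias-free Dense layer,

\mathrm{sim}(q, a) = q^\top W a

so the network learns a full interaction matrix between the two representations rather than a plain dot product.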
def discriminator_model_r0():
    model = Sequential()
    model.add(Convolution1D(
        2, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    # model.add(Reshape((2*INPUT_LN,)))
    model.add(Flatten())
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
def generator_model_44():  # CDNN Model
    model = Sequential()
    model.add(Convolution1D(16, 5, border_mode='same', input_shape=(CODE_LN, 1)))
    model.add(Activation('relu'))
    model.add(UpSampling1D(length=4))
    model.add(Convolution1D(32, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(UpSampling1D(length=4))
    model.add(Convolution1D(1, 5, border_mode='same'))
    # model.add(Activation('relu'))
    return model
def test_convolution_1d(self):
    nb_samples = 9
    nb_steps = 7
    input_dim = 10
    filter_length = 6
    nb_filter = 5

    weights_in = [np.ones((nb_filter, input_dim, filter_length, 1)), np.ones(nb_filter)]
    input = np.ones((nb_samples, nb_steps, input_dim))

    for weight in [None, weights_in]:
        for border_mode in ['valid', 'full', 'same']:
            for subsample_length in [1, 3]:
                if border_mode == 'same' and subsample_length != 1:
                    continue
                for W_regularizer in [None, 'l2']:
                    for b_regularizer in [None, 'l2']:
                        for act_regularizer in [None, 'l2']:
                            layer = convolutional.Convolution1D(
                                nb_filter, filter_length, weights=weight,
                                border_mode=border_mode, W_regularizer=W_regularizer,
                                b_regularizer=b_regularizer, activity_regularizer=act_regularizer,
                                subsample_length=subsample_length, input_shape=(None, input_dim))
                            layer.input = theano.shared(value=input)
                            for train in [True, False]:
                                out = layer.get_output(train).eval()
                                assert input.shape[0] == out.shape[0]
                                if border_mode == 'same' and subsample_length == 1:
                                    assert input.shape[1] == out.shape[1]
                            config = layer.get_config()