def train(img_shape):
    classes = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']

    # Model
    model = Sequential()
    model.add(Convolution2D(
        32, 3, 3, input_shape=img_shape, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(32, 3, 3, activation='relu', W_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.5))
    model.add(Dense(len(classes), activation='softmax'))

    features, labels = get_featurs_labels(img_shape)

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(features, labels, nb_epoch=10, batch_size=32, validation_split=0.2, verbose=1)

    return model
Python maxnorm() usage examples

Source file: cnn.py (project: Nature-Conservancy-Fish-Image-Prediction, author: Brok-Bucholtz)
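Every snippet on this page passes a max-norm constraint to a layer's weights (and sometimes biases): after each gradient update, any weight vector whose L2 norm exceeds the cap is rescaled onto the cap. It is a common companion to dropout. As a quick orientation, a minimal sketch, assuming the Keras 2 names (Keras 1.x spells them maxnorm and W_constraint, as many snippets below do):

from keras.constraints import max_norm
from keras.layers import Dense
from keras.models import Sequential

model = Sequential()
# Cap each hidden unit's incoming weight vector at L2 norm 3 after every update.
model.add(Dense(64, input_dim=20, activation='relu', kernel_constraint=max_norm(3)))
model.add(Dense(1, activation='sigmoid'))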
def buildConvolution(self, name):
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    assert filters
    assert nb_filter
    convs = []
    for fsz in filters:
        layer_name = '%s-conv-%d' % (name, fsz)
        conv = Convolution2D(
            nb_filter=nb_filter,
            nb_row=fsz,
            nb_col=self.wdim,
            border_mode='valid',
            init='glorot_uniform',
            W_constraint=maxnorm(self.params.get('w_maxnorm')),
            b_constraint=maxnorm(self.params.get('b_maxnorm')),
            name=layer_name
        )
        convs.append(conv)
    self.layers['%s-convolution' % name] = convs
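buildConvolution reads its hyperparameters from self.params; judging from the keys it accesses, a configuration along these lines is assumed (hypothetical, not shown in the snippet):

# Hypothetical params dict matching the keys read above:
params = {
    'filters': [2, 3, 5],  # one Convolution2D per window size fsz
    'nb_filter': 100,      # feature maps per window size
    'w_maxnorm': 3,        # max-norm cap for the kernel weights
    'b_maxnorm': 3,        # max-norm cap for the biases
}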
def load_model(data):
    '''
    Load keras model.
    '''
    model = Sequential()
    model.add(Dense(data.shape[1], activation='relu',
                    input_dim=data.shape[1], kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.25))
    # model.add(Dense(16, activation='relu', kernel_constraint=maxnorm(3)))
    # model.add(Dropout(0.25))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='RMSprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def create_model(learning_rate=0.1, momentum=0.9):
    model = Sequential()
    model.add(Convolution2D(20, 9, 9, border_mode='same', input_shape=(3, SIZE, SIZE)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(50, 5, 5))  # activation applied by the layer below
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(768, init='uniform', activation='relu'))  # input size inferred from Flatten()
    model.add(Dropout(0.1))
    model.add(Dense(384, init='uniform', activation='relu', W_constraint=maxnorm(3)))
    model.add(Dense(4))
    model.add(Activation('softmax'))
    sgd = SGD(lr=learning_rate, momentum=momentum, nesterov=True, decay=1e-6)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def CNNWithKeywordLayer(embed_matrix, embed_input, sequence_length, keywords_length,
                        filter_sizes, num_filters, dropout_prob, hidden_dims,
                        model_variation, embedding_dim=300):
    '''2-way input model: the left branch is a CNN sentence embedding, the right branch is keywords
    '''
    embed1 = Embedding(embed_input, embedding_dim, input_length=sequence_length,
                       weights=[embed_matrix])
    # 1. question model part
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)
    # 2. keyword model part
    # keyword_branch = KeywordLayer(keywords_length, embed_input, embedding_dim, embed_matrix)
    keyword_branch = LSTMLayer(embed_matrix, embed_input, keywords_length, dropout_prob,
                               hidden_dims, embedding_dim)
    # 3. merge layer
    merged = Merge([question_branch, keyword_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint=maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    # sgd = SGD(lr=0.01, momentum=0.9)
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
def QuestionWithAnswersModel(embed_matrix, embed_input, sequence_length, ans_cnt,
                             keywords_length, filter_sizes, num_filters, dropout_prob,
                             hidden_dims, embedding_dim=300):
    '''path 1: question embedding (CNN model)
       path 2: answer embedding (hierarchical RNN model)
       then merge
    '''
    # path 1
    embed1 = Embedding(embed_input, embedding_dim, input_length=sequence_length,
                       weights=[embed_matrix])
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)
    # path 2
    answer_branch = HierarchicalRNN(embed_matrix, embed_input, ans_cnt, keywords_length,
                                    embedding_dim)
    merged = Merge([question_branch, answer_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint=maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
# vim: set expandtab ts=4 sw=4 sts=4 tw=100:
def test_maxnorm():
    for m in test_values:
        norm_instance = constraints.maxnorm(m)
        normed = norm_instance(K.variable(example_array))
        assert np.all(K.eval(normed) < m)

    # a more explicit example
    norm_instance = constraints.maxnorm(2.0)
    x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
    x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0],
                                [2.0, 0, 0],
                                [2. / np.sqrt(3), 2. / np.sqrt(3), 2. / np.sqrt(3)]]).T
    x_normed_actual = K.eval(norm_instance(K.variable(x)))
    assert_allclose(x_normed_actual, x_normed_target, rtol=1e-05)
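The expected values in this test follow directly from the max-norm rule: each column vector whose L2 norm exceeds the cap is rescaled onto the cap, and shorter columns pass through. A minimal NumPy sketch of that rule (our reading of the constraint, not the library's exact code):

import numpy as np

def max_norm_np(w, m, axis=0, eps=1e-7):
    # Rescale every column whose L2 norm exceeds m down to norm m;
    # columns already within the cap pass through (up to eps).
    norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
    desired = np.clip(norms, 0, m)
    return w * desired / (eps + norms)

x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
print(max_norm_np(x, 2.0))  # reproduces x_normed_target above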
def setup_model(embeddings, seq_len, vocab_size):
    # Add input
    inputs = Input(shape=(seq_len,), dtype='int32', name='inputs')
    # Add word vector embeddings
    embedding = Embedding(input_dim=vocab_size, output_dim=embedding_size,
                          input_length=seq_len, name='embedding',
                          trainable=True)(inputs)
    h = GlobalAveragePooling1D()(embedding)
    # Add output layer
    output = Dense(units=output_size,
                   activation='sigmoid',
                   kernel_initializer='he_normal',
                   # kernel_regularizer=regularizers.l2(l2_reg_lambda),
                   # kernel_constraint=maxnorm(max_norm),
                   # bias_constraint=maxnorm(max_norm),
                   name='output')(h)
    # build the model
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss={'output': 'binary_crossentropy'},
                  optimizer=Adam(lr=base_lr, epsilon=1e-6, decay=decay_rate),
                  metrics=['accuracy'])
    return model
Source file: cnn_bounding_box.py (project: Nature-Conservancy-Fish-Image-Prediction, author: Brok-Bucholtz)
def train(img_shape):
    # Model
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=img_shape, activation='relu',
                            W_constraint=maxnorm(3), dim_ordering='tf'))
    model.add(Dropout(0.2))
    model.add(Convolution2D(32, 3, 3, activation='relu', W_constraint=maxnorm(3), dim_ordering='tf'))
    model.add(MaxPooling2D())
    model.add(Convolution2D(32, 3, 3, activation='relu', W_constraint=maxnorm(3), dim_ordering='tf'))
    model.add(MaxPooling2D())
    model.add(Convolution2D(32, 3, 3, activation='relu', W_constraint=maxnorm(3), dim_ordering='tf'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(512, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.5))
    model.add(Dense(8))  # NB: linear output, although categorical_crossentropy below expects probabilities
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    for features, labels in feature_labels_generator():
        model.fit(features, labels, nb_epoch=1)
    # TODO: Get generator to
    # samples_per_epoch = 100
    # model.fit_generator(feature_labels_generator(), samples_per_epoch, nb_epoch=10)

    return model
def create_model(dropout_rate=0.0, weight_constraint=0):
    # create model
    model = Sequential()
    model.add(Dense(12, input_dim=8, init='uniform', activation='softplus',
                    W_constraint=maxnorm(weight_constraint)))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, init='uniform', activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
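The dropout_rate/weight_constraint signature suggests this builder is meant for a scikit-learn hyperparameter grid search. A sketch of that wiring, assuming the Keras 1-era API used in the snippet and with stand-in data (the snippet's own dataset loading is not shown):

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np

X = np.random.rand(100, 8)              # stand-in features: 8 inputs, matching input_dim=8 above
Y = np.random.randint(0, 2, size=100)   # stand-in binary labels

clf = KerasClassifier(build_fn=create_model, nb_epoch=50, batch_size=10, verbose=0)
param_grid = dict(dropout_rate=[0.0, 0.1, 0.2], weight_constraint=[1, 2, 3, 4, 5])
grid = GridSearchCV(estimator=clf, param_grid=param_grid)
grid_result = grid.fit(X, Y)
print('Best: %f using %s' % (grid_result.best_score_, grid_result.best_params_))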
# fix random seed for reproducibility
def create_model(neurons=1):
    # create model
    model = Sequential()
    model.add(Dense(neurons, input_dim=8, init='uniform', activation='softplus',
                    W_constraint=maxnorm(4)))
    model.add(Dropout(0.1))
    model.add(Dense(1, init='uniform', activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# fix random seed for reproducibility
def buildConvolution(self, name):
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    assert filters
    assert nb_filter
    convs = []
    for fsz in filters:
        layer_name = '%s-conv-%d' % (name, fsz)
        conv = Convolution1D(
            nb_filter=nb_filter,
            filter_length=fsz,
            border_mode='valid',
            # activation='relu',
            subsample_length=1,
            init='glorot_uniform',
            # init=init,
            # init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
            W_constraint=maxnorm(self.params.get('w_maxnorm')),
            b_constraint=maxnorm(self.params.get('b_maxnorm')),
            # W_regularizer=regularizers.l2(self.params.get('w_l2')),
            # b_regularizer=regularizers.l2(self.params.get('b_l2')),
            # input_shape=(self.q_length, self.wdim),
            name=layer_name
        )
        convs.append(conv)
    self.layers['%s-convolution' % name] = convs
def test_maxnorm(self):
    from keras.constraints import maxnorm

    for m in self.some_values:
        norm_instance = maxnorm(m)
        normed = norm_instance(self.example_array)
        assert np.all(normed.eval() < m)

    # a more explicit example
    norm_instance = maxnorm(2.0)
    x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
    x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0],
                                [2. / np.sqrt(3), 2. / np.sqrt(3), 2. / np.sqrt(3)]]).T
    x_normed_actual = norm_instance(x).eval()
    assert_allclose(x_normed_actual, x_normed_target)
def feed_forward_net(input, output, hidden_layers=[64, 64], activations='relu',
                     dropout_rate=0., l2=0., constrain_norm=False):
    '''
    Helper function for building a Keras feed forward network.

    input: Keras Input object appropriate for the data, e.g. input=Input(shape=(20,))
    output: function representing the final layer of the network, mapping from the
            last hidden layer to output, e.g. output = Dense(10, activation='softmax')
            for 10-class classification, or output = Dense(1, activation='linear')
            for regression.
    '''
    state = input
    if isinstance(activations, str):
        activations = [activations] * len(hidden_layers)

    for h, a in zip(hidden_layers, activations):
        w_reg = keras.regularizers.l2(l2) if l2 > 0. else None
        const = maxnorm(2) if constrain_norm else None
        state = Dense(h, activation=a, kernel_regularizer=w_reg, kernel_constraint=const)(state)
        if dropout_rate > 0.:
            state = Dropout(dropout_rate)(state)
    return output(state)
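A minimal call, reusing the docstring's own example shapes (the Input/Dense/Model imports are assumed to come from keras.layers and keras.models):

from keras.layers import Input, Dense
from keras.models import Model

x_in = Input(shape=(20,))
y_out = feed_forward_net(x_in, Dense(1, activation='linear'),
                         hidden_layers=[64, 64], dropout_rate=0.1,
                         constrain_norm=True)   # maxnorm(2) on every hidden layer
model = Model(inputs=x_in, outputs=y_out)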
def convnet(input, output, dropout_rate=0., input_shape=(1, 28, 28), batch_size=100,
            l2_rate=0.001, nb_epoch=12, img_rows=28, img_cols=28, nb_filters=64,
            pool_size=(2, 2), kernel_size=(3, 3), activations='relu', constrain_norm=False):
    '''
    Helper function for building a Keras convolutional network.

    input: Keras Input object appropriate for the data, e.g. input=Input(shape=(20,))
    output: function representing the final layer of the network, mapping from the
            last hidden layer to output, e.g. output = Dense(10, activation='softmax')
            for 10-class classification, or output = Dense(1, activation='linear')
            for regression.
    '''
    const = maxnorm(2) if constrain_norm else None

    state = Convolution2D(nb_filters, kernel_size, padding='valid',
                          input_shape=input_shape, activation=activations,
                          kernel_regularizer=l2(l2_rate), kernel_constraint=const)(input)
    state = Convolution2D(nb_filters, kernel_size,
                          activation=activations, kernel_regularizer=l2(l2_rate),
                          kernel_constraint=const)(state)
    state = MaxPooling2D(pool_size=pool_size)(state)
    state = Flatten()(state)

    if dropout_rate > 0.:
        state = Dropout(dropout_rate)(state)
    state = Dense(128, activation=activations, kernel_regularizer=l2(l2_rate),
                  kernel_constraint=const)(state)
    if dropout_rate > 0.:
        state = Dropout(dropout_rate)(state)
    return output(state)
def base_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=x_train.shape[1:]))
    model.add(Dropout(0.2))
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=False)
    # Train model
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
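base_model reads x_train and num_classes from the enclosing scope; a CIFAR-10-style setup such as the following is assumed (illustrative only, the snippet's own data loading is not shown):

from keras.datasets import cifar10
from keras.utils import np_utils

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0   # scale pixels to [0, 1]
num_classes = 10
y_train = np_utils.to_categorical(y_train, num_classes)

model = base_model()
model.fit(x_train, y_train, epochs=25, batch_size=64)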
def build_model(data, word_weights, tag_window=5, embed_dim=100):
    batch_size = 32
    nb_epoch = 16
    nb_class = 4
    hidden_dim = 128

    train_x = np.array(list(data['x']))
    train_y = np.array(list(data['y']))
    train_y = np_utils.to_categorical(train_y, nb_class)
    print(train_x.shape)
    print(train_y.shape)

    input_x = Input(shape=(tag_window,), dtype='float32', name='input_x')
    embed_x = Embedding(output_dim=embed_dim,
                        input_dim=word_weights.shape[0],
                        input_length=tag_window,
                        weights=[word_weights],
                        name='embed_x')(input_x)
    bi_lstm = Bidirectional(LSTM(hidden_dim, return_sequences=False), merge_mode='sum')(embed_x)
    x_dropout = Dropout(0.5)(bi_lstm)
    x_output = Dense(nb_class,
                     # kernel_regularizer=regularizers.l2(0.01),
                     # kernel_constraint=maxnorm(3.0),
                     # activity_regularizer=regularizers.l2(0.01),
                     activation='softmax')(x_dropout)
    model = Model(input=[input_x], output=[x_output])
    model.compile(optimizer='adamax', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit([train_x], [train_y], validation_split=0.2,
              batch_size=batch_size, epochs=nb_epoch, shuffle=True)
def build_cnn(kernel_size, nb_filters, embed_x):
    """Build one Conv1D + max-pooling branch for the given kernel size."""
    name = 'cnn_' + str(kernel_size)
    cnn_x = Conv1D(filters=nb_filters,
                   kernel_size=kernel_size,
                   padding='valid',
                   activation='tanh')(embed_x)
    # Options previously tried on the Conv1D layer:
    # kernel_regularizer=regularizers.l2(0.01),
    # kernel_constraint=maxnorm(3.0),
    # activity_regularizer=regularizers.l2(0.01)
    maxPooling_x = MaxPooling1D(kernel_size)(cnn_x)
    return maxPooling_x
def build_model(data, word_weights, tag_window=5, embed_dim=100):
    batch_size = 50
    nb_epoch = 8
    nb_class = 4
    hidden_dim = 128
    nb_filters = 100

    train_x = np.array(list(data['x']))
    train_y = np.array(list(data['y']))
    train_y = np_utils.to_categorical(train_y, nb_class)
    print(train_x.shape)
    print(train_y.shape)

    input_x = Input(shape=(tag_window,), dtype='float32', name='input_x')
    embed_x = Embedding(output_dim=embed_dim,
                        input_dim=word_weights.shape[0],
                        input_length=tag_window,
                        weights=[word_weights],
                        name='embed_x')(input_x)
    # bi_lstm = Bidirectional(LSTM(hidden_dim, return_sequences=False), merge_mode='sum')(embed_x)
    maxPooling_2 = build_cnn(2, nb_filters, embed_x)
    print('finish 2')
    maxPooling_3 = build_cnn(3, nb_filters, embed_x)
    print('finish 3')
    maxPooling_4 = build_cnn(4, nb_filters, embed_x)
    print('finish 4')
    maxPooling_5 = build_cnn(5, nb_filters, embed_x)
    maxPooling = concatenate([maxPooling_2, maxPooling_3, maxPooling_4, maxPooling_5], axis=1)
    x_dropout = Dropout(0.5)(maxPooling)  # apply dropout to the full concatenation
    x_flatten = Flatten()(x_dropout)
    x_output = Dense(nb_class,
                     # kernel_regularizer=regularizers.l2(0.01),
                     # kernel_constraint=maxnorm(3.0),
                     # activity_regularizer=regularizers.l2(0.01),
                     activation='softmax')(x_flatten)
    model = Model(input=[input_x], output=[x_output])
    model.compile(optimizer='adamax', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit([train_x], [train_y], validation_split=0.2,
              batch_size=batch_size, epochs=nb_epoch, shuffle=True)
def model(X_train, Y_train, X_test, Y_test):
    W_maxnorm = 3
    DROPOUT = {{choice([0.3, 0.5, 0.7])}}  # hyperas search-space template

    model = Sequential()
    model.add(Convolution2D(64, 1, 5, border_mode='same', input_shape=(4, 1, DATASIZE),
                            activation='relu', W_constraint=maxnorm(W_maxnorm)))
    model.add(MaxPooling2D(pool_size=(1, 5), strides=(1, 3)))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(DROPOUT))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(DROPOUT))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    myoptimizer = RMSprop(lr={{choice([0.01, 0.001, 0.0001])}}, rho=0.9, epsilon=1e-06)
    mylossfunc = 'categorical_crossentropy'
    model.compile(loss=mylossfunc, optimizer=myoptimizer, metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size=100, nb_epoch=5, validation_split=0.1)

    score, acc = model.evaluate(X_test, Y_test)
    model_arch = 'MODEL_ARCH'
    bestaccfile = join('TOPDIR', model_arch, model_arch + '_hyperbestacc')
    reportAcc(acc, score, bestaccfile)
    return {'loss': score, 'status': STATUS_OK, 'model': (model.to_json(), myoptimizer, mylossfunc)}
def build_model():
    main_input = Input(shape=(maxlen,), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                            name='maxConv4')(conv4)
    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)
    x = merge([maxConv4, maxConv5], mode='concat')
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(1, init='he_normal',
                   activation='sigmoid', name='output')(x)
    model = Model(input=main_input, output=output)
    model.compile(loss={'output': 'binary_crossentropy'},
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  metrics=['accuracy'])
    return model
def build_model():
    main_input = Input(shape=(maxlen,), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Conv1D(filters=nb_filter,
                   kernel_size=4,
                   padding='valid',
                   activation='relu',
                   strides=1,
                   name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_size=2,
                            name='maxConv4')(conv4)
    conv5 = Conv1D(filters=nb_filter,
                   kernel_size=5,
                   padding='valid',
                   activation='relu',
                   strides=1,
                   name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_size=2,
                            name='maxConv5')(conv5)
    # x = merge([maxConv4, maxConv5], mode='concat')
    x = keras.layers.concatenate([maxConv4, maxConv5])
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', kernel_initializer='he_normal',
              kernel_constraint=maxnorm(3), bias_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(1, kernel_initializer='he_normal',
                   activation='sigmoid', name='output')(x)
    model = Model(inputs=main_input, outputs=output)
    model.compile(loss='binary_crossentropy',
                  # compile() requires an optimizer; other settings tried:
                  # Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                  # Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4),
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  metrics=['accuracy'])
    return model
def build_model():
    print('Build model...%d of %d' % (i + 1, folds))
    main_input = Input(shape=(maxlen,), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                            name='maxConv4')(conv4)
    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)
    x = merge([maxConv4, maxConv5], mode='concat')
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(1, init='he_normal',
                   activation='sigmoid', name='output')(x)
    model = Model(input=main_input, output=output)
    model.compile(loss={'output': 'binary_crossentropy'},
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  metrics=['accuracy'])
    return model
def build_model():
    main_input = Input(shape=(maxlen,), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                            name='maxConv4')(conv4)
    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)
    x = merge([maxConv4, maxConv5], mode='concat')
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(nb_classes, init='he_normal',
                   activation='softmax', name='output')(x)
    model = Model(input=main_input, output=output)
    model.compile(loss={'output': 'categorical_crossentropy'},
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  metrics=['accuracy'])
    return model
def model():
    model = Sequential()

    # input layer
    model.add(Dense(120, input_dim=input_dims))  # , kernel_constraint=maxnorm(5)
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.2))  # reduce overfitting with dropout regularization

    # hidden layers: eight identical Dense(120) blocks
    for _ in range(8):
        model.add(Dense(120))
        model.add(Activation(act_func))
        model.add(Dropout(0.2))

    # output layer (y_pred)
    model.add(Dense(1, activation='linear'))

    # Use a large learning rate with decay and a large momentum: increase the learning
    # rate by a factor of 10 to 100 and use a high momentum value of 0.9 or 0.99.
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    # compile this model
    model.compile(loss='mean_squared_error',  # 'mean_absolute_error' is an alternative
                  optimizer='adam',
                  metrics=[r2_keras])  # you can add several metrics if needed

    # Visualize NN architecture
    print(model.summary())
    return model
# initialize input dimension