def compile(self, optimizer='sgd'):
    optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                      'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                      'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                      'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                      'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}
    self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
    return self.model
Example source code for the Python class Adadelta()
def test_adadelta(self):
    print('test Adadelta')
    self.assertTrue(_test_optimizer(Adadelta()))
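The `_test_optimizer` helper is defined elsewhere in the surrounding test suite; a plausible minimal stand-in, assuming it only has to verify that an optimizer can drive a few training steps on random data under Keras 2.x (shapes, epoch count, and the success criterion are illustrative, not the suite's actual values):

import numpy as np
from keras.layers import Dense
from keras.models import Sequential

def _test_optimizer(optimizer):
    # Fit a tiny dense net on random data and report whether training completed.
    x = np.random.random((100, 10))
    y = np.random.randint(0, 2, size=(100, 1))
    model = Sequential()
    model.add(Dense(8, activation='relu', input_dim=10))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    history = model.fit(x, y, epochs=2, batch_size=16, verbose=0)
    return len(history.history['loss']) == 2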
def __construct_neural_network(self):
    model = Sequential()
    model.add(Dense(400,
                    activation='relu',
                    input_shape=(len(self.dictionary.keys()),)))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(len(self.all_topics), activation='softmax'))
    model.compile(loss=categorical_crossentropy,
                  optimizer=Adadelta(),
                  metrics=['accuracy'])
    return model
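A hedged usage sketch for the topic classifier above, where `model` is the network the method returns; `self.dictionary` (term-to-index map) and `self.all_topics` (label set) belong to the surrounding class, so the dimensions below are placeholders:

import numpy as np
from keras.utils import np_utils

# Hypothetical sizes standing in for len(self.dictionary) and len(self.all_topics).
n_docs, vocab_size, n_topics = 256, 5000, 12
x = np.random.randint(0, 2, size=(n_docs, vocab_size)).astype('float32')  # bag-of-words vectors
y = np_utils.to_categorical(np.random.randint(0, n_topics, size=(n_docs,)), n_topics)
model.fit(x, y, batch_size=32, epochs=5, validation_split=0.1)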
def _construct(self, inputShape):
    init = 'glorot_normal'
    activation = 'relu'
    loss = 'mse'
    print "loss", loss
    layers = [self.prms["h1"], self.prms["h2"]]
    dropout = [self.prms["dropout1"], self.prms["dropout2"]]
    optimizer = Adadelta(lr=self.prms["adadelta_lr"], rho=(1.0 - self.prms["adadelta_rho_m"]),
                         epsilon=self.prms["adadelta_eps"])
    decay = self.prms["decay"]
    model = Sequential()
    for i in range(len(layers)):
        if i == 0:
            print ("Input shape: " + str(inputShape))
            print ("Adding Layer " + str(i) + ": " + str(layers[i]))
            model.add(Dense(layers[i], input_dim=inputShape, init=init, W_regularizer=l2(decay)))
        else:
            print ("Adding Layer " + str(i) + ": " + str(layers[i]))
            model.add(Dense(layers[i], init=init, W_regularizer=l2(decay)))
        print ("Adding " + activation + " layer")
        model.add(Activation(activation))
        model.add(BatchNormalization())
        if len(dropout) > i:
            print ("Adding " + str(dropout[i]) + " dropout")
            model.add(Dropout(dropout[i]))
    model.add(Dense(1, init=init))  # end in a single output node for regression-style output
    # ADAM = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    model.compile(loss=loss, optimizer=optimizer)
    self.model = model
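`self.prms` is filled in by the surrounding hyperparameter search and is not shown; a hypothetical example of the keys `_construct` reads. Note the `adadelta_rho_m` convention above: the stored value is 1 - rho, so 0.05 below corresponds to rho = 0.95.

prms = {
    'h1': 256, 'h2': 64,               # hidden layer widths
    'dropout1': 0.5, 'dropout2': 0.3,  # per-layer dropout rates
    'adadelta_lr': 1.0,                # Adadelta learning rate
    'adadelta_rho_m': 0.05,            # rho is built as 1.0 - this value
    'adadelta_eps': 1e-8,              # numerical stability term
    'decay': 1e-4,                     # L2 weight regularization strength
}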
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                            name='maxConv4')(conv4)
    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)
    x = merge([maxConv4, maxConv5], mode='concat')
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(1, init='he_normal',
                   activation='sigmoid', name='output')(x)
    model = Model(input=main_input, output=output)
    model.compile(loss={'output': 'binary_crossentropy'},
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  metrics=["accuracy"])
    return model
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Conv1D(filters=nb_filter,
                   kernel_size=4,
                   padding='valid',
                   activation='relu',
                   strides=1,
                   name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_size=2,
                            name='maxConv4')(conv4)
    conv5 = Conv1D(filters=nb_filter,
                   kernel_size=5,
                   padding='valid',
                   activation='relu',
                   strides=1,
                   name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_size=2,
                            name='maxConv5')(conv5)
    # x = merge([maxConv4, maxConv5], mode='concat')
    x = keras.layers.concatenate([maxConv4, maxConv5])
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', kernel_initializer='he_normal',
              kernel_constraint=maxnorm(3), bias_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(1, kernel_initializer='he_normal',
                   activation='sigmoid', name='output')(x)
    model = Model(inputs=main_input, outputs=output)
    # compile() needs an optimizer; all candidates were commented out in the
    # original, so the Adadelta one is re-enabled here.
    model.compile(loss='binary_crossentropy',
                  optimizer=Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                  # optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  # optimizer=Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4),
                  metrics=["accuracy"])
    return model
def build_model():
    print('Build model...%d of %d' % (i + 1, folds))
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                            name='maxConv4')(conv4)
    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)
    x = merge([maxConv4, maxConv5], mode='concat')
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(1, init='he_normal',
                   activation='sigmoid', name='output')(x)
    model = Model(input=main_input, output=output)
    model.compile(loss={'output': 'binary_crossentropy'},
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  metrics=["accuracy"])
    return model
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding = Embedding(max_features, embedding_dims,
                          weights=[np.matrix(W)], input_length=maxlen,
                          name='embedding')(main_input)
    embedding = Dropout(0.50)(embedding)
    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                            name='maxConv4')(conv4)
    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)
    x = merge([maxConv4, maxConv5], mode='concat')
    x = Dropout(0.15)(x)
    x = RNN(rnn_output_size)(x)
    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)
    x = Dropout(0.10, name='drop')(x)
    output = Dense(nb_classes, init='he_normal',
                   activation='softmax', name='output')(x)
    model = Model(input=main_input, output=output)
    model.compile(loss={'output': 'categorical_crossentropy'},
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  metrics=["accuracy"])
    return model
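Because this variant ends in an `nb_classes`-way softmax trained with categorical cross-entropy, integer labels must be one-hot encoded before calling `fit`; a small sketch (label values are illustrative):

import numpy as np
from keras.utils import np_utils

nb_classes = 5                                    # must match the output layer above
labels = np.random.randint(0, nb_classes, size=(1000,))
Y = np_utils.to_categorical(labels, nb_classes)   # shape (1000, nb_classes)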
def main():
    # read pre-trained embeddings
    embeddings = load_embeddings(embedding_path, 'word2vec')
    test_accus = []  # collect test accuracy for each fold
    for i in xrange(n_folds):
        fold = i + 1
        logging.info('Fold {} of {}...'.format(fold, n_folds))
        # read data
        train_data, train_labels, test_data, test_labels, seq_len, vocab_size = load_data_MR_fasttext(data_path, fold=fold)
        # update train directory according to fold number
        train_dir = base_train_dir + '/' + str(fold)
        # create the train directory if it does not exist
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        # create log file handler
        file_handler = logging.FileHandler(pjoin(train_dir, "log.txt"))
        logging.getLogger().addHandler(file_handler)
        # if the model has already been trained, reload it; otherwise create a new one
        if os.path.exists(train_dir + '/model.json'):
            # load json and create model
            json_file = open(train_dir + '/model.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            model = model_from_json(loaded_model_json)
            # load weights into the new model
            model.load_weights(train_dir + "/model.h5")
            model.compile(loss={'output': 'binary_crossentropy'},
                          optimizer=Adadelta(lr=base_lr, epsilon=1e-6, decay=decay_rate),
                          metrics=["accuracy"])
            print("Loaded model from disk!")
        else:
            model = setup_model(embeddings, seq_len, vocab_size)
            print("Created a new model!")
        # train the model
        test_accu = train(model, train_data, train_labels, test_data, test_labels, embeddings, train_dir)
        # log test accuracy result
        logging.info("\nTest Accuracy for fold {}: {}".format(fold, test_accu))
        test_accus.append(test_accu)
    # write log of test accuracy for all folds (enumerate from 1 to match fold numbering)
    test_accu_log = open(base_train_dir + "/final_test_accuracy.txt", 'w')
    test_accu_log.write('\n'.join(['Fold {} Test Accuracy: {}'.format(fold, test_accu)
                                   for fold, test_accu in enumerate(test_accus, 1)]))
    test_accu_log.write('\nAvg test acc: {}'.format(np.mean(test_accus)))
    test_accu_log.close()
def main():
    # read pre-trained embeddings
    embeddings = load_embeddings(embedding_path, 'word2vec')
    test_accus = []  # collect test accuracy for each fold
    for i in xrange(n_folds):
        fold = i + 1
        logging.info('Fold {} of {}...'.format(fold, n_folds))
        # read data
        train_data, train_labels, test_data, test_labels, seq_len, vocab_size = load_data_MR(data_path, fold=fold)
        # update train directory according to fold number
        train_dir = base_train_dir + '/' + str(fold)
        # create the train directory if it does not exist
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        # create log file handler
        file_handler = logging.FileHandler(pjoin(train_dir, "log.txt"))
        logging.getLogger().addHandler(file_handler)
        # if the model has already been trained, reload it; otherwise create a new one
        if os.path.exists(train_dir + '/model.json'):
            # load json and create model
            json_file = open(train_dir + '/model.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            model = model_from_json(loaded_model_json)
            # load weights into the new model
            model.load_weights(train_dir + "/model.h5")
            model.compile(loss={'output': 'binary_crossentropy'},
                          optimizer=Adadelta(lr=base_lr, epsilon=1e-6, decay=decay_rate),
                          metrics=["accuracy"])
            print("Loaded model from disk!")
        else:
            model = setup_model(embeddings, seq_len, vocab_size)
            print("Created a new model!")
        # train the model
        test_accu = train(model, train_data, train_labels, test_data, test_labels, embeddings, train_dir)
        # log test accuracy result
        logging.info("\nTest Accuracy for fold {}: {}".format(fold, test_accu))
        test_accus.append(test_accu)
    # write log of test accuracy for all folds (enumerate from 1 to match fold numbering)
    test_accu_log = open(base_train_dir + "/final_test_accuracy.txt", 'w')
    test_accu_log.write('\n'.join(['Fold {} Test Accuracy: {}'.format(fold, test_accu)
                                   for fold, test_accu in enumerate(test_accus, 1)]))
    test_accu_log.write('\nAvg test acc: {}'.format(np.mean(test_accus)))
    test_accu_log.close()
def LSTM_model2(X_train, Y_train, X_val, Y_val, X_test, Y_test, test_label):
    print('Loading embedding successful!')
    print('len(X_train):' + str(len(X_train)))
    print('len(X_val):' + str(len(X_val)))
    print('len(X_test):' + str(len(X_test)))
    print('len(Y_train):' + str(len(Y_train)))
    print('len(Y_val):' + str(len(Y_val)))
    print('len(Y_test):' + str(len(Y_test)))
    # print(test_label)
    print('X_train shape:', X_train.shape)
    print('X_val shape:', X_val.shape)
    print('X_test shape:', X_test.shape)
    print('Build model...')
    model = Sequential()
    # stacked-LSTM variant (commented out):
    # model.add(LSTM(lstm_output_dim, return_sequences=True,
    #                input_shape=(maxlen, embedding_dim)))
    # model.add(LSTM(lstm_output_dim, return_sequences=True))
    # model.add(LSTM(lstm_output_dim))
    model.add(LSTM(lstm_output_dim, input_shape=(maxlen, embedding_dim)))
    model.add(Dense(hidden_dim))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # model.add(Dense(hidden_dim))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # optmr = Adadelta(lr=0.9, rho=0.90, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    plot(model, to_file='model.png')
    checkpointer = ModelCheckpoint(filepath='best_model.hdf5', monitor='val_acc', verbose=1,
                                   save_best_only=True, mode='max')
    # history = LossHistory()
    hist = model.fit(X_train, Y_train, batch_size=32, nb_epoch=20, verbose=1, shuffle=True,  # 20 10
                     validation_data=(X_val, Y_val), callbacks=[checkpointer])
    # print(history.losses)
    print hist.history
    model.load_weights('best_model.hdf5')
    # score = model.evaluate(X_test, Y_test, batch_size=32, verbose=1)
    # print 'score:', score
    # p_label = model.predict_classes(X_test, batch_size=32, verbose=1)  # predict class labels directly
    p_prob = model.predict_proba(X_test, batch_size=32, verbose=1)
    p_label = np.array([np.argsort(item)[-1] for item in p_prob])
    test_acc = np_utils.accuracy(p_label, test_label)
    return p_label, p_prob
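LSTM_model2 takes sequences that are already embedded, so each input batch has shape (samples, maxlen, embedding_dim) and the labels are one-hot. A hedged calling sketch with dummy data; maxlen, embedding_dim, and nb_classes are module-level constants in the original (as are the helpers plot, ModelCheckpoint, and np_utils it relies on), so the values below are placeholders:

import numpy as np
from keras.utils import np_utils

maxlen, embedding_dim, nb_classes = 50, 100, 4   # placeholder values

def dummy_split(n):
    # Random pre-embedded sequences plus one-hot and integer labels.
    X = np.random.random((n, maxlen, embedding_dim)).astype('float32')
    labels = np.random.randint(0, nb_classes, size=(n,))
    return X, np_utils.to_categorical(labels, nb_classes), labels

X_train, Y_train, _ = dummy_split(400)
X_val, Y_val, _ = dummy_split(100)
X_test, Y_test, test_label = dummy_split(100)
p_label, p_prob = LSTM_model2(X_train, Y_train, X_val, Y_val, X_test, Y_test, test_label)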
Stock_Prediction_Model_Stateless_LSTM.py (project: StockRecommendSystem, author: doncat99)
def lstm_model(self):
    model = Sequential()
    first = True
    for idx in range(len(self.paras.model['hidden_layers'])):
        if idx == (len(self.paras.model['hidden_layers']) - 1):
            model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=False))
            model.add(Activation(self.paras.model['activation']))
            model.add(Dropout(self.paras.model['dropout']))
        elif first:
            model.add(LSTM(input_shape=(None, int(self.paras.n_features)),
                           units=int(self.paras.model['hidden_layers'][idx]),
                           return_sequences=True))
            model.add(Activation(self.paras.model['activation']))
            model.add(Dropout(self.paras.model['dropout']))
            first = False
        else:
            model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=True))
            model.add(Activation(self.paras.model['activation']))
            model.add(Dropout(self.paras.model['dropout']))
    if self.paras.model['optimizer'] == 'sgd':
        # optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        optimizer = optimizers.SGD(lr=self.paras.model['learning_rate'], decay=1e-6, momentum=0.9, nesterov=True)
    elif self.paras.model['optimizer'] == 'rmsprop':
        # optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        optimizer = optimizers.RMSprop(lr=self.paras.model['learning_rate'] / 10, rho=0.9, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adagrad':
        # optimizer = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
        optimizer = optimizers.Adagrad(lr=self.paras.model['learning_rate'], epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adam':
        # optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        optimizer = optimizers.Adam(lr=self.paras.model['learning_rate'] / 10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adadelta':
        optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adamax':
        optimizer = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'nadam':
        optimizer = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    else:
        optimizer = optimizers.Adam(lr=self.paras.model['learning_rate'] / 10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # output layer
    model.add(Dense(units=self.paras.model['out_layer']))
    model.add(Activation(self.paras.model['out_activation']))
    model.compile(loss=self.paras.model['loss'], optimizer=optimizer, metrics=['accuracy'])
    return model
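`self.paras` is the project's configuration object; a hypothetical sketch of the fields `lstm_model` reads, to make the branching above easier to follow:

# Hypothetical configuration mirroring the attributes accessed above.
class Paras(object):
    n_features = 10
    model = {
        'hidden_layers': [64, 32, 16],   # one LSTM layer per entry
        'activation': 'tanh',
        'dropout': 0.3,
        'optimizer': 'adadelta',         # selects the Adadelta branch
        'learning_rate': 0.01,           # unused by the Adadelta branch above
        'out_layer': 1,                  # units in the final Dense layer
        'out_activation': 'linear',
        'loss': 'mse',
    }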
NeuralNetRegressor.py (project: job-salary-prediction, author: soton-data-mining)
def predict(self):
    def get_weights(model, layer_id):
        layer = model.layers[layer_id]
        weights = layer.get_weights()
        firstWeights = weights[1]
        print(firstWeights)

    def export_model(model, name):
        if not os.path.exists("neural_net_models"):
            os.makedirs("neural_net_models")
        model_json = model.to_json()
        with open("neural_net_models/" + name + ".json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights("neural_net_models/" + name + ".h5")

    def import_model(model_name):
        json_file = open("neural_net_models/" + model_name + '.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        # load weights into the new model
        model.load_weights("neural_net_models/" + model_name + ".h5")
        print("Loaded " + model_name + " from disk")
        return model

    model = import_model('ut_Dense100_L1_m5s3_L2_m1s03_lr07_d1e07')
    """
    model = Sequential()
    model.add(Dense(100, input_dim=85, activation='relu',
                    kernel_initializer=initializers.RandomNormal(
                        mean=5, stddev=3, seed=None)))
    model.add(Dense(1, activation='linear',
                    kernel_initializer=initializers.RandomNormal(
                        mean=1, stddev=0.3, seed=None)))
    """
    # rms = opt.RMSprop(lr=0.01, rho=0.9, epsilon=1e-08, decay=1e-9)
    adadelta = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
    # nadam = opt.Nadam(lr=0.05, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    model.compile(loss='mean_absolute_error', optimizer=adadelta, metrics=[metrics.mae])
    # optimizer='adam'
    model.fit(
        self.x_train, self.y_train,
        validation_data=(self.x_test, self.y_test),
        epochs=1000, batch_size=160000, verbose=1
    )
    export_model(model, 'ut_Dense100_L1_m5s3_L2_m1s03_lr07_d1e07')
    return (self.y_train, self.y_test)
def model_generate():
    img_rows, img_cols = 48, 48
    model = Sequential()
    model.add(Convolution2D(64, 5, 5, border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(keras.layers.advanced_activations.PReLU(init='zero', weights=None))
    model.add(keras.layers.convolutional.ZeroPadding2D(padding=(2, 2), dim_ordering='th'))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))
    model.add(keras.layers.convolutional.ZeroPadding2D(padding=(1, 1), dim_ordering='th'))
    model.add(Convolution2D(64, 3, 3))
    model.add(keras.layers.advanced_activations.PReLU(init='zero', weights=None))
    model.add(keras.layers.convolutional.ZeroPadding2D(padding=(1, 1), dim_ordering='th'))
    model.add(Convolution2D(64, 3, 3))
    model.add(keras.layers.advanced_activations.PReLU(init='zero', weights=None))
    model.add(keras.layers.convolutional.AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(keras.layers.convolutional.ZeroPadding2D(padding=(1, 1), dim_ordering='th'))
    model.add(Convolution2D(128, 3, 3))
    model.add(keras.layers.advanced_activations.PReLU(init='zero', weights=None))
    model.add(keras.layers.convolutional.ZeroPadding2D(padding=(1, 1), dim_ordering='th'))
    model.add(Convolution2D(128, 3, 3))
    model.add(keras.layers.advanced_activations.PReLU(init='zero', weights=None))
    model.add(keras.layers.convolutional.ZeroPadding2D(padding=(1, 1), dim_ordering='th'))
    model.add(keras.layers.convolutional.AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(keras.layers.advanced_activations.PReLU(init='zero', weights=None))
    model.add(Dropout(0.2))
    model.add(Dense(1024))
    model.add(keras.layers.advanced_activations.PReLU(init='zero', weights=None))
    model.add(Dropout(0.2))
    model.add(Dense(7))
    model.add(Activation('softmax'))
    ada = Adadelta(lr=0.1, rho=0.95, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=ada,
                  metrics=['accuracy'])
    model.summary()
    return model
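A hedged usage sketch for the 7-class CNN above, assuming the Keras 1.x API used in the snippet and Theano dimension ordering (channels first, matching the dim_ordering='th' padding layers); the data is random and only demonstrates the expected shapes:

import numpy as np
from keras.utils import np_utils

model = model_generate()
# Dummy 48x48 single-channel images in 'th' (channels-first) layout.
X = np.random.random((128, 1, 48, 48)).astype('float32')
y = np_utils.to_categorical(np.random.randint(0, 7, size=(128,)), 7)
model.fit(X, y, batch_size=32, nb_epoch=2, validation_split=0.1)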