def keras_mlp1(train2, y, test2, v, z):
    # Three-layer SELU MLP on robust-scaled features; cross-validated
    # training is delegated to the external keras_common helper.
    import sys  # for reading the function name below
    from sklearn import preprocessing
    from keras import layers
    from keras import models
    from keras import optimizers
    cname = sys._getframe().f_code.co_name
    num_splits = 9
    scaler = preprocessing.RobustScaler()
    train3 = scaler.fit_transform(train2)
    test3 = scaler.transform(test2)
    input_dims = train3.shape[1]
    def build_model():
        input_ = layers.Input(shape=(input_dims,))
        model = layers.Dense(256, kernel_initializer='Orthogonal')(input_)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        model = layers.Activation('selu')(model)
        #model = layers.Dropout(0.7)(model)
        model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        model = layers.Activation('selu')(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.9)(model)
        model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        model = layers.Activation('selu')(model)
        #model = layers.advanced_activations.PReLU()(model)
        model = layers.Dense(1, activation='sigmoid')(model)
        model = models.Model(input_, model)
        model.compile(loss='binary_crossentropy', optimizer=optimizers.Nadam())
        #print(model.summary(line_length=120))
        return model
    keras_common(train3, y, test3, v, z, num_splits, cname, build_model)
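keras_common is referenced here but never defined on this page. Below is a minimal sketch of what such a helper plausibly does, assuming v and z are pandas DataFrames aligned with the train/test rows that collect out-of-fold and averaged test predictions under column cname; all fit settings are placeholders:

import numpy as np
from sklearn.model_selection import StratifiedKFold

def keras_common(train3, y, test3, v, z, num_splits, cname, build_model):
    # Hypothetical reconstruction: stratified K-fold loop storing
    # out-of-fold predictions in v[cname] and averaged test predictions in z[cname].
    y = np.asarray(y)
    v[cname] = 0.0
    z[cname] = 0.0
    skf = StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=42)
    for itrain, ival in skf.split(train3, y):
        model = build_model()
        model.fit(train3[itrain], y[itrain],
                  validation_data=(train3[ival], y[ival]),
                  epochs=100, batch_size=128, verbose=0)  # placeholder settings
        v.loc[v.index[ival], cname] = model.predict(train3[ival]).ravel()
        z[cname] += model.predict(test3).ravel() / num_splits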
Example source code for the Python Nadam() class
def keras_mlp1(train2, y, test2, v, z):
    # Deeper PReLU MLP with batch normalization; cross-validated training
    # is delegated to the external keras_base helper.
    import sys  # for reading the function name below
    cname = sys._getframe().f_code.co_name
    def build_model(input_dims):
        from keras import layers
        from keras import models
        from keras import optimizers
        input_ = layers.Input(shape=(input_dims,))
        model = layers.Dense(1024, kernel_initializer='Orthogonal')(input_)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.7)(model)
        model = layers.Dense(256, kernel_initializer='Orthogonal')(model)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.9)(model)
        model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        model = layers.Dense(1, activation='sigmoid')(model)
        model = models.Model(input_, model)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizers.Nadam(),
                      #optimizer=optimizers.SGD(),
                      metrics=['binary_accuracy'])
        #print(model.summary(line_length=120))
        return model
    keras_base(train2, y, test2, v, z, build_model, 9, cname, base_seed=42)
#@tf_force_cpu
def keras_resnet1(train2, y, test2, v, z):
    # Fully-connected residual network: 20 identity-shortcut blocks.
    import sys  # for reading the function name below
    cname = sys._getframe().f_code.co_name
    def build_model(input_dims):
        from keras import layers
        from keras import models
        from keras import optimizers
        input_ = layers.Input(shape=(input_dims,))
        resnet_dims = max(input_dims * 2, 128)
        model = layers.Dense(resnet_dims,
                             kernel_initializer='Orthogonal',
                             activation=layers.advanced_activations.PReLU())(input_)
        model = layers.BatchNormalization()(model)
        # 20 residual blocks: Dense -> BN -> PReLU -> Dense -> BN, plus identity shortcut
        for n in range(20):
            shortcut = model
            model = layers.Dense(resnet_dims,
                                 kernel_initializer='Orthogonal')(model)
            model = layers.BatchNormalization()(model)
            model = layers.advanced_activations.PReLU()(model)
            model = layers.Dense(resnet_dims,
                                 kernel_initializer='Orthogonal')(model)
            model = layers.BatchNormalization()(model)
            model = layers.add([model, shortcut])
            model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.9)(model)
        model = layers.Dense(16,
                             kernel_initializer='Orthogonal',
                             activation=layers.advanced_activations.PReLU())(model)
        model = layers.BatchNormalization()(model)
        model = layers.Dense(1, activation='sigmoid')(model)
        model = models.Model(input_, model)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizers.Nadam(),
                      #optimizer=optimizers.SGD(),
                      metrics=['binary_accuracy'])
        #print(model.summary(line_length=120))
        return model
    keras_base(train2, y, test2, v, z, build_model, 9, cname, base_seed=42)
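Like keras_common, the keras_base helper is only called on this page, never shown. A sketch under the same assumptions, differing in that it scales the features itself, passes input_dims to build_model, and reseeds each fold from base_seed:

import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold

def keras_base(train2, y, test2, v, z, build_model, num_splits, cname, base_seed=42):
    # Hypothetical reconstruction; fit settings are placeholders.
    scaler = preprocessing.RobustScaler()
    train3 = scaler.fit_transform(train2)
    test3 = scaler.transform(test2)
    y = np.asarray(y)
    v[cname] = 0.0
    z[cname] = 0.0
    skf = StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=base_seed)
    for fold, (itrain, ival) in enumerate(skf.split(train3, y)):
        np.random.seed(base_seed + fold)  # per-fold seeding
        model = build_model(train3.shape[1])
        model.fit(train3[itrain], y[itrain],
                  validation_data=(train3[ival], y[ival]),
                  epochs=100, batch_size=128, verbose=0)
        v.loc[v.index[ival], cname] = model.predict(train3[ival]).ravel()
        z[cname] += model.predict(test3).ravel() / num_splits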
def build_model(X, dim=128):
    # Two-input embedding model, Keras 1.x API (merge, SReLU, Embedding dropout).
    # Assumes num_q and num_e (vocabulary sizes for the two inputs) are
    # defined at module level; the X argument is unused in the body.
    from keras.layers import Input, Embedding, Flatten, Dense, Dropout, merge
    from keras.layers.advanced_activations import SReLU
    from keras.models import Model
    from keras.optimizers import Nadam, SGD
    inputs_p = Input(shape=(1,), dtype='int32')
    embed_p = Embedding(
        num_q,
        dim,
        dropout=0.2,
        input_length=1
    )(inputs_p)
    inputs_d = Input(shape=(1,), dtype='int32')
    embed_d = Embedding(
        num_e,
        dim,
        dropout=0.2,
        input_length=1
    )(inputs_d)
    flatten_p = Flatten()(embed_p)
    flatten_d = Flatten()(embed_d)
    flatten = merge([
        flatten_p,
        flatten_d,
    ], mode='concat')
    fc1 = Dense(512)(flatten)
    fc1 = SReLU()(fc1)
    dp1 = Dropout(0.7)(fc1)
    outputs = Dense(1, activation='sigmoid', name='outputs')(dp1)
    inputs = [
        inputs_p,
        inputs_d,
    ]
    model = Model(input=inputs, output=outputs)
    nadam = Nadam()
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)  # unused alternative optimizer
    model.compile(
        optimizer=nadam,
        loss='binary_crossentropy'
    )
    return model
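A minimal usage sketch for the two-embedding model above; num_q/num_e must already be set, and p_ids, d_ids, labels are hypothetical integer-id arrays and 0/1 targets (nb_epoch because this is the Keras 1.x API):

# Hypothetical usage; all data names are stand-ins.
model = build_model(None, dim=128)  # the X argument is unused in the body above
model.fit([p_ids, d_ids], labels, batch_size=256, nb_epoch=5, validation_split=0.1)
preds = model.predict([p_test_ids, d_test_ids]).ravel()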
Source: Stock_Prediction_Model_Stateless_LSTM.py, from project StockRecommendSystem by doncat99
def lstm_model(self):
    # Assumes module-level imports: from keras.models import Sequential;
    # from keras.layers import LSTM, Activation, Dropout, Dense; from keras import optimizers.
    model = Sequential()
    first = True
    for idx in range(len(self.paras.model['hidden_layers'])):
        if idx == (len(self.paras.model['hidden_layers']) - 1):
            # last hidden layer: stop returning sequences
            # (note: with a single hidden layer this branch wins and
            # input_shape is never set on the first LSTM)
            model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=False))
            model.add(Activation(self.paras.model['activation']))
            model.add(Dropout(self.paras.model['dropout']))
        elif first:
            model.add(LSTM(input_shape=(None, int(self.paras.n_features)),
                           units=int(self.paras.model['hidden_layers'][idx]),
                           return_sequences=True))
            model.add(Activation(self.paras.model['activation']))
            model.add(Dropout(self.paras.model['dropout']))
            first = False
        else:
            model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=True))
            model.add(Activation(self.paras.model['activation']))
            model.add(Dropout(self.paras.model['dropout']))
    if self.paras.model['optimizer'] == 'sgd':
        #optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        optimizer = optimizers.SGD(lr=self.paras.model['learning_rate'], decay=1e-6, momentum=0.9, nesterov=True)
    elif self.paras.model['optimizer'] == 'rmsprop':
        #optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        optimizer = optimizers.RMSprop(lr=self.paras.model['learning_rate'] / 10, rho=0.9, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adagrad':
        #optimizer = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
        optimizer = optimizers.Adagrad(lr=self.paras.model['learning_rate'], epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adam':
        #optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        optimizer = optimizers.Adam(lr=self.paras.model['learning_rate'] / 10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adadelta':
        optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'adamax':
        optimizer = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    elif self.paras.model['optimizer'] == 'nadam':
        optimizer = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    else:
        optimizer = optimizers.Adam(lr=self.paras.model['learning_rate'] / 10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # output layer
    model.add(Dense(units=self.paras.model['out_layer']))
    model.add(Activation(self.paras.model['out_activation']))
    model.compile(loss=self.paras.model['loss'], optimizer=optimizer, metrics=['accuracy'])
    return model
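For orientation, a plausible shape of the self.paras object this method reads; the field names come from the accesses above, but every value is an illustrative guess:

# Illustrative stand-in for the project's real configuration object.
class Paras:
    n_features = 10
    model = {
        'hidden_layers': [128, 64, 32],   # one LSTM layer per entry
        'activation': 'tanh',
        'dropout': 0.3,
        'optimizer': 'nadam',             # selects the Nadam branch above
        'learning_rate': 0.002,
        'out_layer': 1,                   # units of the final Dense layer
        'out_activation': 'linear',
        'loss': 'mse',
    }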
Source: NeuralNetRegressor.py, from project job-salary-prediction by soton-data-mining
def predict(self):
    # Assumes module-level imports: import os; from keras.models import model_from_json;
    # keras.optimizers as opt and keras.metrics as metrics (plus Sequential/Dense/
    # initializers for the commented-out build-from-scratch path).
    def get_weights(model, layer_id):
        layer = model.layers[layer_id]
        weights = layer.get_weights()
        firstWeights = weights[1]  # index 1 is the layer's bias vector
        print(firstWeights)
    def export_model(model, name):
        if not (os.path.exists("neural_net_models")):
            os.makedirs("neural_net_models")
        model_json = model.to_json()
        with open("neural_net_models/" + name + ".json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights("neural_net_models/" + name + ".h5")
    def import_model(model_name):
        json_file = open("neural_net_models/" + model_name + '.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        # load weights into new model
        model.load_weights("neural_net_models/" + model_name + ".h5")
        print("Loaded " + model_name + " from disk")
        return model
    model = import_model('ut_Dense100_L1_m5s3_L2_m1s03_lr07_d1e07')
    """
    model = Sequential()
    model.add(Dense(100, input_dim=85, activation='relu',
                    kernel_initializer=initializers.RandomNormal(
                        mean=5, stddev=3, seed=None)))
    model.add(Dense(1, activation='linear',
                    kernel_initializer=initializers.RandomNormal(
                        mean=1, stddev=0.3, seed=None)))
    """
    # rms = opt.RMSprop(lr=0.01, rho=0.9, epsilon=1e-08, decay=1e-9)
    adadelta = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
    # nadam = opt.Nadam(lr=0.05, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    model.compile(loss='mean_absolute_error', optimizer=adadelta, metrics=[metrics.mae])
    # optimizer='adam'
    model.fit(
        self.x_train, self.y_train,
        validation_data=(self.x_test, self.y_test),
        epochs=1000, batch_size=160000, verbose=1
    )
    export_model(model, 'ut_Dense100_L1_m5s3_L2_m1s03_lr07_d1e07')
    return (self.y_train, self.y_test)
def __init__(self, word_index, embedding_matrix):
    # Question/answer CNN with frozen pretrained embeddings. Assumes module-level
    # constants EMBEDDING_DIM, MAX_SEQUENCE_LENGTH_Q, MAX_SEQUENCE_LENGTH_A and
    # Keras imports (Input, Embedding, Convolution1D, MaxPooling1D, Flatten,
    # Concatenate, Dropout, Dense, Model, Nadam).
    embedding_layer_q = Embedding(len(word_index) + 1,
                                  EMBEDDING_DIM,
                                  weights=[embedding_matrix],
                                  input_length=MAX_SEQUENCE_LENGTH_Q,
                                  trainable=False)
    embedding_layer_a = Embedding(len(word_index) + 1,
                                  EMBEDDING_DIM,
                                  weights=[embedding_matrix],
                                  input_length=MAX_SEQUENCE_LENGTH_A,
                                  trainable=False)
    question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
    answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
    embedded_question = embedding_layer_q(question)
    embedded_answer = embedding_layer_a(answer)
    conv_blocksA = []
    conv_blocksQ = []
    # parallel conv/pool/flatten branches over the answer (kernel sizes 3 and 5)
    for sz in [3, 5]:
        conv = Convolution1D(filters=20,
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             strides=1)(embedded_answer)
        conv = MaxPooling1D(pool_size=2)(conv)
        conv = Flatten()(conv)
        conv_blocksA.append(conv)
    # and over the question (kernel sizes 5, 7 and 9)
    for sz in [5, 7, 9]:
        conv = Convolution1D(filters=20,
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             strides=1)(embedded_question)
        conv = MaxPooling1D(pool_size=3)(conv)
        conv = Flatten()(conv)
        conv_blocksQ.append(conv)
    z = Concatenate()(conv_blocksA + conv_blocksQ)
    z = Dropout(0.5)(z)
    z = Dense(100, activation="relu")(z)
    softmax_c_q = Dense(2, activation='softmax')(z)
    self.model = Model([question, answer], softmax_c_q)
    opt = Nadam()
    self.model.compile(loss='categorical_crossentropy',
                       optimizer=opt,
                       metrics=['acc'])
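A minimal usage sketch, assuming this __init__ belongs to a wrapper class (QAModel below is a hypothetical name) and that q_train/a_train are padded integer sequences with labels one-hot encoded for the two-class softmax:

# Hypothetical usage; QAModel and all data names are stand-ins.
qa = QAModel(word_index, embedding_matrix)
qa.model.fit([q_train, a_train], labels_onehot,
             batch_size=64, epochs=3, validation_split=0.1)
probs = qa.model.predict([q_test, a_test])  # shape (n, 2) class probabilities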