def fit(self, X, y):
    ## scaler: standardize features before feeding the network
    self.scaler = StandardScaler()
    X = self.scaler.fit_transform(X)
    ## build model
    self.model = Sequential()
    ## input layer: dropout applied directly to the standardized inputs
    self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
    ## hidden layers: Dense -> (optional BN) -> activation -> (optional BN) -> Dropout
    hidden_layers = self.hidden_layers
    while hidden_layers > 0:
        self.model.add(Dense(self.hidden_units))
        if self.batch_norm == "before_act":
            self.model.add(BatchNormalization())
        if self.hidden_activation == "prelu":
            self.model.add(PReLU())
        elif self.hidden_activation == "elu":
            self.model.add(ELU())
        else:
            self.model.add(Activation(self.hidden_activation))
        if self.batch_norm == "after_act":
            self.model.add(BatchNormalization())
        self.model.add(Dropout(self.hidden_dropout))
        hidden_layers -= 1
    ## output layer: single linear unit for regression
    output_dim = 1
    output_act = "linear"
    self.model.add(Dense(output_dim))
    self.model.add(Activation(output_act))
    ## loss and optimizer
    if self.optimizer == "sgd":
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss="mse", optimizer=sgd)
    else:
        self.model.compile(loss="mse", optimizer=self.optimizer)
    ## fit
    self.model.fit(X, y,
                   nb_epoch=self.nb_epoch,
                   batch_size=self.batch_size,
                   validation_split=0, verbose=0)
    return self
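
For reference, below is a minimal sketch of the scaffold this fit method assumes. The class name ModelNN, the constructor defaults, and the predict helper are assumptions added for illustration, not part of the original snippet; the imports follow the standalone Keras 1.x/early-2.x API implied by nb_epoch (with tf.keras the layers come from tensorflow.keras.layers and nb_epoch becomes epochs). With the fit method above placed inside the class body, this is enough to train on a NumPy feature matrix.

from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, BatchNormalization, PReLU, ELU
from keras.optimizers import SGD

class ModelNN:
    # hyperparameter names match those used in fit(); the defaults are illustrative assumptions
    def __init__(self, input_dropout=0.0, hidden_layers=2, hidden_units=64,
                 hidden_activation="relu", hidden_dropout=0.2,
                 batch_norm="before_act", optimizer="adam",
                 nb_epoch=10, batch_size=64):
        self.input_dropout = input_dropout
        self.hidden_layers = hidden_layers
        self.hidden_units = hidden_units
        self.hidden_activation = hidden_activation
        self.hidden_dropout = hidden_dropout
        self.batch_norm = batch_norm
        self.optimizer = optimizer
        self.nb_epoch = nb_epoch
        self.batch_size = batch_size

    # fit(self, X, y) as defined above goes here

    def predict(self, X):
        # reuse the scaler fitted in fit() so train and test share the same scaling
        X = self.scaler.transform(X)
        return self.model.predict(X).flatten()

# hypothetical usage (variable names are placeholders):
# model = ModelNN(hidden_layers=3, hidden_units=128, optimizer="sgd")
# model.fit(train_x, train_y)
# preds = model.predict(test_x)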