from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers


def make_deep_learning_model(hidden_layers=None, num_cols=None, optimizer='Adadelta', dropout_rate=0.2, weight_constraint=0, feature_learning=False, kernel_initializer='normal', activation='elu'):
    # Note: dropout_rate and weight_constraint are accepted but not used
    # anywhere in this version of the function
    if feature_learning and hidden_layers is None:
        hidden_layers = [1, 0.75, 0.25]
    if hidden_layers is None:
        hidden_layers = [1, 0.75, 0.25]
    # The hidden_layers passed to us simply describe a shape: the caller does
    # not know the num_cols we are dealing with, so each entry is a multiplier
    # (e.g. 0.25, 0.75, 1, or 2) that gets scaled by num_cols
    scaled_layers = []
    for layer in hidden_layers:
        scaled_layers.append(min(int(num_cols * layer), 10))
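    # For example (hypothetical numbers): with num_cols = 100 and the default
    # hidden_layers of [1, 0.75, 0.25], the raw products are 100, 75, and 25,
    # but the min(..., 10) cap shrinks every hidden layer to 10 neurons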
    # If we're training this model for feature_learning, our penultimate layer
    # (our final hidden layer before the "output" layer) will always have 10
    # neurons, meaning that we always output 10 features from our
    # feature_learning model
    if feature_learning:
        scaled_layers.append(10)
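    # Continuing the example above: feature_learning=True appends a fourth
    # entry, giving scaled_layers = [10, 10, 10, 10], so the network always
    # ends in a 10-neuron hidden layer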
    model = Sequential()

    model.add(Dense(scaled_layers[0], input_dim=num_cols, kernel_initializer=kernel_initializer, kernel_regularizer=regularizers.l2(0.01)))
    model.add(get_activation_layer(activation))

    for layer_size in scaled_layers[1:-1]:
        model.add(Dense(layer_size, kernel_initializer=kernel_initializer, kernel_regularizer=regularizers.l2(0.01)))
        model.add(get_activation_layer(activation))

    # There are times we will want the output from our penultimate layer, not
    # the final layer, so give it a name that makes the penultimate layer easy
    # to find
    model.add(Dense(scaled_layers[-1], kernel_initializer=kernel_initializer, name='penultimate_layer', kernel_regularizer=regularizers.l2(0.01)))
    model.add(get_activation_layer(activation))

    # For regressors, we want an output layer with a single node
    model.add(Dense(1, kernel_initializer=kernel_initializer))

    # The final step is to compile the model
    model.compile(loss='mean_squared_error', optimizer=get_optimizer(optimizer), metrics=['mean_absolute_error', 'mean_absolute_percentage_error'])

    return model
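

# get_activation_layer and get_optimizer are not shown in this listing; they
# are helpers defined elsewhere in the same module. The versions below are
# hypothetical stand-ins, sketched only so the listing runs end to end:
# advanced activations such as 'elu' need their own layer objects, while any
# other name can be wrapped in a generic Activation layer, and optimizer
# names can be resolved through keras.optimizers.get.
from keras.layers import Activation, ELU, LeakyReLU, PReLU
from keras import optimizers


def get_activation_layer(activation):
    # Advanced activations are layers in their own right; plain string
    # activations (e.g. 'relu', 'tanh') go through a generic Activation layer
    advanced = {'elu': ELU, 'leakyrelu': LeakyReLU, 'prelu': PReLU}
    if activation in advanced:
        return advanced[activation]()
    return Activation(activation)


def get_optimizer(name):
    # Resolve a string such as 'Adadelta' to an optimizer instance with its
    # default settings
    return optimizers.get(name)


# Example usage (num_cols=40 is an arbitrary illustration): build a
# feature_learning regressor, then reuse the named penultimate layer as a
# 10-feature extractor.
from keras.models import Model

model = make_deep_learning_model(num_cols=40, feature_learning=True)
feature_extractor = Model(inputs=model.input, outputs=model.get_layer('penultimate_layer').output)
# feature_extractor.predict(X) returns the 10 learned features for each row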