kernel_one.py file source code

Language: Python

Project: ml-projects · Author: saopayne
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
# from keras.layers import BatchNormalization  # needed if the BatchNormalization line is enabled
# from keras.constraints import maxnorm        # needed if kernel_constraint is enabled
# from keras.optimizers import SGD, Adam       # needed for the commented-out optimizers

# input_dims, act_func and r2_keras are module-level names the original file
# defines elsewhere (see the sketches below)

def model():
    model = Sequential()

    # input layer
    model.add(Dense(120, input_dim=input_dims))  # optionally: kernel_constraint=maxnorm(5)
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.2))  # dropout regularization to reduce overfitting

    # hidden layers: eight identical 120-unit blocks
    for _ in range(8):
        model.add(Dense(120))
        model.add(Activation(act_func))
        model.add(Dropout(0.2))

    # output layer (y_pred): a single linear unit for regression
    model.add(Dense(1, activation='linear'))

    # When combining dropout with SGD, use a large learning rate with decay and
    # a high momentum: increase the learning rate by a factor of 10 to 100 and
    # use a momentum value of 0.9 or 0.99.
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    # compile the model
    model.compile(loss='mean_squared_error',  # 'mean_absolute_error' is an alternative
                  optimizer='adam',
                  metrics=[r2_keras])  # additional metrics can be appended here

    # print the layer-by-layer architecture
    model.summary()
    return model
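
The compile step references a custom r2_keras metric that this excerpt does not define. A minimal sketch, assuming the common coefficient-of-determination formulation written against the Keras backend:

from keras import backend as K

def r2_keras(y_true, y_pred):
    # R^2 = 1 - SS_res / SS_tot, with K.epsilon() guarding against division by zero
    ss_res = K.sum(K.square(y_true - y_pred))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / (ss_tot + K.epsilon())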

# initialize input dimension
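The excerpt cuts off at the comment above, so the actual initialization is not shown. A minimal sketch of how the globals the function relies on might be set up and the model trained; every name and value here (X_train, y_train, the feature count, epochs, batch size) is a placeholder, not taken from the original project:

import numpy as np

# placeholder training data; the original script loads its own dataset
X_train = np.random.rand(1000, 100).astype('float32')
y_train = np.random.rand(1000).astype('float32')

input_dims = X_train.shape[1]  # number of input features
act_func = 'relu'              # hidden-layer activation; the original's choice is not shown

estimator = model()
estimator.fit(X_train, y_train, epochs=100, batch_size=32, verbose=1)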