def create_Kao_Onet(weight_path='model48.h5'):
    """Build the MTCNN O-Net (48x48x3 input) and load pretrained weights.

    Returns a Keras Model with three heads: 2-way face classification
    ('conv6-1'), 4-value bounding-box regression ('conv6-2') and 10-value
    facial-landmark regression ('conv6-3'). Weights are loaded by layer
    name, so the layer names must match the checkpoint.
    """
    inp = Input(shape=[48, 48, 3])
    net = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='prelu1')(net)
    net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu2')(net)
    net = MaxPool2D(pool_size=3, strides=2)(net)
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu3')(net)
    net = MaxPool2D(pool_size=2)(net)
    net = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu4')(net)
    # Reorder axes before flattening — presumably to match the channel order
    # of the original (Caffe-trained) weights; confirm against the checkpoint.
    net = Permute((3, 2, 1))(net)
    net = Flatten()(net)
    net = Dense(256, name='conv5')(net)
    net = PReLU(name='prelu5')(net)
    classifier = Dense(2, activation='softmax', name='conv6-1')(net)
    bbox_regress = Dense(4, name='conv6-2')(net)
    landmark_regress = Dense(10, name='conv6-3')(net)
    model = Model([inp], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)
    return model
# --- Python usage examples of the PReLU() class ---
def build_network(num_actions, agent_history_length, resized_width, resized_height):
    """Build the actor-critic network (legacy Keras 1 API).

    Returns a TF placeholder for the stacked-frame state plus a Model
    producing [state_value, action_probs].
    """
    state = tf.placeholder("float", [None, agent_history_length, resized_width, resized_height])
    inputs_v = Input(shape=(agent_history_length, resized_width, resized_height,))
    # Two relu conv stages, then a PReLU-activated dense trunk.
    trunk = Convolution2D(nb_filter=16, nb_row=8, nb_col=8, subsample=(4, 4), activation='relu', border_mode='same')(inputs_v)
    trunk = Convolution2D(nb_filter=32, nb_row=4, nb_col=4, subsample=(2, 2), activation='relu', border_mode='same')(trunk)
    trunk = Flatten()(trunk)
    trunk = Dense(output_dim=512)(trunk)
    trunk = PReLU()(trunk)
    # Separate policy ("p") and value ("v") heads share the trunk.
    state_value = Dense(name="v", output_dim=1, activation='linear')(trunk)
    action_probs = Dense(name="p", output_dim=num_actions, activation='softmax')(trunk)
    value_network = Model(input=inputs_v, output=[state_value, action_probs])
    return state, value_network
def create_Kao_Rnet(weight_path='model24.h5'):
    """Build the MTCNN R-Net (24x24x3 input) and load pretrained weights.

    Returns a Keras Model with a 2-way face classifier ('conv5-1') and a
    4-value bounding-box regressor ('conv5-2'). Weights load by layer name.
    """
    # change this shape to [None,None,3] to enable arbitrary shape input
    inp = Input(shape=[24, 24, 3])
    net = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='prelu1')(net)
    net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    net = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu2')(net)
    net = MaxPool2D(pool_size=3, strides=2)(net)
    net = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu3')(net)
    # Axis reorder before flatten — presumably to match the original
    # checkpoint's channel ordering; confirm against the weights file.
    net = Permute((3, 2, 1))(net)
    net = Flatten()(net)
    net = Dense(128, name='conv4')(net)
    net = PReLU(name='prelu4')(net)
    classifier = Dense(2, activation='softmax', name='conv5-1')(net)
    bbox_regress = Dense(4, name='conv5-2')(net)
    model = Model([inp], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
def build(inp, dropout_rate=0.01):
    """Assemble the ENet encoder: initial block, stage-1 bottlenecks, then
    the shared stage-2/3 bottleneck sequence. Returns the encoder tensor."""
    x = initial_block(inp)
    # enet_unpooling uses momentum of 0.1, keras default is 0.99
    x = BatchNormalization(momentum=0.1)(x)
    x = PReLU(shared_axes=[1, 2])(x)
    x = bottleneck(x, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    for _ in range(4):
        x = bottleneck(x, 64, dropout_rate=dropout_rate)  # bottleneck 1.i
    x = bottleneck(x, 128, downsample=True)  # bottleneck 2.0
    # Stages 2 and 3 repeat the same eight-bottleneck pattern (2.1 .. 2.8).
    stage_pattern = ({}, {'dilated': 2}, {'asymmetric': 5}, {'dilated': 4},
                     {}, {'dilated': 8}, {'asymmetric': 5}, {'dilated': 16})
    for _ in range(2):
        for extra in stage_pattern:
            x = bottleneck(x, 128, **extra)
    return x
def build(inp, dropout_rate=0.01):
    """ENet encoder variant that also collects the max-pooling indices from
    each downsampling step (for later unpooling in the decoder).

    Returns (encoder_tensor, pooling_indices).
    """
    pooling_indices = []
    x, idx = initial_block(inp)
    # enet_unpooling uses momentum of 0.1, keras default is 0.99
    x = BatchNormalization(momentum=0.1)(x)
    x = PReLU(shared_axes=[1, 2])(x)
    pooling_indices.append(idx)
    x, idx = bottleneck(x, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    pooling_indices.append(idx)
    for _ in range(4):
        x = bottleneck(x, 64, dropout_rate=dropout_rate)  # bottleneck 1.i
    x, idx = bottleneck(x, 128, downsample=True)  # bottleneck 2.0
    pooling_indices.append(idx)
    # Stages 2 and 3 repeat the same eight-bottleneck pattern (2.1 .. 2.8).
    stage_pattern = ({}, {'dilated': 2}, {'asymmetric': 5}, {'dilated': 4},
                     {}, {'dilated': 8}, {'asymmetric': 5}, {'dilated': 16})
    for _ in range(2):
        for extra in stage_pattern:
            x = bottleneck(x, 128, **extra)
    return x, pooling_indices
def build_model():
    """Build and compile the stacked-LSTM regressor defined by Conf.LAYERS.

    Layer sizes come from Conf.LAYERS; the model is compiled with MSE loss
    and the rmsprop optimizer, and compile time is printed.
    """
    net = Sequential()
    net.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(units=Conf.LAYERS[3]))
    net.add(Activation("tanh"))
    compile_started = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - compile_started)
    return net
# Source file: co_lstm_predict_sequence.py
# Project: copper_price_forecast — author: liyinwei
# (scraped listing metadata: 23 reads, 0 favorites, 0 likes, 0 comments)
def build_model():
    """Construct the two-layer LSTM forecaster and compile it (MSE, rmsprop).

    The first LSTM returns the full sequence so it can feed the second one;
    all layer widths are read from Conf.LAYERS.
    """
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    model.add(Activation("tanh"))
    t0 = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - t0)
    return model
def GatedPixelCNN(input_shape, filters, depth, latent=None, build=True):
    """Gated PixelCNN (legacy Keras 1 API), optionally latent-conditioned.

    When `build` is True, returns a compiled Model; otherwise returns the
    raw output tensor(s) from OutChannels.
    """
    height, width, channels = input_shape
    palette = 256  # TODO: Make it scalable to any amount of palette.
    input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')
    # Conditioning vector is only created when a latent size is given.
    latent_vector = Input(shape=(latent,), name='latent_vector') if latent is not None else None
    net = GatedCNNs(filters, depth, latent_vector)(*GatedCNN(filters, latent_vector)(input_img))
    for _ in range(2):
        net = Convolution2D(filters, 1, 1, border_mode='valid')(net)
        net = PReLU()(net)
    outs = OutChannels(*input_shape, masked=False, palette=palette)(net)
    if build:
        model_inputs = [input_img, latent_vector] if latent is not None else input_img
        net = Model(input=model_inputs, output=outs)
        net.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')
    return net
def PixelCNN(input_shape, filters, depth, build=True):
    """Masked PixelCNN (legacy Keras 1 API).

    A mask-'A' 7x7 convolution feeds a residual-block stack, followed by two
    1x1 masked convolutions. When `build` is True a compiled Model is
    returned; otherwise the raw OutChannels output is returned.
    """
    height, width, channels = input_shape
    palette = 256  # TODO: Make it scalable to any amount of palette.
    input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')
    net = MaskedConvolution2D(filters, 7, 7, mask='A', border_mode='same', name='masked2d_A')(input_img)
    net = ResidualBlockList(filters, depth)(net)
    net = PReLU()(net)
    for _ in range(2):
        net = MaskedConvolution2D(filters, 1, 1, border_mode='valid')(net)
        net = PReLU()(net)
    outs = OutChannels(*input_shape, masked=True, palette=palette)(net)
    if build:
        net = Model(input=input_img, output=outs)
        net.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')
    return net
def create_base_model(nb_features, nb_classes, learning_rate=0.02):
    """Two-hidden-layer softmax MLP (512 PReLU units each, heavy dropout).

    Compiled with categorical cross-entropy and Adam at `learning_rate`.
    """
    stack = [
        # input layer + first hidden layer
        Dense(512, kernel_initializer='lecun_uniform', input_shape=(nb_features,)),
        PReLU(),
        Dropout(0.5),
        # additional hidden layer
        Dense(512, kernel_initializer='lecun_uniform'),
        PReLU(),
        Dropout(0.75),
        # output layer
        Dense(nb_classes, kernel_initializer='lecun_uniform'),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=learning_rate), metrics=['accuracy'])
    return model
def nn_mlp(input_shape, params):
    """Parameterised MLP regressor.

    `params` drives the architecture: 'layers' gives hidden sizes, optional
    'batch_norm' adds BatchNormalization, optional 'dropouts' gives one rate
    per layer. The model is returned uncompiled with a single linear output.
    """
    model = Sequential()
    for i, layer_size in enumerate(params['layers']):
        dense_kwargs = {'init': 'he_normal', 'W_regularizer': regularizer(params)}
        if i == 0:
            dense_kwargs['input_shape'] = input_shape
        model.add(Dense(layer_size, **dense_kwargs))
        if params.get('batch_norm', False):
            model.add(BatchNormalization())
        if 'dropouts' in params:
            model.add(Dropout(params['dropouts'][i]))
        model.add(PReLU())
    model.add(Dense(1, init='he_normal'))
    return model
# Source file: model_fit_history.py
# Project: Exoplanet-Artificial-Intelligence — author: pearsonkyle
# (scraped listing metadata: 21 reads, 0 favorites, 0 likes, 0 comments)
def make_wave(maxlen):
    """Small dense binary classifier over feature vectors of length `maxlen`.

    Architecture: 64-32-8 PReLU-activated dense layers, sigmoid output,
    trained with SGD + Nesterov momentum and binary cross-entropy.

    Fix: the original called the undefined name ``PRELU`` — the Keras
    advanced activation is ``PReLU``, as used everywhere else in this file.
    """
    model = Sequential()
    model.add(Dense(64, input_dim=maxlen, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(PReLU())
    model.add(Dropout(0.25))
    model.add(Dense(32))
    model.add(PReLU())
    model.add(Dense(8))
    model.add(PReLU())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    SGDsolver = SGD(lr=0.1, momentum=0.25, decay=0.0001, nesterov=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=SGDsolver,
                  metrics=['accuracy'])
    return model
def nn_model(dims):
    """MAE-regression MLP (400-200-50-1) with PReLU, batch norm and dropout.

    `dims` is the number of input features; compiled with adadelta.
    """
    model = Sequential()
    hidden = ((400, 0.4), (200, 0.2), (50, 0.2))
    for layer_idx, (width, drop_rate) in enumerate(hidden):
        if layer_idx == 0:
            model.add(Dense(width, input_dim=dims, init='he_normal'))
        else:
            model.add(Dense(width, init='he_normal'))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(1, init='he_normal'))
    model.compile(loss='mae', optimizer='adadelta')
    return model
def nn_model():
    """MAE-regression MLP (400-120-30-1) over the global `xtrain` features.

    PReLU + batch-norm blocks with tapering dropout; compiled with adadelta.
    """
    model = Sequential()
    hidden = ((400, 0.4), (120, 0.2), (30, 0.1))  # widths were tuned down from 400/200/50
    for layer_idx, (width, drop_rate) in enumerate(hidden):
        if layer_idx == 0:
            model.add(Dense(width, input_dim=xtrain.shape[1], init='he_normal'))
        else:
            model.add(Dense(width, init='he_normal'))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(1, init='he_normal'))
    model.compile(loss='mae', optimizer='adadelta')
    return model
def nn_model():
    """MAE-regression MLP (425-200-40-1) over the global `xtrain` features.

    PReLU + batch-norm blocks with tapering dropout; compiled with adam.
    """
    model = Sequential()
    hidden = ((425, 0.4), (200, 0.3), (40, 0.15))
    for layer_idx, (width, drop_rate) in enumerate(hidden):
        if layer_idx == 0:
            model.add(Dense(width, input_dim=xtrain.shape[1], init='he_normal'))
        else:
            model.add(Dense(width, init='he_normal'))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(1, init='he_normal'))
    model.compile(loss='mae', optimizer='adam')
    return model
def nn_model():
    """MAE-regression MLP (450-225-60-1) over the global `xtrain` features.

    NOTE(review): 'eve' is not a built-in Keras optimizer name — presumably
    an Eve optimizer is registered elsewhere in this project; confirm before
    running.
    """
    model = Sequential()
    hidden = ((450, 0.4), (225, 0.25), (60, 0.15))
    for layer_idx, (width, drop_rate) in enumerate(hidden):
        if layer_idx == 0:
            model.add(Dense(width, input_dim=xtrain.shape[1], init='he_normal'))
        else:
            model.add(Dense(width, init='he_normal'))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(1, init='he_normal'))
    model.compile(loss='mae', optimizer='eve')
    return model
def create_net():
    """10-class softmax MLP over the global `X_train` features.

    Three PReLU + batch-norm hidden blocks (400-200-50), SGD optimizer,
    categorical cross-entropy loss.
    """
    net = Sequential()
    hidden = ((400, 0.4), (200, 0.2), (50, 0.2))
    for layer_idx, (width, drop_rate) in enumerate(hidden):
        if layer_idx == 0:
            net.add(Dense(width, input_dim=X_train.shape[1], init='he_normal'))
        else:
            net.add(Dense(width, init='he_normal'))
        net.add(PReLU())
        net.add(BatchNormalization())
        net.add(Dropout(drop_rate))
    net.add(Dense(output_dim=10, init='he_normal'))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='sgd',
                metrics=['categorical_accuracy'])
    return net
def MLP(opt='nadam'):
    """Binary-classification MLP over the raw input vector (LEN_RAW_INPUT).

    BatchNorm -> Dense(512) -> PReLU -> Dropout -> BatchNorm -> Dense(256)
    -> PReLU -> Dropout -> BatchNorm, then a single sigmoid output.
    Compiled with binary cross-entropy and the given optimizer.

    Fix: removed the unused ``auxiliary_output_dense`` branch — that Dense
    layer was built but never wired into the returned Model's outputs, so it
    was dead code.
    """
    X_raw = Input(shape=(LEN_RAW_INPUT,), name='input_raw')
    fc1 = BatchNormalization()(X_raw)
    fc1 = Dense(512)(fc1)
    fc1 = PReLU()(fc1)
    fc1 = Dropout(0.25)(fc1)
    fc1 = BatchNormalization()(fc1)
    fc1 = Dense(256)(fc1)
    fc1 = PReLU()(fc1)
    fc1 = Dropout(0.15)(fc1)
    fc1 = BatchNormalization()(fc1)
    output_all = Dense(1, activation='sigmoid', name='output')(fc1)
    model = Model(input=X_raw, output=output_all)
    model.compile(
        optimizer=opt,
        loss='binary_crossentropy')
    return model
def build_model(self):
    """Binary softmax MLP (310-252-128) with mixed LeakyReLU/PReLU blocks.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    # (fan_in, fan_out, activation, dropout) per hidden block
    blocks = [
        (nn_input_dim_NN, 310, LeakyReLU(alpha=.001), 0.6),
        (310, 252, PReLU(init='zero'), 0.5),
        (252, 128, LeakyReLU(alpha=.001), 0.4),
    ]
    for fan_in, fan_out, activation, drop_rate in blocks:
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(activation)
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=128, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (62-158-20) without batch norm; mixed activations.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    # (fan_in, fan_out, activation, dropout) per hidden block
    blocks = [
        (nn_input_dim_NN, 62, LeakyReLU(alpha=.001), 0.3),
        (62, 158, LeakyReLU(alpha=.001), 0.25),
        (158, 20, PReLU(init='zero'), 0.2),
    ]
    for fan_in, fan_out, activation, drop_rate in blocks:
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(activation)
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (100-380-50-20), all-PReLU, no batch norm.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 100, 380, 50, 20]
    drops = [0.2, 0.6, 0.6, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (105-280-60-20), PReLU + batch norm, momentum 0.99.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 105, 280, 60, 20]
    drops = [0.2, 0.6, 0.6, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (100-180-50-30), PReLU + batch norm, 0.2 input dropout.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 100, 180, 50, 30]
    drops = [0.2, 0.6, 0.5, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=30, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (100-360-50-20), PReLU + batch norm blocks.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 100, 360, 50, 20]
    drops = [0.2, 0.6, 0.6, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (110-350-50-20), PReLU + batch norm blocks.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 110, 350, 50, 20]
    drops = [0.2, 0.6, 0.6, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (110-300-60-20), PReLU + batch norm blocks.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 110, 300, 60, 20]
    drops = [0.3, 0.5, 0.6, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (100-300-50-20), PReLU blocks, no batch norm.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 100, 300, 50, 20]
    drops = [0.1, 0.6, 0.6, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (105-200-60-20), PReLU + batch norm, momentum 0.99.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 105, 200, 60, 20]
    drops = [0.2, 0.6, 0.5, 0.1]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (140-380-50-20), PReLU + batch norm blocks.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 140, 380, 50, 20]
    drops = [0.2, 0.6, 0.6, 0.2]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    """Binary softmax MLP (100-360-50-20), PReLU + batch norm, lr 0.007.

    Returns a KerasClassifier wrapping the compiled network.
    """
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    widths = [nn_input_dim_NN, 100, 360, 50, 20]
    drops = [0.2, 0.6, 0.6, 0.1]
    for fan_in, fan_out, drop_rate in zip(widths, widths[1:], drops):
        model.add(Dense(input_dim=fan_in, output_dim=fan_out, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    sgd = SGD(lr=0.007, decay=1e-10, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)