def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a
    "seed", and outputs images of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator
    # to ensure its output also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model
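A minimal usage sketch for the generator above (assuming NumPy is imported as np; the rescaling simply inverts the [-1, 1] normalization that the final tanh layer matches):

import numpy as np

generator = make_generator()
noise = np.random.normal(size=(16, 100)).astype('float32')  # batch of 16 noise "seeds"
fake = generator.predict(noise)   # pixel values in [-1, 1]
pixels = (fake + 1.0) * 127.5     # rescale to [0, 255] for display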
Python LeakyReLU() usage examples (source code)
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = concatenate([gen_input, aux_input], axis=-1)
    # Dense layer 1
    x = Dense(10 * 100)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 10*100
    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x)  # shape is 100 x 10
    # Conv layer 1
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 100 x 250
    x = UpSampling1D(size=2)(x)  # output shape is 200 x 250
    # Conv layer 2
    x = Conv1D(filters=100, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 200 x 100
    x = UpSampling1D(size=2)(x)  # output shape is 400 x 100
    # Conv layer 3
    x = Conv1D(filters=1, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)  # final output shape is 400 x 1
    generator_model = Model(
        outputs=[x], inputs=[gen_input, aux_input], name=model_name)
    return generator_model
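A usage sketch for the conditioned generator above, with hypothetical inputs (assumes NumPy as np):

import numpy as np

gen = generator_model(noise_dim=100, aux_dim=47)
noise = np.random.normal(size=(8, 100)).astype('float32')
aux = np.random.rand(8, 47).astype('float32')
out = gen.predict([noise, aux])   # shape (8, 400, 1), values in [-1, 1] from the tanh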
def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a single
    value, representing whether the input is real or generated. Unlike normal GANs, the
    output is not sigmoid and does not represent a probability! Instead, the output
    should be as large and negative as possible for generated inputs and as large and
    positive as possible for real inputs.
    Note that the improved WGAN paper suggests that BatchNormalization should not be
    used in the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model
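The unbounded output described in the docstring pairs with the Wasserstein loss, which in Keras reduces to the mean of the labels times the raw scores, with y_true = +1/-1 distinguishing real from generated batches. A minimal sketch, assuming the Keras backend is imported as K:

def wasserstein_loss(y_true, y_pred):
    # y_true is +1 or -1 per sample; the chosen sign convention decides which
    # class's scores get pushed up and which get pushed down during training.
    return K.mean(y_true * y_pred)

critic = make_discriminator()
critic.compile(optimizer='adam', loss=wasserstein_loss)

The gradient penalty from the improved WGAN paper (the reason BatchNormalization is avoided here) is an extra loss term computed on interpolated samples; it is omitted from this sketch.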
def build_model():
    """Build and compile the stacked-LSTM forecasting model."""
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Source: co_lstm_predict_sequence.py, project: copper_price_forecast, author: liyinwei
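A hypothetical usage sketch: Conf is the project's configuration object, and LAYERS is assumed to hold [input features, timesteps, hidden units, output size], matching how it is indexed above (assumes NumPy as np):

import numpy as np

model = build_model()
X = np.random.rand(256, Conf.LAYERS[1], Conf.LAYERS[0])   # (samples, timesteps, features)
y = np.random.uniform(-1, 1, size=(256, Conf.LAYERS[3]))  # tanh output lies in [-1, 1]
model.fit(X, y, epochs=2, batch_size=32)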
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['ReLU']}
    # Test 1
    net['l0']['connection']['output'].append('l1')
    inp = data(net['l0'], '', 'l0')['l0']
    temp = activation(net['l1'], [inp], 'l1')
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[1].__class__.__name__, 'Activation')
    # Test 2
    net['l1']['params']['negative_slope'] = 1
    net['l0']['connection']['output'].append('l1')
    inp = data(net['l0'], '', 'l0')['l0']
    temp = activation(net['l1'], [inp], 'l1')
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[1].__class__.__name__, 'LeakyReLU')
def make_dcgan_discriminator(Xk_d):
    x = Convolution2D(nb_filter=64, nb_row=5, nb_col=5, subsample=(2, 2),
                      activation=None, border_mode='same', init='glorot_uniform',
                      dim_ordering='th')(Xk_d)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Convolution2D(nb_filter=128, nb_row=5, nb_col=5, subsample=(2, 2),
                      activation=None, border_mode='same', init='glorot_uniform',
                      dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    x = Dense(1024)(x)
    x = BatchNormalization(mode=2)(x)
    x = LeakyReLU(0.2)(x)
    d = Dense(1, activation=None)(x)
    return d
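This snippet and the next use the Keras 1 API. For reference, a sketch of the same block in Keras 2 syntax (an equivalence mapping, not the original project's code):

def make_dcgan_discriminator_keras2(Xk_d):
    # Keras 2 renames: nb_filter -> filters, nb_row/nb_col -> kernel_size,
    # subsample -> strides, border_mode -> padding, init -> kernel_initializer,
    # dim_ordering='th' -> data_format='channels_first'.
    # BatchNormalization no longer accepts a mode argument.
    x = Conv2D(64, (5, 5), strides=(2, 2), padding='same',
               kernel_initializer='glorot_uniform',
               data_format='channels_first')(Xk_d)
    x = BatchNormalization(axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(128, (5, 5), strides=(2, 2), padding='same',
               kernel_initializer='glorot_uniform',
               data_format='channels_first')(x)
    x = BatchNormalization(axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    return Dense(1, activation=None)(x)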
def make_dcgan_discriminator(Xk_d):
    x = Convolution2D(nb_filter=64, nb_row=4, nb_col=4, subsample=(2, 2),
                      activation=None, border_mode='same', init=conv2D_init,
                      dim_ordering='th')(Xk_d)
    # x = BatchNormalization(mode=2, axis=1)(x)  # <- makes things much worse!
    x = LeakyReLU(0.2)(x)
    x = Convolution2D(nb_filter=128, nb_row=4, nb_col=4, subsample=(2, 2),
                      activation=None, border_mode='same', init=conv2D_init,
                      dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    x = Dense(1024, init=conv2D_init)(x)
    x = BatchNormalization(mode=2)(x)
    x = LeakyReLU(0.2)(x)
    d = Dense(1, activation=None)(x)
    return d
def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    img = Input(shape=self.missing_shape)
    validity = model(img)
    return Model(img, validity)
def build_discriminator(self):
    img_shape = (self.img_rows, self.img_cols, self.channels)
    model = Sequential()
    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    img = Input(shape=img_shape)
    validity = model(img)
    return Model(img, validity)
def build_discriminator(self):
    model = Sequential()
    model.add(Dense(512, input_dim=self.encoded_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1, activation="sigmoid"))
    model.summary()
    encoded_repr = Input(shape=(self.encoded_dim,))
    validity = model(encoded_repr)
    return Model(encoded_repr, validity)
def build_encoder(self):
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.latent_dim))
    model.summary()
    img = Input(shape=self.img_shape)
    z = model(img)
    return Model(img, z)
def build_discriminator(self):
    z = Input(shape=(self.latent_dim,))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])
    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)
    return Model([z, img], validity)
def _adversary():
    model = Sequential()
    model.add(Convolution2D(
        64, 5, 5,
        border_mode='same',
        input_shape=(3, 32, 32), subsample=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(Convolution2D(128, 5, 5, subsample=(2, 2)))
    model.add(BatchNormalization(mode=2))
    model.add(LeakyReLU(0.2))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(LeakyReLU())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
def transform_model(weight_loss_pix=5e-4):
    inputs = Input(shape=(128, 128, 3))
    x1 = Convolution2D(64, 5, 5, border_mode='same')(inputs)
    x2 = LeakyReLU(alpha=0.3, name='wkcw')(x1)
    x3 = BatchNormalization()(x2)
    x4 = Convolution2D(128, 4, 4, border_mode='same', subsample=(2, 2))(x3)
    x5 = LeakyReLU(alpha=0.3)(x4)
    x6 = BatchNormalization()(x5)
    x7 = Convolution2D(256, 4, 4, border_mode='same', subsample=(2, 2))(x6)
    x8 = LeakyReLU(alpha=0.3)(x7)
    x9 = BatchNormalization()(x8)
    x10 = Deconvolution2D(128, 3, 3, output_shape=(None, 64, 64, 128), border_mode='same', subsample=(2, 2))(x9)
    x11 = BatchNormalization()(x10)
    x12 = Deconvolution2D(64, 3, 3, output_shape=(None, 128, 128, 64), border_mode='same', subsample=(2, 2))(x11)
    x13 = BatchNormalization()(x12)
    x14 = Deconvolution2D(3, 4, 4, output_shape=(None, 128, 128, 3), border_mode='same', activity_regularizer=activity_l1(weight_loss_pix))(x13)
    output = merge([inputs, x14], mode='sum')
    model = Model(input=inputs, output=output)
    return model
def seqCNN_BN(n_flow=4, seq_len=3, map_height=32, map_width=32):
    model = Sequential()
    model.add(Convolution2D(64, 3, 3, input_shape=(n_flow * seq_len, map_height, map_width), border_mode='same'))
    model.add(LeakyReLU(0.2))
    model.add(BatchNormalization())
    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    model.add(BatchNormalization())
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    model.add(BatchNormalization())
    model.add(Convolution2D(n_flow, 3, 3, border_mode='same'))
    model.add(Activation('tanh'))
    return model
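A usage sketch for the channels-first flow maps this model expects (assumes NumPy as np and a Theano-style dim ordering, as the input_shape suggests):

import numpy as np

model = seqCNN_BN(n_flow=4, seq_len=3, map_height=32, map_width=32)
X = np.random.rand(8, 4 * 3, 32, 32)   # (batch, n_flow * seq_len, height, width)
pred = model.predict(X)                # (8, 4, 32, 32), values in [-1, 1] from the tanh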
def seqCNN_LReLU(n_flow=4, seq_len=3, map_height=32, map_width=32):
    model = Sequential()
    model.add(Convolution2D(64, 3, 3, input_shape=(n_flow * seq_len, map_height, map_width), border_mode='same'))
    model.add(LeakyReLU(0.2))
    # model.add(BatchNormalization())
    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    # model.add(BatchNormalization())
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    # model.add(BatchNormalization())
    model.add(Convolution2D(n_flow, 3, 3, border_mode='same'))
    model.add(Activation('tanh'))
    return model
def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    L_1 = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    L_1 = LeakyReLU(alpha=0.25)(L_1)
    L_2 = L_1
    for i in range(3):
        L_2 = residual_block(L_2, 64, 3)
    L_3 = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(L_2)
    L_3 = BatchNormalization(axis=-1)(L_3)
    L_3 = add([L_1, L_3])
    L_4 = Conv2D(128, (1, 1), padding='same', kernel_initializer='glorot_uniform')(L_3)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(L_4)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
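The create_model variants in this section call a residual_block helper that is not shown. A plausible sketch consistent with how it is called (residual_block(x, 64, 3): 64 filters, 3x3 kernels) might look like the following; this is an assumption, not the original project's code:

def residual_block(x, filters, kernel_size):
    # Hypothetical reconstruction: two conv/BN stages with a LeakyReLU between
    # them, closed by an identity skip connection.
    y = Conv2D(filters, (kernel_size, kernel_size), padding='same',
               kernel_initializer='glorot_uniform')(x)
    y = BatchNormalization(axis=-1)(y)
    y = LeakyReLU(alpha=0.25)(y)
    y = Conv2D(filters, (kernel_size, kernel_size), padding='same',
               kernel_initializer='glorot_uniform')(y)
    y = BatchNormalization(axis=-1)(y)
    return add([x, y])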
def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    x_1 = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    x_1 = LeakyReLU(alpha=0.25)(x_1)
    x = x_1
    for i in range(5):  # or 15
        x = residual_block(x, 64, 3)
    x = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = add([x_1, x])
    x = upscale(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    x = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(alpha=0.25)(x)
    for i in range(5):
        x = residual_block(x, 64, 3)
    x = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu')(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
    # plot_model(deblocking, to_file='model.png', show_shapes=True, show_layer_names=True)
def createModel(self, inputs, outputs, hiddenLayers, activationType):
    model = Sequential()
    if len(hiddenLayers) == 0:
        model.add(Dense(self.output_size, input_shape=(self.input_size,), init='lecun_uniform'))
        model.add(Activation("linear"))
    else:
        model.add(Dense(hiddenLayers[0], input_shape=(self.input_size,), init='lecun_uniform'))
        if activationType == "LeakyReLU":
            model.add(LeakyReLU(alpha=0.01))
        else:
            model.add(Activation(activationType))
        for index in range(1, len(hiddenLayers) - 1):
            layerSize = hiddenLayers[index]
            model.add(Dense(layerSize, init='lecun_uniform'))
            if activationType == "LeakyReLU":
                model.add(LeakyReLU(alpha=0.01))
            else:
                model.add(Activation(activationType))
        model.add(Dense(self.output_size, init='lecun_uniform'))
        model.add(Activation("linear"))
    optimizer = optimizers.RMSprop(lr=1, rho=0.9, epsilon=1e-06)
    model.compile(loss="mse", optimizer=optimizer)
    return model
def get_model():
    model = Sequential()
    model.add(Dense(1024, init='normal', input_dim=460))
    model.add(LeakyReLU(0.3))
    model.add(Dropout(0.5))
    model.add(Dense(1024, init='normal'))
    model.add(LeakyReLU(0.3))
    model.add(Dropout(0.5))
    model.add(Dense(512, init='normal'))
    model.add(LeakyReLU(0.3))
    model.add(Dropout(0.5))
    model.add(Dense(1, init='normal'))
    return model
def initAgent(neurons=512, layers=1, lr=1e-3,
              moment=0.9, width=19, alpha=0.1):
    """Initialize the agent: specify the number of neurons and hidden layers."""
    model = Sequential()
    model.add(Dense(2 * width**2, init='lecun_uniform',
                    input_shape=(2 * width**2,)))
    model.add(LeakyReLU(alpha=alpha))
    for i in range(layers):
        model.add(Dense(neurons, init='lecun_uniform'))
        model.add(LeakyReLU(alpha=alpha))
        model.add(Dropout(0.2))
    model.add(Dense(width**2, init='lecun_uniform'))
    # use a linear output layer to generate real-valued outputs
    model.add(Activation('linear'))
    # opt = RMSprop(lr=lr)
    opt = SGD(lr=lr, momentum=moment, decay=1e-18, nesterov=False)
    model.compile(loss='mse', optimizer=opt)
    return model
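A usage sketch for the agent network (a hypothetical Q-learning fragment; assumes NumPy as np):

import numpy as np

agent = initAgent(neurons=512, layers=1, width=19)
state = np.random.rand(1, 2 * 19 ** 2)   # flattened two-plane 19x19 board
q_values = agent.predict(state)          # one real-valued Q per board position
action = int(np.argmax(q_values))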
def __initial_conv_block_imagenet(input, weight_decay=5e-4):
    '''Adds the initial conv block, with batch norm and LeakyReLU, for the
    Inception-ResNeXt network.
    Args:
        input: input tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    return x
def conv_block(x0, scale):
    x = Conv2D(int(64 * scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64 * scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(256 * scale), (1, 1))(x)
    x = InstanceNormalization()(x)
    x1 = Conv2D(int(256 * scale), (1, 1))(x0)
    x1 = InstanceNormalization()(x1)
    x = Add()([x, x1])
    x = LeakyReLU()(x)
    return x
def mnist_generator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)
    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale * 2, num_id=3)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(int(1024 * scale), (1, 1))(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)
    return Model(x0, x)
def mnist_discriminator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)
    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale * 2, num_id=3)
    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(1, (3, 3), strides=(2, 2), padding='same')(x)
    x = GlobalAveragePooling2D()(x)  # Flatten
    x = Activation('sigmoid')(x)
    return Model(x0, x)
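A sketch of wiring the two models into a combined adversarial graph (a standard GAN composition, not shown in the original; note that this generator maps images to images, CycleGAN-style, rather than noise to images):

g = mnist_generator()
d = mnist_discriminator()
d.compile(optimizer='adam', loss='binary_crossentropy')
d.trainable = False                # freeze the discriminator inside the combined model
src = Input((28, 28, 1))
combined = Model(src, d(g(src)))   # the generator's output is judged by the discriminator
combined.compile(optimizer='adam', loss='binary_crossentropy')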
def create_network(**kwargs):
    defaults = {"timesteps": 128, "data_dim": 15}
    params = dict(defaults)  # copy so repeated calls don't mutate the defaults
    params.update(**kwargs)
    network = Sequential()
    network.add(LSTM(output_dim=16,
                     activation='sigmoid',
                     inner_activation='hard_sigmoid',
                     input_shape=(params['timesteps'], params['data_dim'])))
    network.add(Dropout(0.15))
    network.add(Dense(1))
    # network.add(LeakyReLU(alpha=0.5))
    network.add(Activation('relu'))
    network.compile(optimizer='rmsprop',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])
    return network
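A smoke-test usage sketch (dummy data; assumes NumPy as np):

import numpy as np

net = create_network(timesteps=128, data_dim=15)
X = np.random.rand(4, 128, 15)
scores = net.predict(X)   # one non-negative score per sequence (final ReLU)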
def build_model(self):
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=120, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(input_dim=120, output_dim=280, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(input_dim=280, output_dim=100, init='uniform', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(Dense(input_dim=100, output_dim=2, init='uniform', activation='softmax'))
    # model.add(Activation('softmax'))
    sgd = SGD(lr=0.015, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)