def build_model(self):
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=112, init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(input_dim=112, output_dim=128, init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(input_dim=128, output_dim=68, init='he_normal'))
    model.add(LeakyReLU(alpha=.00003))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(input_dim=68, output_dim=2, init='he_normal'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
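# NOTE: the build_model snippets in this file use the legacy Keras (~1.x) API
# (init=, output_dim=, class_mode=). A minimal import block they rely on is
# sketched below (reconstructed, not from the original file); nn_input_dim_NN
# and the KerasClassifier wrapper are defined elsewhere in the surrounding project.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.advanced_activations import LeakyReLU, PReLU, ELU
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD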
def build_model(self):
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=310, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(BatchNormalization())
    model.add(Dropout(0.6))
    model.add(Dense(input_dim=310, output_dim=252, init='he_normal'))
    model.add(PReLU(init='zero'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(input_dim=252, output_dim=128, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(Dense(input_dim=128, output_dim=2, init='he_normal', activation='softmax'))
    # model.add(Activation('softmax'))
    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=62, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(Dropout(0.3))
    model.add(Dense(input_dim=62, output_dim=158, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(Dropout(0.25))
    model.add(Dense(input_dim=158, output_dim=20, init='he_normal'))
    model.add(PReLU(init='zero'))
    model.add(Dropout(0.2))
    model.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    # model.add(Activation('softmax'))
    sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=140, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.6))
    model.add(Dense(input_dim=140, output_dim=250, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(input_dim=250, output_dim=90, init='uniform', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(Dense(input_dim=90, output_dim=2, init='uniform', activation='softmax'))
    # model.add(Activation('softmax'))
    sgd = SGD(lr=0.013, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)
def build_model(self):
    model = Sequential()
    # input_shape must be a tuple, not a bare int
    model.add(Dense(64, input_shape=(nn_input_dim_NN,), init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(Dropout(0.5))
    model.add(Dense(2, init='he_normal'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.1, decay=1e-5, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=["accuracy"])
    return KerasClassifier(nn=model, **self.params)
# ----- END first stage stacking model -----
# ----- Second stage stacking model -----
def build_model(self):
    model = Sequential()
    # input_shape must be a tuple, not a bare int
    model.add(Dense(64, input_shape=(nn_input_dim_NN,), init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim, init='he_normal'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.1, decay=1e-5, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=["accuracy"])
    return KerasClassifier(nn=model, **self.params)
# ----- END second stage stacking model -----
# model_zoo.py — from the visual_turing_test-tutorial project by mateuszmalinowski
def deep_mlp(self):
    """
    Deep Multilayer Perceptron.
    """
    if self._config.num_mlp_layers == 0:
        self.add(Dropout(0.5))
    else:
        for j in range(self._config.num_mlp_layers):
            self.add(Dense(self._config.mlp_hidden_dim))
            if self._config.mlp_activation == 'elu':
                self.add(ELU())
            elif self._config.mlp_activation == 'leaky_relu':
                self.add(LeakyReLU())
            elif self._config.mlp_activation == 'prelu':
                self.add(PReLU())
            else:
                self.add(Activation(self._config.mlp_activation))
            self.add(Dropout(0.5))
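# Hypothetical usage sketch for deep_mlp: it only needs a config object carrying
# these three attributes; SimpleNamespace stands in for the project's real config class.
from types import SimpleNamespace
example_config = SimpleNamespace(num_mlp_layers=2,
                                 mlp_hidden_dim=512,
                                 mlp_activation='leaky_relu')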
def Discriminator(image_size=64):
    L = int(image_size)
    images = Input(shape=(L, L, 3))
    x = Conv2D(64, (4, 4), strides=(2, 2),
               kernel_initializer=init, padding='same')(images)  # shape (L/2, L/2, 64)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(128, (4, 4), strides=(2, 2),
               kernel_initializer=init, padding='same')(x)  # shape (L/4, L/4, 128)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(256, (4, 4), strides=(2, 2),
               kernel_initializer=init, padding='same')(x)  # shape (L/8, L/8, 256)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(512, (4, 4), strides=(2, 2),
               kernel_initializer=init, padding='same')(x)  # shape (L/16, L/16, 512)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    outputs = Dense(1)(x)
    model = Model(inputs=images, outputs=outputs)
    model.summary()
    return model
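# Usage sketch: `init` is a free variable in Discriminator, bound elsewhere in the
# original script; DCGAN-style code commonly uses a narrow RandomNormal (an
# assumption, not taken from this snippet).
from keras.initializers import RandomNormal
init = RandomNormal(stddev=0.02)
disc = Discriminator(image_size=64)  # 64x64 RGB in, single unbounded logit out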
def fc_block1(x, n=1000, d=0.5):
    x = Dense(n)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Dropout(d)(x)
    return x
def fc_identity(input_tensor, n=1000, d=0.5):
    x = fc_block1(input_tensor, n, d)
    x = Dense(int(input_tensor.shape[1]))(x)
    x = merge([x, input_tensor], mode='sum', concat_axis=1)
    x = LeakyReLU()(x)
    return x
def fc_inception(input_tensor, n=3000, d=0.5):
    br1 = Dense(n)(input_tensor)
    br1 = LeakyReLU()(br1)
    br1 = BatchNormalization()(br1)
    br1 = Dropout(d)(br1)
    br1 = Dense(int(n/3.0))(br1)

    br2 = Dense(n)(input_tensor)
    br2 = BatchNormalization()(br2)
    br2 = ELU()(br2)
    br2 = Dropout(d)(br2)
    br2 = Dense(int(n/3.0))(br2)

    br3 = Dense(int(n/3.0))(input_tensor)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n/3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n/3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)

    x = merge([br1, br2, br3], mode='concat', concat_axis=1)
    return x
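# Usage sketch (assumed shapes): composing the FC blocks with the functional API;
# the `merge` calls pin these helpers to the Keras 1.x API.
from keras.layers import Input
from keras.models import Model
inp = Input(shape=(1000,))
x = fc_identity(inp, n=1000, d=0.5)  # residual FC block, keeps width at 1000
x = fc_inception(x, n=3000, d=0.5)   # three parallel branches, concatenated
model = Model(input=inp, output=x)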
def create_critic_network(self, state_size, action_dim):
    print("Now we build the model")
    S = Input(shape=[state_size])
    A = Input(shape=[action_dim], name='action2')
    ## Original Version
    w1 = Dense(HIDDEN1_UNITS)(S)
    w1 = LeakyReLU()(w1)
    h1 = Dense(HIDDEN2_UNITS)(w1)
    h1 = LeakyReLU()(h1)
    a1 = Dense(HIDDEN2_UNITS)(A)
    a1 = LeakyReLU()(a1)
    h2 = layers.add([h1, a1])
    h3 = Dense(HIDDEN2_UNITS)(h2)
    h3 = LeakyReLU()(h3)
    h3 = Dense(HIDDEN2_UNITS)(h3)
    h3 = LeakyReLU()(h3)
    h3 = Dense(HIDDEN1_UNITS)(h3)
    h3 = LeakyReLU()(h3)
    V = Dense(action_dim, activation='linear')(h3)
    model = Model(inputs=[S, A], outputs=V)
    adam = Adam(lr=self.LEARNING_RATE)
    model.compile(loss='mse', optimizer=adam)
    return model, A, S
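# Hypothetical training step for this DDPG-style critic (illustrative names only;
# HIDDEN1_UNITS / HIDDEN2_UNITS and self.LEARNING_RATE come from the agent class):
#   y = rewards + GAMMA * target_critic.predict([next_states, target_actions])
#   critic.train_on_batch([states, actions], y)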
def build_discriminator():
    # build a relatively standard conv net, with LeakyReLUs as suggested in
    # the reference paper
    cnn = Sequential()
    cnn.add(Conv2D(32, 3, padding='same', strides=2,
                   input_shape=(1, 28, 28)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Conv2D(64, 3, padding='same', strides=1))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Conv2D(128, 3, padding='same', strides=2))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Conv2D(256, 3, padding='same', strides=1))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Flatten())

    image = Input(shape=(1, 28, 28))
    features = cnn(image)

    # first output (name=generation) is whether or not the discriminator
    # thinks the image that is being shown is fake, and the second output
    # (name=auxiliary) is the class that the discriminator thinks the image
    # belongs to.
    fake = Dense(1, activation='sigmoid', name='generation')(features)
    aux = Dense(10, activation='softmax', name='auxiliary')(features)

    return Model(image, [fake, aux])
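# Hedged compile sketch for the two-headed AC-GAN discriminator above. The Keras
# ACGAN example pairs the real/fake head with binary crossentropy and the class
# head with sparse categorical crossentropy; the optimizer here is an assumption.
disc = build_discriminator()
disc.compile(optimizer='adam',
             loss=['binary_crossentropy', 'sparse_categorical_crossentropy'])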
def build_discriminator():
    # build a relatively standard conv net, with LeakyReLUs as suggested in
    # the reference paper
    cnn = Sequential()
    cnn.add(Convolution2D(32, 3, 3, border_mode='same', subsample=(2, 2),
                          input_shape=(1, 28, 28)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Convolution2D(128, 3, 3, border_mode='same', subsample=(2, 2)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Convolution2D(256, 3, 3, border_mode='same', subsample=(1, 1)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    cnn.add(Flatten())

    image = Input(shape=(1, 28, 28))
    features = cnn(image)

    # first output (name=generation) is whether or not the discriminator
    # thinks the image that is being shown is fake, and the second output
    # (name=auxiliary) is the class that the discriminator thinks the image
    # belongs to.
    fake = Dense(1, activation='sigmoid', name='generation')(features)
    aux = Dense(10, activation='softmax', name='auxiliary')(features)

    return Model(input=image, output=[fake, aux])
def test_leaky_relu():
    from keras.layers.advanced_activations import LeakyReLU
    for alpha in [0., .5, -1.]:
        layer_test(LeakyReLU, kwargs={'alpha': alpha},
                   input_shape=(2, 3, 4))
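# layer_test is Keras's own layer-checking helper (keras.utils.test_utils.layer_test
# in older releases): it builds the layer, runs random data through it, and
# round-trips the layer through get_config/from_config.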
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = merge([gen_input, aux_input], mode="concat", concat_axis=-1)

    # Dense Layer 1
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 2
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 3
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 4
    x = Dense(400)(x)
    x = BatchNormalization()(x)
    x = Activation("tanh")(x)

    generator_model = Model(input=[gen_input, aux_input], output=[x], name=model_name)
    return generator_model
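# Usage sketch (assumed inputs): 100-dim noise plus a 47-dim auxiliary vector in,
# a 400-sample tanh-scaled vector out; the batch size of 8 is illustrative.
import numpy as np
g = generator_model()
fake = g.predict([np.random.normal(size=(8, 100)),
                  np.random.uniform(size=(8, 47))])  # -> shape (8, 400)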
def discriminator_model(model_name="discriminator"):
    # Merge sample and auxiliary inputs
    disc_input = Input(shape=(400,), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxilary_input")
    x = merge([disc_input, aux_input], mode="concat", concat_axis=-1)

    # Dense Layer 1
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 2
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 3
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 4
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 5
    x = Dense(1)(x)
    x = Activation("sigmoid")(x)

    discriminator_model = Model(input=[disc_input, aux_input], output=[x], name=model_name)
    return discriminator_model
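# Hedged compile sketch (optimizer and loss are assumptions; the original
# training code is not part of this snippet):
d = discriminator_model()
d.compile(optimizer='adam', loss='binary_crossentropy')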
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    # Dense Layer 1
    x = Dense(1024)(gen_input)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 2
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 3
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 4
    x = Dense(400)(x)
    x = BatchNormalization()(x)
    x = Activation("tanh")(x)
    generator_model = Model(input=gen_input, output=[x], name=model_name)
    return generator_model
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400,), name="discriminator_input")
    # Dense Layer 1
    x = Dense(1024)(disc_input)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 2
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 3
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 4
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)
    # Dense Layer 5
    x = Dense(1)(x)
    x = Activation("sigmoid")(x)
    discriminator_model = Model(input=disc_input, output=[x], name=model_name)
    return discriminator_model