def build_discriminator(self):
    """Build the GAN discriminator: a flattened-image MLP.

    Returns a Keras Model mapping an image of shape
    (img_rows, img_cols, channels) to a sigmoid validity score.
    """
    img_shape = (self.img_rows, self.img_cols, self.channels)

    net = Sequential()
    net.add(Flatten(input_shape=img_shape))
    net.add(Dense(512))
    net.add(LeakyReLU(alpha=0.2))
    net.add(Dense(256))
    net.add(LeakyReLU(alpha=0.2))
    net.add(Dense(1, activation='sigmoid'))
    net.summary()

    img = Input(shape=img_shape)
    validity = net(img)
    return Model(img, validity)
# Python usage examples of the Flatten() class
def build_encoder(self):
    """Build the encoder mapping an image to a latent code.

    Returns a Keras Model: img (self.img_shape) -> z (self.latent_dim).
    """
    net = Sequential()
    net.add(Flatten(input_shape=self.img_shape))
    net.add(Dense(512))
    net.add(LeakyReLU(alpha=0.2))
    net.add(BatchNormalization(momentum=0.8))
    net.add(Dense(512))
    net.add(LeakyReLU(alpha=0.2))
    net.add(BatchNormalization(momentum=0.8))
    net.add(Dense(self.latent_dim))
    net.summary()

    img = Input(shape=self.img_shape)
    z = net(img)
    return Model(img, z)
def build_discriminator(self):
    """Build a joint discriminator over (z, img) pairs.

    The latent vector and the flattened image are concatenated and
    scored by a 3-layer MLP with dropout; presumably a BiGAN-style
    critic (judges pairs, not images alone).
    Returns Model([z, img] -> validity in [0, 1]).
    """
    z = Input(shape=(self.latent_dim, ))
    img = Input(shape=self.img_shape)
    features = concatenate([z, Flatten()(img)])

    x = features
    # Three identical Dense/LeakyReLU/Dropout stages.
    for _ in range(3):
        x = Dense(1024)(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Dropout(0.5)(x)
    validity = Dense(1, activation="sigmoid")(x)

    return Model([z, img], validity)
def build_model(self, dataset, nb_classes):
    """Assemble a small VGG-style CNN classifier into self.model.

    dataset: object whose x_train array supplies the per-sample input
        shape (dataset.x_train.shape[1:]).
    nb_classes: number of softmax output classes.
    Side effect only: the model is stored on self.model (not returned).
    """
    net = Sequential()

    # Conv block 1: two 3x3 convs with 32 filters, pool, dropout.
    net.add(Convolution2D(32, (3, 3), padding='same',
                          input_shape=dataset.x_train.shape[1:]))
    net.add(Activation('relu'))
    net.add(Convolution2D(32, (3, 3)))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Dropout(0.25))

    # Conv block 2: two 3x3 convs with 64 filters, pool, dropout.
    net.add(Convolution2D(64, (3, 3), padding='same'))
    net.add(Activation('relu'))
    net.add(Convolution2D(64, (3, 3)))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Dropout(0.25))

    # Classifier head.
    net.add(Flatten())
    net.add(Dense(512))
    net.add(Activation('relu'))
    net.add(Dropout(0.5))
    net.add(Dense(nb_classes))
    net.add(Activation('softmax'))
    net.summary()

    self.model = net
def get_model(img_channels, img_width, img_height, dropout=0.5):
    """Build a small 3-conv-block binary classifier.

    img_channels, img_width, img_height: channels-first input shape.
    dropout: dropout rate applied before the output layer.
    Returns an uncompiled Sequential model ending in one sigmoid unit.

    Fix: the original used the deprecated Keras 1 positional call
    Convolution2D(32, 3, 3); in Keras 2 the kernel size is a single
    (tuple) argument, matching the style used elsewhere in this file.
    """
    model = Sequential()
    model.add(Convolution2D(32, (3, 3),
                            input_shape=(img_channels, img_width, img_height)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
def get_model(shape, dropout=0.5, path=None):
    """Build a wide 2-conv regression-style network (single linear output).

    shape: input shape for the first conv layer.
    dropout: rate for the SpatialDropout2D after the conv stack.
    path: accepted for interface compatibility; unused here.
    Returns an uncompiled Sequential model.

    Fix: replaced the deprecated Keras 1 arguments (positional kernel
    dims, border_mode=) with the Keras 2 API (kernel_size tuple,
    padding=), matching the modern style used elsewhere in this file.
    """
    print('building neural network')
    model = Sequential()
    model.add(Convolution2D(512, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(512, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(dropout))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # Final Dense(1) with no activation: linear output.
    model.add(Dense(1))
    return model
def _build_model(self):
    """Neural net for the Deep-Q learning agent.

    A deep MLP: 5 state features in, a tapering stack of ReLU layers
    with periodic dropout, and 4 linear Q-value outputs.
    Compiled with MSE loss and Adam at self.learning_rate.
    """
    model = Sequential()
    model.add(Dense(2048, input_dim=5, activation='relu'))
    # Taper the width down, inserting Dropout(0.5) after each group.
    for group in ((1024,), (512, 256), (128, 64), (32, 16)):
        for units in group:
            model.add(Dense(units, activation='relu'))
        model.add(Dropout(0.5))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(4, activation='linear'))
    model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
    return model
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
    """Set up the 3 conv layer keras model (DeepMind Atari network).

    nbClasses: number of outputs.
    inputShape: the input shape without the batch size.
    includeTop: if False, return only the convolutional trunk.

    Fix: replaced the deprecated Keras 1 Conv2D arguments (positional
    kernel dims, subsample=, border_mode=) with the Keras 2 API
    (kernel_size tuple, strides=, padding=); layer names unchanged.
    """
    inp = Input(shape=inputShape)
    x = Conv2D(32, (8, 8), strides=(4, 4), activation='relu',
               padding='same', name='conv1')(inp)
    x = Conv2D(64, (4, 4), strides=(2, 2), activation='relu',
               padding='same', name='conv2')(x)
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='conv3')(x)
    if includeTop:
        x = Flatten(name='flatten')(x)
        x = Dense(512, activation='relu', name='dense1')(x)
        out = Dense(nbClasses, activation='softmax', name='output')(x)
    else:
        out = x
    model = Model(inp, out)
    return model
# NOTE(review): this is an exact duplicate of the deepMindAtariNet defined
# above; at import time this second definition shadows the first. Consider
# deleting one of them.
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
    """Set up the 3 conv layer keras model (DeepMind Atari network).

    nbClasses: number of outputs.
    inputShape: the input shape without the batch size.
    includeTop: if False, return only the convolutional trunk.

    Fix: replaced the deprecated Keras 1 Conv2D arguments (positional
    kernel dims, subsample=, border_mode=) with the Keras 2 API
    (kernel_size tuple, strides=, padding=); layer names unchanged.
    """
    inp = Input(shape=inputShape)
    x = Conv2D(32, (8, 8), strides=(4, 4), activation='relu',
               padding='same', name='conv1')(inp)
    x = Conv2D(64, (4, 4), strides=(2, 2), activation='relu',
               padding='same', name='conv2')(x)
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='conv3')(x)
    if includeTop:
        x = Flatten(name='flatten')(x)
        x = Dense(512, activation='relu', name='dense1')(x)
        out = Dense(nbClasses, activation='softmax', name='output')(x)
    else:
        out = x
    model = Model(inp, out)
    return model
def test_find_activation_layer():
    """find_activation_layer should map each weight layer (and channel 0)
    to the activation layer that consumes its output."""
    n_filters = 1   # both conv layers
    n_units = 1     # first dense layer

    model = Sequential()
    model.add(Conv2D(n_filters, [3, 3], input_shape=(28, 28, 1),
                     data_format="channels_last", name='conv_1'))
    model.add(Activation('relu', name='act_1'))
    model.add(MaxPool2D((2, 2), name='pool_1'))
    model.add(Conv2D(n_filters, [3, 3], data_format="channels_last",
                     name='conv_2'))
    model.add(Activation('relu', name='act_2'))
    model.add(MaxPool2D((2, 2), name='pool_2'))
    model.add(Flatten(name='flat_1'))
    model.add(Dense(n_units, name='dense_1'))
    model.add(Activation('relu', name='act_3'))
    model.add(Dense(10, name='dense_2'))
    model.add(Activation('softmax', name='act_4'))

    expected = [('conv_1', 'act_1'), ('conv_2', 'act_2'),
                ('dense_1', 'act_3'), ('dense_2', 'act_4')]
    for src, act in expected:
        assert find_activation_layer(model.get_layer(src), 0) == \
            (model.get_layer(act), 0)
def model_cnn(net_layers, input_shape):
    """Build a configurable CNN from a layer-spec dict.

    net_layers['conv_layers']: list of 5-tuples
        (filters, kernel_size, batch_norm, dropout, max_pool).
    net_layers['dense_layers']: list of 3-tuples
        (num_neurons, batch_norm, dropout).
    Returns a Model ending in a single sigmoid unit.
    """
    inp = Input(shape=input_shape)
    x = inp

    for n_filters, k_size, use_bn, use_do, use_mp in net_layers['conv_layers']:
        x = Conv2D(filters=n_filters, kernel_size=k_size, activation='relu')(x)
        # Order matters and is preserved: pool, then BN, then dropout.
        if use_mp:
            x = MaxPooling2D()(x)
        if use_bn:
            x = BatchNormalization()(x)
        if use_do:
            x = Dropout(0.2)(x)

    x = Flatten()(x)

    for n_units, use_bn, use_do in net_layers['dense_layers']:
        x = Dense(n_units)(x)
        x = Activation('relu')(x)
        if use_bn:
            x = BatchNormalization()(x)
        if use_do:
            x = Dropout(0.2)(x)

    x = Dense(1)(x)
    out = Activation('sigmoid')(x)
    return Model(inp, out)
# %%
# CNN architecture spec (consumed by model_cnn above)
# conv_layers -> [(filters, kernel_size, BatchNormalization, Dropout, MaxPooling)]
# dense_layers -> [(num_neurons, BatchNormalization, Dropout)]
def build(input_shape, num_outputs,
          block_fn, repetitions):
    """Assemble a ResNet-style classifier.

    input_shape: spatial input shape (H, W, C).
    num_outputs: number of softmax classes.
    block_fn: residual block constructor passed to _residual_block.
    repetitions: block repetitions per stage; filters double each stage.
    NOTE(review): relies on module-level `init` and `_residual_block`
    defined elsewhere in this file.
    """
    inputs = Input(shape=input_shape)

    # Stem: 7x7/2 conv + BN + ReLU, then 3x3/2 max-pool.
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    filters = 64
    for i, r in enumerate(repetitions):
        x = _residual_block(block_fn, filters=filters, repetitions=r,
                            is_first_layer=(i == 0))(x)
        filters *= 2

    # Global average pool over the remaining spatial extent, then classify.
    _, w, h, ch = K.int_shape(x)
    x = AveragePooling2D(pool_size=(w, h), strides=(1, 1))(x)
    x = Flatten()(x)
    outputs = Dense(num_outputs, kernel_initializer=init,
                    activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)
# Source: example_gan_cifar10.py
# Project: Deep-Learning-with-Keras
# Author: PacktPublishing
# (scraped page metadata: 25 reads, 0 bookmarks, 0 likes, 0 comments)
def model_discriminator():
    """CIFAR-10 GAN discriminator.

    Four L1L2-regularized conv stages (64 -> 128 -> 256 -> 1 filters)
    with 2x2 max-pooling and LeakyReLU between them, finished by a 4x4
    average pool, flatten, and a sigmoid over the single remaining unit.
    Returns an uncompiled Sequential model.

    Fix: use integer division for the filter counts — under Python 3,
    nch / 4 is a float, which Convolution2D rejects as a filter count.
    (The Keras-1-era helpers l1l2 / W_regularizer / dim_ordering_shape
    are kept, since this block consistently targets that API.)
    """
    nch = 256
    h = 5
    reg = lambda: l1l2(l1=1e-7, l2=1e-7)

    c1 = Convolution2D(nch // 4, h, h, border_mode='same', W_regularizer=reg(),
                       input_shape=dim_ordering_shape((3, 32, 32)))
    c2 = Convolution2D(nch // 2, h, h, border_mode='same', W_regularizer=reg())
    c3 = Convolution2D(nch, h, h, border_mode='same', W_regularizer=reg())
    c4 = Convolution2D(1, h, h, border_mode='same', W_regularizer=reg())

    model = Sequential()
    model.add(c1)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c2)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c3)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c4)
    model.add(AveragePooling2D(pool_size=(4, 4), border_mode='valid'))
    model.add(Flatten())
    model.add(Activation('sigmoid'))
    return model
def cnn3adam_slim(input_shape, n_classes):
    """Slim 3-conv-layer 1D CNN ('cnn3adam').

    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3).
    Compiled with categorical cross-entropy and Adam.
    """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D(kernel_size=50, filters=32, strides=5,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=64, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    # Two identical dense blocks before the classifier.
    for _ in range(2):
        model.add(Dense(250, activation='elu',
                        kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model
def cnn3adam_filter(input_shape, n_classes):
    """Wide-filter 3-conv-layer 1D CNN ('cnn3adam_filter').

    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3).
    Prints a warning steering callers to the L2-regularized variant.
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    # NOTE: the 'fc1'/'fc2' names sit on the BN layers, not the Dense ones
    # (preserved as-is; downstream code may look layers up by these names).
    model.add(BatchNormalization(name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc2'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model
def cnn3adam_filter_l2(input_shape, n_classes):
    """Wide-filter 3-conv-layer 1D CNN with L2 weight regularization.

    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3).
    Prints a warning steering callers to the stronger-L2 variant.
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use more L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    l2 = keras.regularizers.l2
    model = Sequential(name='cnn3adam_filter_l2')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5,
                     input_shape=input_shape, kernel_initializer='he_normal',
                     activation='relu', kernel_regularizer=l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    # Two named dense blocks: fc1/bn1/do1, fc2/bn2/do2.
    for idx in (1, 2):
        model.add(Dense(1500, activation='relu',
                        kernel_initializer='he_normal', name='fc%d' % idx))
        model.add(BatchNormalization(name='bn%d' % idx))
        model.add(Dropout(0.5, name='do%d' % idx))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model
def cnn3adam_filter_morel2_slim(input_shape, n_classes):
    """Slim 3-conv-layer 1D CNN with stronger L2 regularization.

    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3).
    The first conv carries a heavier penalty (0.05) than the rest (0.01).
    """
    l2 = keras.regularizers.l2
    model = Sequential(name='cnn3adam_filter_morel2_slim')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5,
                     input_shape=input_shape, kernel_initializer='he_normal',
                     activation='relu', kernel_regularizer=l2(0.05)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=128, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=256, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    # Two named dense blocks: fc1/bn1/do1, fc2/bn2/do2.
    for idx in (1, 2):
        model.add(Dense(512, activation='relu',
                        kernel_initializer='he_normal', name='fc%d' % idx))
        model.add(BatchNormalization(name='bn%d' % idx))
        model.add(Dropout(0.5, name='do%d' % idx))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model
def cnn1d(input_shape, n_classes):
    """Three-block 1D CNN classifier.

    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 1).
    Prints the running output shape after each block (diagnostic output,
    preserved from the original).

    Fix: dropped the input_shape= arguments that were passed to the
    non-first Conv1D layers — Keras ignores input_shape on any layer
    that is not first in a Sequential model, so they were dead and
    misleading.
    """
    model = Sequential(name='1D CNN')
    model.add(Conv1D(kernel_size=50, filters=150, strides=5,
                     input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=8, filters=200, strides=2, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=10, strides=2))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=8, filters=400, strides=2, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Flatten())
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def cnn1(input_shape, n_classes):
    """Five stacked strided Conv1D blocks, no max-pooling.

    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3).
    Downsampling comes entirely from stride-2 convolutions.
    """
    model = Sequential(name='no_MP_small_filters')
    for i, n_filters in enumerate((64, 64, 128, 128, 150)):
        conv_kwargs = dict(kernel_size=10, filters=n_filters, strides=2,
                           kernel_initializer='he_normal', activation='elu')
        if i == 0:
            conv_kwargs['input_shape'] = input_shape
        model.add(Conv1D(**conv_kwargs))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
    model.add(Flatten())
    # Two identical dense blocks before the classifier.
    for _ in range(2):
        model.add(Dense(1024, activation='elu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
def cnn2(input_shape, n_classes):
    """Three strided Conv1D blocks, each followed by max-pooling.

    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3).
    """
    model = Sequential(name='MP_small_filters')
    for i, n_filters in enumerate((64, 64, 128)):
        conv_kwargs = dict(kernel_size=10, filters=n_filters, strides=2,
                           kernel_initializer='he_normal', activation='elu')
        if i == 0:
            conv_kwargs['input_shape'] = input_shape
        model.add(Conv1D(**conv_kwargs))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(MaxPooling1D())
    model.add(Flatten())
    # Two identical dense blocks before the classifier.
    for _ in range(2):
        model.add(Dense(500, activation='elu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model