def tsinalis(input_shape, n_classes):
    """
    Expected input shape for the Conv1D front end: (batch, steps, channels) = (None, 15000, 1)
    """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(filters=20, kernel_size=200, activation='relu', input_shape=input_shape))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    # Stack the 20 feature maps as rows of a 2-D map for the Conv2D stage
    model.add(keras.layers.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(filters=400, kernel_size=(20, 30), activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
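A minimal smoke test for the builder above; the 15000-sample, single-channel input follows the docstring, while the random arrays and the 5-class setup are placeholder assumptions:

import numpy as np

model = tsinalis(input_shape=(15000, 1), n_classes=5)
x = np.random.randn(4, 15000, 1).astype('float32')              # 4 fake single-channel epochs
y = keras.utils.to_categorical(np.random.randint(0, 5, 4), 5)
model.train_on_batch(x, y)                                      # one gradient step as a sanity check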
def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape=[48, 48, 3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)
    classifier = Dense(2, activation='softmax', name='conv6-1')(x)
    bbox_regress = Dense(4, name='conv6-2')(x)
    landmark_regress = Dense(10, name='conv6-3')(x)
    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)
    return model
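A hedged usage sketch for the O-Net above. The (x - 127.5) / 127.5 scaling is the common MTCNN convention rather than something stated in the snippet, and model48.h5 must exist locally:

import numpy as np

onet = create_Kao_Onet('model48.h5')
crop = np.random.randint(0, 256, (1, 48, 48, 3)).astype('float32')
crop = (crop - 127.5) / 127.5           # assumed normalisation; check against the training code
cls, bbox, landmarks = onet.predict(crop)
print(cls.shape, bbox.shape, landmarks.shape)    # (1, 2), (1, 4), (1, 10)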
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    # input_shape and nb_class are module-level globals in the original source
    model = Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, (3, 3), padding='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    train_x, train_y = train_data
    history = model.fit(train_x, train_y, epochs=nb_epoch,
                        validation_data=validation_data)
    return model, history
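Since the function reads input_shape and nb_class from module scope, a sketch of driving it with MNIST-shaped random data, assuming the sketch lives in the same module (both globals are set here as assumptions):

import numpy as np
from keras.utils import to_categorical

input_shape, nb_class = (28, 28, 1), 10          # globals assumed by make_teacher_model
x = np.random.rand(128, 28, 28, 1).astype('float32')
y = to_categorical(np.random.randint(0, nb_class, 128), nb_class)
teacher, history = make_teacher_model((x, y), validation_data=(x, y), nb_epoch=1)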
def build_network(num_actions, agent_history_length, resized_width, resized_height):
    state = tf.placeholder("float", [None, agent_history_length, resized_width, resized_height])
    inputs_v = Input(shape=(agent_history_length, resized_width, resized_height,))
    # model_v = Permute((2, 3, 1))(inputs_v)
    model_v = Convolution2D(16, (8, 8), strides=(4, 4), activation='relu', padding='same')(inputs_v)
    model_v = Convolution2D(32, (4, 4), strides=(2, 2), activation='relu', padding='same')(model_v)
    model_v = Flatten()(model_v)
    model_v = Dense(512)(model_v)
    model_v = PReLU()(model_v)
    action_probs = Dense(num_actions, activation='softmax', name="p")(model_v)
    state_value = Dense(1, activation='linear', name="v")(model_v)
    value_network = Model(inputs=inputs_v, outputs=[state_value, action_probs])
    return state, value_network
def build_simpleCNN(input_shape=(32, 32, 3), num_output=10):
    h, w, nch = input_shape
    assert h == w, 'expect input shape (h, w, nch), h == w'
    images = Input(shape=(h, h, nch))
    # `init` is a module-level initializer in the original source, e.g. 'glorot_uniform'
    x = Conv2D(64, (4, 4), strides=(1, 1),
               kernel_initializer=init, padding='same')(images)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(128, (4, 4), strides=(1, 1),
               kernel_initializer=init, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    outputs = Dense(num_output, kernel_initializer=init,
                    activation='softmax')(x)
    model = Model(inputs=images, outputs=outputs)
    return model
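build_simpleCNN leaves compilation to the caller and `init` to module scope; a minimal sketch of exercising it on random CIFAR-10-shaped data, assuming it runs in the module that defines the function (the `init` value is an assumption):

import numpy as np
from keras.utils import to_categorical

init = 'he_normal'                                # assumed value for the module-level initializer
model = build_simpleCNN()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
x = np.random.rand(32, 32, 32, 3).astype('float32')
y = to_categorical(np.random.randint(0, 10, 32), 10)
model.fit(x, y, batch_size=8, epochs=1, verbose=0)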
Source: example_gan_convolutional.py, from the Deep-Learning-with-Keras project (author: PacktPublishing)
def model_discriminator(input_shape=(1, 28, 28), dropout_rate=0.5):
    # dim_ordering_input is a helper from the keras-adversarial package
    d_input = dim_ordering_input(input_shape, name="input_x")
    nch = 512
    # nch = 128
    # The convolutions are left linear; the LeakyReLU below provides the nonlinearity
    # (a built-in 'relu' here would zero the negatives and make LeakyReLU a no-op)
    H = Convolution2D(int(nch / 2), (5, 5), strides=(2, 2), padding='same')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Convolution2D(nch, (5, 5), strides=(2, 2), padding='same')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(int(nch / 2))(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(1, activation='sigmoid')(H)
    return Model(d_input, d_V)
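A sketch of the usual training wiring for a sigmoid discriminator like this one, labelling real batches 1 and generated batches 0; dim_ordering_input comes from the keras-adversarial helpers, so a channels-first MNIST setup is assumed:

import numpy as np

disc = model_discriminator()
disc.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
real = np.random.rand(16, 1, 28, 28).astype('float32')   # stand-in for real digits
fake = np.random.rand(16, 1, 28, 28).astype('float32')   # stand-in for generator samples
disc.train_on_batch(np.concatenate([real, fake]),
                    np.concatenate([np.ones(16), np.zeros(16)]))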
def create_Kao_Rnet(weight_path='model24.h5'):
    input = Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to enable arbitrary shape input
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(128, name='conv4')(x)
    x = PReLU(name='prelu4')(x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
def fGRU_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS + 1, MAX_WORDS), name="wordInputs", dtype='float32')
    wordInp = Flatten()(wordInputs)
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix],
                              mask_zero=False, trainable=True, name='wordEmbedding')(wordInp)
    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)
    head = GlobalAveragePooling1D()(hij)
    v6 = Dense(1, activation="sigmoid", name="dense")(head)
    model = Model(inputs=[wordInputs], outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
def fGlove_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS + 1, MAX_WORDS), name="wordInputs", dtype='float32')
    wordInp = Flatten()(wordInputs)
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix],
                              mask_zero=False, trainable=True, name='wordEmbedding')(wordInp)
    head = GlobalAveragePooling1D()(wordEmbedding)
    v6 = Dense(1, activation="sigmoid", name="dense")(head)
    model = Model(inputs=[wordInputs], outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
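A toy invocation of the two builders above; every dimension and the random embedding matrix are placeholders, chosen only so the tensors line up:

import numpy as np

MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU = 1000, 20, 5, 50, 32
embedding_matrix = np.random.rand(MAX_NB_WORDS, EMBEDDING_DIM)
model = fGlove_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM,
                   WORDGRU, embedding_matrix, DROPOUTPER=0.3)
docs = np.random.randint(0, MAX_NB_WORDS, (8, MAX_SENTS + 1, MAX_WORDS))
labels = np.random.randint(0, 2, (8, 1))
model.fit(docs, labels, epochs=1, verbose=0)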
def classifier(base_layers, input_rois, batch_size, nb_classes=3, trainable=False):
    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size, 14, 14, 2048)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size, 2048, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    # out = TimeDistributed(Dropout(0.4))(out)
    # out = TimeDistributed(Dense(2048, activation='relu'))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
def classifier(base_layers, input_rois, batch_size, nb_classes=3, trainable=False):
    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size, 14, 14, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size, 512, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
def classifier(base_layers, input_rois, batch_size, nb_classes=3, trainable=False):
    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size, 14, 14, 1024)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size, 1024, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # compile times on theano tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois, 7, 7, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 512, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
def create_actor_network(self, state_size, action_dim):
    """Create actor network."""
    print("[MESSAGE] Build actor network.")
    S = Input(shape=state_size)
    h_0 = Conv2D(32, (3, 3), padding="same",
                 kernel_regularizer=l2(0.0001),
                 activation="relu")(S)
    h_1 = Conv2D(32, (3, 3), padding="same",
                 kernel_regularizer=l2(0.0001),
                 activation="relu")(h_0)
    h_1 = AveragePooling2D(2, 2)(h_1)
    h_1 = Flatten()(h_1)
    h_1 = Dense(600, activation="relu")(h_1)
    A = Dense(action_dim, activation="softmax")(h_1)
    model = Model(inputs=S, outputs=A)
    return model, model.trainable_weights, S
def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a single value,
    representing whether the input is real or generated. Unlike normal GANs, the output is not
    sigmoid and does not represent a probability! Instead, the output should be as large and
    negative as possible for generated inputs and as large and positive as possible for real
    inputs.

    Note that the improved WGAN paper suggests that BatchNormalization should not be used in
    the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model
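The docstring insists the output is an unbounded score, which pairs with the Wasserstein loss rather than crossentropy. A minimal sketch using +1/-1 labels, as in the published Keras improved-WGAN example; the gradient penalty term from the paper and the optimizer choice are omitted/assumed here:

from keras import backend as K

def wasserstein_loss(y_true, y_pred):
    # y_true is +1 for one side and -1 for the other, so this is a signed mean score
    return K.mean(y_true * y_pred)

critic = make_discriminator()
critic.compile(optimizer='adam', loss=wasserstein_loss)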
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates model comprised of 2 convolutional layers followed by dense layers

    dense_layer_sizes: List of layer sizes. This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''
    model = Sequential()
    model.add(Conv2D(filters, kernel_size,
                     padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
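Taking hyperparameters as arguments makes this builder compatible with the scikit-learn wrapper; a hedged grid-search sketch (input_shape and num_classes must already exist as globals, and x_train/y_train stand for whatever one-hot-labelled images you have):

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=make_model)
param_grid = {'dense_layer_sizes': [[32], [64, 64]],
              'filters': [8], 'kernel_size': [3], 'pool_size': [2],
              'epochs': [1], 'batch_size': [32]}
search = GridSearchCV(clf, param_grid)
# search.fit(x_train, y_train)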
def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
def load_model(input_shape, num_classes):
    model = Sequential()
    model.add(Convolution2D(6, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding="same"))
    model.add(Convolution2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Convolution2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Convolution2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    return model
def predict(model, img):
    # img_rows, img_colms and img_channels are module-level globals in the original source
    # Flatten the image into a 1-D vector
    image = np.array(img).flatten()
    # Cast to float32
    image = image.astype('float32')
    # Normalize to [0, 1]
    image = image / 255
    # Reshape to the (batch, rows, cols, channels) layout the network expects
    rimage = image.reshape(1, img_rows, img_colms, img_channels)
    # Feed it to the network to fetch the predictions
    clas = model.predict(rimage)
    # prob_array = model.predict_proba(rimage)
    return clas
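The reshape depends on the three globals noted above; a self-contained single-image sketch with placeholder values and a tiny stand-in classifier (both are assumptions, not taken from the snippet):

import numpy as np

img_rows, img_colms, img_channels = 32, 32, 3    # assumed values for the module globals
# Tiny stand-in classifier so the call below is self-contained
model = Sequential([Flatten(input_shape=(img_rows, img_colms, img_channels)),
                    Dense(10, activation='softmax')])
img = np.random.randint(0, 256, (img_rows, img_colms, img_channels))
probs = predict(model, img)
print(int(np.argmax(probs, axis=-1)[0]))         # index of the predicted class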
def test_valid_workflow(self):
    # Create image URI dataframe
    label_cardinality = 10
    image_uri_df = self._create_train_image_uris_and_labels(
        repeat_factor=3, cardinality=label_cardinality)
    # We need a small model so that machines with limited resources can run it
    model = Sequential()
    model.add(Flatten(input_shape=(299, 299, 3)))
    model.add(Dense(label_cardinality))
    model.add(Activation("softmax"))
    estimator = self._get_estimator(model)
    self.assertTrue(estimator._validateParams())
    transformers = estimator.fit(image_uri_df)
    self.assertEqual(1, len(transformers))
    self.assertIsInstance(transformers[0]['transformer'], KerasImageFileTransformer)
Source: cnn.py, from the Nature-Conservancy-Fish-Image-Prediction project (author: Brok-Bucholtz)
def train(img_shape):
    classes = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
    # Model
    model = Sequential()
    model.add(Convolution2D(
        32, (3, 3), input_shape=img_shape, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(32, (3, 3), activation='relu', kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.5))
    model.add(Dense(len(classes), activation='softmax'))
    features, labels = get_featurs_labels(img_shape)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(features, labels, epochs=10, batch_size=32, validation_split=0.2, verbose=1)
    return model
def test_highway_layers():
    n_highway_layers = 5
    x = Input(shape=(8,), dtype="int32")
    v = Embedding(input_dim=2, output_dim=10)(x)
    v = Flatten()(v)
    assert hasattr(v, "_keras_shape")
    v = highway_layers(v, n_layers=n_highway_layers)
    output = Dense(1)(v)
    model = Model(inputs=[x], outputs=[output])
    assert len(model.layers) > n_highway_layers * 3
    x = np.array([
        [1] + [0] * 7,
        [0] * 8,
        [0] * 7 + [1]])
    y = np.array([0, 1, 0])
    model.compile("rmsprop", "mse")
    model.fit(x, y, epochs=10)
    pred = model.predict(x)
    mean_diff = np.abs(pred - y).mean()
    assert mean_diff < 0.5, pred
def create_network():
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D

    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return KerasNetwork(model, 'cnn_weights.hd5')
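create_network depends on three external names: INPUT_SHAPE, NUM_CLASSES, and the KerasNetwork wrapper. The sketch below supplies hypothetical stand-ins, assuming it runs in the defining module, just to exercise the bare Keras model:

import numpy as np

INPUT_SHAPE, NUM_CLASSES = (28, 28, 1), 10       # assumed module globals

class KerasNetwork:
    """Hypothetical stand-in: holds a compiled model and a weights path."""
    def __init__(self, model, weights_file):
        self.model, self.weights_file = model, weights_file

net = create_network()
print(net.model.predict(np.random.rand(1, 28, 28, 1).astype('float32')).shape)   # (1, 10)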
def create_conv_model(self):
    # This is where the neural network model is initialized
    init = 'glorot_uniform'
    self.state_in = Input(self.state_dim)
    self.l1 = Convolution2D(32, (8, 8), activation='elu', kernel_initializer=init,
                            strides=(4, 4), padding='same')(self.state_in)
    self.l2 = Convolution2D(64, (4, 4), activation='elu', kernel_initializer=init,
                            strides=(2, 2), padding='same')(self.l1)
    # self.l3 = Convolution2D(64, (3, 3), activation='relu', kernel_initializer=init,
    #                         strides=(1, 1), padding='same')(self.l2)
    self.l3 = self.l2
    self.h = Flatten()(self.l3)
    self.hidden = Dense(256, kernel_initializer=init, activation='elu')(self.h)
    self.value = Dense(1, kernel_initializer=init)(self.hidden)
    self.policy = Dense(self.action_dim, kernel_initializer=init, activation='softmax')(self.hidden)
    # Theano is assumed to be an alias for the backend tensor module (e.g. theano.tensor)
    self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                         Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                axis=[1], keepdims=True), (1, self.action_dim)))
    self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
    self.model = Model(inputs=self.state_in, outputs=[self.policy, self.value])
def make_model(state_shape, n_actions):
    in_t = Input(shape=(HISTORY_STEPS,) + state_shape, name='input')
    action_t = Input(shape=(1,), dtype='int32', name='action')
    advantage_t = Input(shape=(1,), name='advantage')
    fl_t = Flatten(name='flat')(in_t)
    l1_t = Dense(SIMPLE_L1_SIZE, activation='relu', name='l1')(fl_t)
    l2_t = Dense(SIMPLE_L2_SIZE, activation='relu', name='l2')(l1_t)
    policy_t = Dense(n_actions, name='policy', activation='softmax')(l2_t)

    def loss_func(args):
        p_t, act_t, adv_t = args
        oh_t = K.one_hot(act_t, n_actions)
        oh_t = K.squeeze(oh_t, 1)
        p_oh_t = K.log(1e-6 + K.sum(oh_t * p_t, axis=-1, keepdims=True))
        res_t = adv_t * p_oh_t
        return -res_t

    loss_t = Lambda(loss_func, output_shape=(1,), name='loss')([policy_t, action_t, advantage_t])
    return Model(inputs=[in_t, action_t, advantage_t], outputs=[policy_t, loss_t])
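With the loss emitted as a model output, training typically optimises that head alone; a hedged sketch that keys the loss dict by the 'loss' layer name and leaves the policy head out. HISTORY_STEPS and the layer sizes are the snippet's module globals, set here as assumptions:

import numpy as np

HISTORY_STEPS, SIMPLE_L1_SIZE, SIMPLE_L2_SIZE = 4, 64, 32   # assumed module globals
model = make_model(state_shape=(8,), n_actions=3)
model.compile(optimizer='adam', loss={'loss': lambda y_true, y_pred: y_pred})
states = np.random.rand(16, HISTORY_STEPS, 8).astype('float32')
actions = np.random.randint(0, 3, (16, 1))
advantages = np.random.rand(16, 1).astype('float32')
dummy = np.zeros((16, 1), dtype='float32')                  # y_true is ignored by the loss
model.fit([states, actions, advantages], {'loss': dummy}, verbose=0)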
def net_input(env):
    """
    Create input part of the network with optional prescaling.
    :return: input_tensor, output_tensor
    """
    in_t = Input(shape=env.observation_space.shape, name='input')
    out_t = Conv2D(32, (5, 5), activation='relu', padding='same')(in_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(32, (5, 5), activation='relu', padding='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, (4, 4), activation='relu', padding='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, (3, 3), activation='relu', padding='same')(out_t)
    out_t = Flatten(name='flat')(out_t)
    out_t = Dense(512, name='l1', activation='relu')(out_t)
    return in_t, out_t
def build_model(self):
    initializer = initializers.random_normal(stddev=0.02)
    model = Sequential()
    if self.padding:
        model.add(ZeroPadding2D(padding=(1, 0), data_format="channels_first",
                                input_shape=(self.layers, self.rows, self.columns)))
    model.add(Conv2D(32, (8, 8), activation="relu", data_format="channels_first",
                     strides=(4, 4), kernel_initializer=initializer, padding='same',
                     input_shape=(self.layers, self.rows, self.columns)))
    model.add(Conv2D(64, (4, 4), activation="relu", data_format="channels_first", strides=(2, 2),
                     kernel_initializer=initializer, padding='same'))
    model.add(Conv2D(64, (3, 3), activation="relu", data_format="channels_first", strides=(1, 1),
                     kernel_initializer=initializer, padding='same'))
    model.add(Flatten())
    model.add(Dense(512, activation="relu", kernel_initializer=initializer))
    model.add(Dense(self.actions_num, kernel_initializer=initializer))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    return model
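Acting with the Q-network above is usually epsilon-greedy over the predicted Q-values; a minimal sketch, assuming the agent keeps the compiled model on self.model:

import numpy as np

def choose_action(agent, state, epsilon=0.05):
    """state has shape (layers, rows, columns), matching build_model's input."""
    if np.random.rand() < epsilon:
        return np.random.randint(agent.actions_num)          # explore
    q_values = agent.model.predict(state[np.newaxis])        # add the batch axis
    return int(np.argmax(q_values[0]))                       # exploit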
def test_model_pipe_keras(self):
    model = Sequential()
    model.add(Flatten(input_shape=(1, 28, 28)))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    p = model_util.ModelPipe()
    input_data = [np.random.random((1, 1, 28, 28)) for _ in range(2)]
    p.add(model.predict, batch_size=64, batcher=np.vstack)
    expected_output = [
        model.predict(x.reshape((1, 1, 28, 28))) for x in input_data]
    output = p.apply_ordered(input_data)
    self.assertTrue(np.isclose(np.array(output).flatten(),
                               np.array(expected_output).flatten()).all())
def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.missing_shape)
    validity = model(img)
    return Model(img, validity)
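build_discriminator reads self.missing_shape from its enclosing class; a hedged stand-alone sketch that fakes the instance just to compile and score a random patch (the patch shape and optimizer are assumptions):

import numpy as np
from types import SimpleNamespace

ctx = SimpleNamespace(missing_shape=(32, 32, 3))             # assumed patch shape
disc = build_discriminator(ctx)                              # call with the stand-in self
disc.compile(optimizer='adam', loss='binary_crossentropy')
patch = np.random.rand(1, 32, 32, 3).astype('float32')
print(disc.predict(patch))                                   # validity score in [0, 1]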