def test_double_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=True)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
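# With enable_double_dqn=True, keras-rl selects the next action with the online
# network and evaluates it with the target network. A minimal NumPy sketch of
# that target computation (illustrative only, not keras-rl's actual code):
import numpy as np

def double_dqn_targets(q_online_next, q_target_next, rewards, dones, gamma=0.99):
    """q_online_next / q_target_next: arrays of shape (batch, nb_actions)."""
    best_actions = np.argmax(q_online_next, axis=1)                 # pick with online net
    q_eval = q_target_next[np.arange(len(rewards)), best_actions]   # evaluate with target net
    return rewards + gamma * (1.0 - dones) * q_eval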
def test_duel_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions, activation='linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=False,
                   enable_dueling_network=True)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
def test_sarsa():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions, activation='linear'))

    policy = EpsGreedyQPolicy(eps=.1)
    sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=50, policy=policy)
    sarsa.compile(Adam(lr=1e-3))

    sarsa.fit(env, nb_steps=20000, visualize=False, verbose=0)
    policy.eps = 0.
    h = sarsa.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)
    return optimizers.Adam(clipnorm=1.)
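# Usage sketch: every branch clips gradient norms to 1.0, so swapping
# optimizers by name never changes the clipping behaviour. Assumes a Keras
# `model` and `from keras import optimizers` are already in scope.
model.compile(optimizer=get_optimizer('RMSprop'), loss='mse')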
def create_model(self, epsilon):
    """Return a compiled model and the state and action input
    layers with the given epsilon for numerical stability.
    """
    inputs = Input(shape=(self.state_shape,))
    action_input = Input(shape=(self.action_shape,))
    x1 = Dense(self.neurons_per_layer[0], activation='relu')(inputs)
    x1 = Dense(self.neurons_per_layer[1], activation='relu')(x1)
    x2 = Dense(self.neurons_per_layer[1], activation='relu')(action_input)
    x = add([x1, x2])
    for n in self.neurons_per_layer[2:]:
        x = Dense(n, activation='relu')(x)
    outputs = Dense(self.action_shape)(x)
    model = Model(inputs=[inputs, action_input], outputs=outputs)

    assert self.optimizer_choice in ['adam', 'rmsprop']
    if self.optimizer_choice == 'adam':
        opti = Adam(lr=self.alpha, epsilon=epsilon)
    else:
        opti = RMSprop(lr=self.alpha, epsilon=epsilon)
    model.compile(optimizer=opti, loss='mse')
    return model, inputs, action_input
def build_model(config):
    """Builds the cnn."""
    params = config.model_arch
    get_model = getattr(models, 'get_model_' + str(params['architecture']))
    model = get_model(params)
    # model = model_kenun.build_convnet_model(params)

    # Learning setup
    t_params = config.training_params
    sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
              momentum=t_params["momentum"], nesterov=t_params["nesterov"])
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    optimizer = eval(t_params['optimizer'])
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')
    if t_params['loss_func'] == 'cosine':
        loss_func = eval(t_params['loss_func'])
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer, metrics=metrics)
    return model
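# For context, a minimal config object this function could consume. The
# attribute and key names come from the accesses above; the concrete values
# and the models.get_model_1 factory are assumptions. Note that eval() maps
# the 'sgd' / 'adam' strings onto the local optimizer variables built above.
from types import SimpleNamespace

config = SimpleNamespace(
    model_arch={'architecture': 1, 'final_activation': 'softmax'},
    training_params={'learning_rate': 0.01, 'decay': 1e-6, 'momentum': 0.9,
                     'nesterov': True, 'optimizer': 'sgd',
                     'loss_func': 'categorical_crossentropy'},
)
model = build_model(config)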
def unet_model_xd3_2_6l_grid(nb_filter=48, dim=5, clen=3, img_rows=224, img_cols=224):
    # NOTE that this procedure is/should be used with img_rows & img_cols as None.
    # Aiming for an architecture similar to http://cs231n.stanford.edu/reports2016/317_Report.pdf:
    # "Our model is six layers deep, consisting of a series of three CONV-RELU-POOL layers
    # (with 32, 32, and 64 3x3 filters), a CONV-RELU layer (with 128 3x3 filters), three
    # UPSCALE-CONV-RELU layers (with 64, 32, and 32 3x3 filters), and a final 1x1 CONV-SIGMOID
    # layer to output pixel-level predictions. Its structure resembles Figure 2, though with
    # the number of pixels, filters, and levels as described here."
    # 3D CNN version of a previously developed unet_model_xd_6j.
    zconv = clen
    inputs = Input((1, dim, img_rows, img_cols))
    conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)

    conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool2)
    conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv4)

    up6 = merge([UpSampling3D(size=(2, 2, 2))(conv4), conv2], mode='concat', concat_axis=1)
    conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up6)
    conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling3D(size=(2, 2, 2))(conv6), conv1], mode='concat', concat_axis=1)  # original - only works for even dim
    conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up7)
    conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv7)

    pool11 = MaxPooling3D(pool_size=(2, 1, 1))(conv7)
    conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool11)
    conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv12)
    pool12 = MaxPooling3D(pool_size=(2, 1, 1))(conv12)

    conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool12)
    conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv13)
    pool13 = MaxPooling3D(pool_size=(2, 1, 1))(conv13)

    if dim < 16:
        conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool13)
    else:  # need one extra layer to get to 1D x 2D mask ...
        conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool13)
        conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv14)
        pool14 = MaxPooling3D(pool_size=(2, 1, 1))(conv14)
        conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool14)

    model = Model(input=inputs, output=conv8)
    model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=[dice_coef])
    # model.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
    #               loss=dice_coef_loss, metrics=[dice_coef])
    return model
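# The snippet above references dice_coef / dice_coef_loss without defining
# them. A common Keras-backend implementation looks like this (a standard
# formulation, not necessarily the original project's exact version):
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)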
def get_q_network(weights_path):
    model = Sequential()
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name),
                    input_shape=(25112,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(6, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('linear'))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    if weights_path != "0":
        model.load_weights(weights_path)
    return model
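# Usage sketch: the sentinel string "0" means "train from scratch"; any other
# value is treated as a path to previously saved weights.
fresh_model = get_q_network("0")
resumed_model = get_q_network("q_weights.h5")  # hypothetical file name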
# Source: example_gan_cifar10.py, from the Deep-Learning-with-Keras repo by PacktPublishing.
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{32x32x3} (CIFAR-10 images)
    # generator (z -> x)
    generator = model_generator()
    # discriminator (x -> y)
    discriminator = model_discriminator()
    example_gan(AdversarialOptimizerSimultaneous(), "output/gan-cifar10",
                opt_g=Adam(1e-4, decay=1e-5),
                opt_d=Adam(1e-3, decay=1e-5),
                nb_epoch=100, generator=generator, discriminator=discriminator,
                latent_dim=latent_dim)
def compile_masked(model, lr=0.0005, num_classes=2):
    beta_1 = 0.9
    beta_2 = 0.999
    epsilon = 10 ** (-8)
    optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, clipnorm=1.)
    loss = [lambda y_true, y_pred: y_pred]
    model.compile(
        optimizer=optimizer,
        loss=loss,
    )
    return model
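# The identity loss above is the "loss as a layer" pattern: the model computes
# its (e.g. masked) loss inside the graph and emits it as the output, so the
# compile-time loss just passes y_pred through. A minimal sketch of a model
# built that way (the inputs, Lambda body, and names are assumptions):
from keras.layers import Input, Lambda
from keras.models import Model
import keras.backend as K

y_true_in = Input(shape=(1,))
y_pred_in = Input(shape=(1,))
# Any per-sample loss computed in-graph; here a plain squared error.
loss_out = Lambda(lambda t: K.square(t[0] - t[1]))([y_true_in, y_pred_in])
masked_model = compile_masked(Model(inputs=[y_true_in, y_pred_in], outputs=loss_out))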
def cnn3adam_slim(input_shape, n_classes):
    """
    Input size should be [batch, steps, channels] = (None, 3000, 3)
    """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D(kernel_size=50, filters=32, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=64, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model
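# Usage sketch for the cnn3adam_* family below: the docstrings suggest
# 3-channel sequences of length 3000 (e.g. 30 s of signal at 100 Hz); the
# concrete class count here is an assumption.
model = cnn3adam_slim(input_shape=(3000, 3), n_classes=5)
model.summary()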
def cnn3adam_filter(input_shape, n_classes):
    """
    Input size should be [batch, steps, channels] = (None, 3000, 3)
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc2'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model
def cnn3adam_filter_l2(input_shape, n_classes):
    """
    Input size should be [batch, steps, channels] = (None, 3000, 3)
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use more L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter_l2')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model
def cnn3adam_filter_morel2_slim(input_shape, n_classes):
    """
    Input size should be [batch, steps, channels] = (None, 3000, 3)
    """
    model = Sequential(name='cnn3adam_filter_morel2_slim')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.05)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=128, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=256, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model
def ann(input_shape, n_classes, layers=2, neurons=80, dropout=0.35):
    """
    for working with extracted features
    """
    model = Sequential(name='ann')
    for l in range(layers):
        model.add(Dense(neurons, input_shape=input_shape, activation='elu',
                        kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def ann_rnn(input_shape, n_classes):
    """
    for working with extracted features
    """
    model = Sequential(name='ann_rnn')
    model.add(TimeDistributed(Dense(80, activation='elu', kernel_initializer='he_normal'),
                              input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Dropout(0.35))
    model.add(TimeDistributed(Dense(80, activation='elu', kernel_initializer='he_normal')))
    model.add(BatchNormalization())
    model.add(Dropout(0.35))
    model.add(LSTM(50))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def pure_rnn_do(input_shape, n_classes, layers=2, neurons=80, dropout=0.3):
    """
    just replace ANN by RNNs
    """
    model = Sequential(name='pure_rnn')
    model.add(LSTM(neurons, return_sequences=False if layers == 1 else True,
                   input_shape=input_shape, dropout=dropout, recurrent_dropout=dropout))
    for i in range(layers - 1):
        model.add(LSTM(neurons, return_sequences=False if i == layers - 2 else True,
                       dropout=dropout, recurrent_dropout=dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def pure_rnn_3(input_shape, n_classes):
    """
    just replace ANN by 3x RNNs
    """
    model = Sequential(name='pure_rnn')
    model.add(LSTM(80, return_sequences=True, input_shape=input_shape))
    model.add(LSTM(80, return_sequences=True))
    model.add(LSTM(50, return_sequences=False))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
#%%
def _init_from_scratch(self):
    if self.model_name == 'log_reg':
        self.model = self.log_reg_model()
    if self.model_name == 'svc':
        self.model = self.svc_model()
    if self.model_name == 'cnn_word':
        self.model = self.cnn_word_model()
    if self.model_name == 'lstm_word':
        self.model = self.lstm_word_model()

    if self.model_type == 'nn':
        optimizer = Adam(lr=self.opt['learning_rate'], decay=self.opt['learning_decay'])
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['binary_accuracy'])
def get_convo_nn2(no_word=200, n_gram=21, no_char=178):
    input1 = Input(shape=(n_gram,))
    input2 = Input(shape=(n_gram,))

    a = Embedding(no_char, 32, input_length=n_gram)(input1)
    a = SpatialDropout1D(0.15)(a)
    a = BatchNormalization()(a)

    a_concat = []
    for i in range(1, 9):
        a_concat.append(conv_unit(a, n_gram, no_word, window=i))
    for i in range(9, 12):
        a_concat.append(conv_unit(a, n_gram, no_word - 50, window=i))
    a_concat.append(conv_unit(a, n_gram, no_word - 100, window=12))
    a_sum = Maximum()(a_concat)

    b = Embedding(12, 12, input_length=n_gram)(input2)
    b = SpatialDropout1D(0.15)(b)

    x = Concatenate(axis=-1)([a, a_sum, b])
    # x = Concatenate(axis=-1)([a_sum, b])
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(100, activation='relu')(x)
    out = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[input1, input2], outputs=out)
    model.compile(optimizer=Adam(),
                  loss='binary_crossentropy', metrics=['acc'])
    return model
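# get_convo_nn2 relies on a conv_unit helper that is not shown here. For
# Maximum() to combine the units, each one must emit the same shape, so every
# unit has to project its filters down to a common width and pad back to
# length n_gram. A plausible sketch (layer sizes and padding are assumptions,
# not the original helper):
from keras.layers import Conv1D, TimeDistributed, Dense, ZeroPadding1D

def conv_unit(inp, n_gram, no_word=200, window=2):
    out = Conv1D(no_word, window, strides=1, padding='valid', activation='relu')(inp)
    out = TimeDistributed(Dense(5))(out)               # project to a shared width
    out = ZeroPadding1D(padding=(0, window - 1))(out)  # restore length n_gram
    return out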
def add_fit_args(train):
    train.add_argument('--ngpus', default=1, type=int, help='amount of gpus')
    train.add_argument('--versn', default='rn-21', type=str, help='version of net')
    train.add_argument('--begin', default=0, type=int, help='start epoch')
    train.add_argument('--batch', default=8000, type=int, help='the batch size')
    train.add_argument('--nepoh', default=30, type=int, help='amount of epochs')
    train.add_argument('--check', default=20, type=int, help='period of check in iterations')
    train.add_argument('--lrate', default=0.001, type=float, help='start learning rate')
    train.add_argument('--optim', default='adam', type=str, help='optimizer')
    train.add_argument('--patin', default=15, type=int, help='iterations to wait without improvement')
    train.add_argument('--losss', default='categorical_crossentropy', type=str, help='loss function')
    train.add_argument('--mtype', default=1, type=int, help='neurons on audio branch')
    train.add_argument('--wpath', default=WPATH, type=str, help='net symbol path')
    train.add_argument('--dpath', default=FAST, type=str, help='data path')
    train.add_argument('--split', default=200000, type=int, help='data split size')
    return train
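# Usage sketch, assuming WPATH and FAST are module-level path constants
# defined elsewhere in the original file:
import argparse

parser = add_fit_args(argparse.ArgumentParser(description='training'))
args = parser.parse_args(['--optim', 'adam', '--lrate', '0.0003'])
print(args.optim, args.lrate)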
def build_mod5(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
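# build_mod5 above and the other build_mod* variants further down all assume
# fc_block1 and fc_identity helpers that are not shown. Plausible sketches in
# the same Keras-1 style; only the names and call signatures come from the
# surrounding code, the layer composition is a guess:
from keras.layers import Dense, Dropout, BatchNormalization, Activation, merge

def fc_block1(x, n=1024, d=0.2):
    x = Dense(n)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(d)(x)
    return x

def fc_identity(inp, n=1024, d=0.2):
    x = fc_block1(inp, n, d)
    x = Dense(inp._keras_shape[-1])(x)  # project back to the input width
    x = merge([x, inp], mode='sum')     # residual connection
    return Activation('relu')(x)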
def EES_train():
    EES = model_EES16()
    EES.compile(optimizer=adam(lr=0.0003), loss='mse')
    EES.summary()
    # pd here is the project's data-loading module, not pandas (imported separately below).
    data, label = pd.read_training_data("./train.h5")
    val_data, val_label = pd.read_training_data("./val.h5")

    checkpoint = ModelCheckpoint("EES_check.h5", monitor='val_loss', verbose=1, save_best_only=True,
                                 save_weights_only=False, mode='min')
    callbacks_list = [checkpoint]

    history_callback = EES.fit(data, label, batch_size=64, validation_data=(val_data, val_label),
                               callbacks=callbacks_list, shuffle=True, nb_epoch=200, verbose=1)
    pandas.DataFrame(history_callback.history).to_csv("history.csv")
    EES.save_weights("EES_final.h5")
def __init__(self, scale=3, load_set=None, build_model=None,
             optimizer='adam', save_dir='.'):
    self.scale = scale
    self.load_set = partial(load_set, scale=scale)
    self.build_model = partial(build_model, scale=scale)
    self.optimizer = optimizer

    self.save_dir = Path(save_dir)
    self.save_dir.mkdir(parents=True, exist_ok=True)
    self.config_file = self.save_dir / 'config.yaml'
    self.model_file = self.save_dir / 'model.hdf5'

    self.train_dir = self.save_dir / 'train'
    self.train_dir.mkdir(exist_ok=True)
    self.history_file = self.train_dir / 'history.csv'
    self.weights_dir = self.train_dir / 'weights'
    self.weights_dir.mkdir(exist_ok=True)

    self.test_dir = self.save_dir / 'test'
    self.test_dir.mkdir(exist_ok=True)
def build_mod2(opt=adam()):
    n = 2 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    # x1 = fc_block1(x1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    # x2 = fc_block1(x2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    # x = fc_block1(x, n)
    x = fc_identity(x, n)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod3(opt=adam()):
    n = 2 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod7(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    # x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    # x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    # x = fc_identity(x, n)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod8(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    # x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    # x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, 4000)
    # x = fc_identity(x, n)
    # x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod4(opt=adam()):
    n = 1500
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    x1 = fc_identity(x1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    x2 = fc_identity(x2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_block1(x, 2*n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
def build_mod9(opt=adam()):
    n = int(2.2 * 1024)
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n, d=0.3)
    x1 = fc_identity(x1, n, d=0.3)
    x1 = fc_identity(x1, n, d=0.3)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n, d=0.3)
    x2 = fc_identity(x2, n, d=0.3)
    x2 = fc_identity(x2, n, d=0.3)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n, d=0.3)
    x = fc_identity(x, n, d=0.3)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    # plot(model=model, show_shapes=True)
    return model