def build_network(deepest=False):
dropout = [0., 0.1, 0.2, 0.3, 0.4]
conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32))
output = fractal_net(
c=3, b=5, conv=conv,
drop_path=0.15, dropout=dropout,
deepest=deepest)(input)
output = Flatten()(output)
output = Dense(NB_CLASSES, init='he_normal')(output)
output = Activation('softmax')(output)
model = Model(input=input, output=output)
optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
#optimizer = RMSprop(lr=LEARN_START)
#optimizer = Adam()
#optimizer = Nadam()
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
plot(model, to_file='model.png')
return model
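These build_network snippets target the Keras 1.x API (init=, Model(input=..., output=...), plot from keras.utils.visualize_util). A minimal sketch of the imports and constants they assume; the values below are placeholders, and fractal_net is the project's own FractalNet builder:

from keras.layers import Input, Dense, Flatten, Activation
from keras.models import Model
from keras.optimizers import SGD, RMSprop, Adam, Nadam
from keras.utils.visualize_util import plot  # Keras 1.x; renamed plot_model in Keras 2
from fractalnet import fractal_net  # project-specific FractalNet builder (assumed import path)

NB_CLASSES = 10     # e.g. CIFAR-10 (placeholder)
LEARN_START = 0.02  # initial learning rate (placeholder)
MOMENTUM = 0.9      # SGD momentum (placeholder)

model = build_network(deepest=False)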
def build_network(deepest=False):
dropout = [0., 0.1, 0.2, 0.3, 0.4]
conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32, 3))
output = fractal_net(
c=3, b=5, conv=conv,
drop_path=0.15, dropout=dropout,
deepest=deepest)(input)
output = Flatten()(output)
output = Dense(NB_CLASSES, init='he_normal')(output)
output = Activation('softmax')(output)
model = Model(input=input, output=output)
#optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
#optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
optimizer = Adam()
#optimizer = Nadam()
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
plot(model, to_file='model.png', show_shapes=True)
return model
def GatedPixelCNN(input_shape, filters, depth, latent=None, build=True):
height, width, channels = input_shape
    palette = 256  # TODO: make this configurable for arbitrary palette sizes.
input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')
latent_vector = None
if latent is not None:
latent_vector = Input(shape=(latent,), name='latent_vector')
model = GatedCNNs(filters, depth, latent_vector)(*GatedCNN(filters, latent_vector)(input_img))
for _ in range(2):
model = Convolution2D(filters, 1, 1, border_mode='valid')(model)
model = PReLU()(model)
outs = OutChannels(*input_shape, masked=False, palette=palette)(model)
if build:
model = Model(input=[input_img, latent_vector] if latent is not None else input_img, output=outs)
model.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')
return model
def PixelCNN(input_shape, filters, depth, build=True):
height, width, channels = input_shape
    palette = 256  # TODO: make this configurable for arbitrary palette sizes.
input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')
model = MaskedConvolution2D(filters, 7, 7, mask='A', border_mode='same', name='masked2d_A')(input_img)
model = ResidualBlockList(filters, depth)(model)
model = PReLU()(model)
for _ in range(2):
model = MaskedConvolution2D(filters, 1, 1, border_mode='valid')(model)
model = PReLU()(model)
outs = OutChannels(*input_shape, masked=True, palette=palette)(model)
if build:
model = Model(input=input_img, output=outs)
model.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')
return model
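A hedged usage sketch for these builders, assuming the project's MaskedConvolution2D, ResidualBlockList, GatedCNN(s), and OutChannels helpers are importable; shapes, hyperparameters, and data are illustrative only:

import numpy as np

# Illustrative data: 32x32 single-channel images with pixel values in [0, 1].
X = np.random.rand(8, 32, 32, 1).astype('float32')

model = PixelCNN(input_shape=(32, 32, 1), filters=64, depth=6)
# With channels == 1 the model compiles with binary_crossentropy,
# so the targets are the input pixels themselves.
model.fit(X, X, batch_size=4, nb_epoch=1)  # Keras 1.x uses nb_epoch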
def get_optimizer(name='Adadelta'):
if name == 'SGD':
return optimizers.SGD(clipnorm=1.)
if name == 'RMSprop':
return optimizers.RMSprop(clipnorm=1.)
if name == 'Adagrad':
return optimizers.Adagrad(clipnorm=1.)
if name == 'Adadelta':
return optimizers.Adadelta(clipnorm=1.)
if name == 'Adam':
return optimizers.Adam(clipnorm=1.)
if name == 'Adamax':
return optimizers.Adamax(clipnorm=1.)
if name == 'Nadam':
return optimizers.Nadam(clipnorm=1.)
return optimizers.Adam(clipnorm=1.)
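Usage is straightforward; any unrecognised name falls through to Adam:

opt = get_optimizer('Nadam')    # Nadam with gradient clipping (clipnorm=1.)
opt = get_optimizer('unknown')  # unrecognised name -> Adam(clipnorm=1.)

A dict lookup with a default (e.g. OPTIMIZERS.get(name, default)) would express the same mapping more compactly.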
def setOptimizer(self, **kwargs):
"""
Sets a new optimizer for the Translation_Model.
:param **kwargs:
"""
    # Compile differently depending on whether our model is 'Sequential' or 'Graph'.
if self.verbose > 0:
logging.info("Preparing optimizer and compiling.")
if self.params['OPTIMIZER'].lower() == 'adam':
optimizer = Adam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
elif self.params['OPTIMIZER'].lower() == 'rmsprop':
optimizer = RMSprop(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
elif self.params['OPTIMIZER'].lower() == 'nadam':
optimizer = Nadam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
elif self.params['OPTIMIZER'].lower() == 'adadelta':
optimizer = Adadelta(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
elif self.params['OPTIMIZER'].lower() == 'sgd':
optimizer = SGD(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
else:
        logging.info('\tWARNING: Learning-rate modification is not implemented for the chosen optimizer.')
optimizer = eval(self.params['OPTIMIZER'])
self.model.compile(optimizer=optimizer, loss=self.params['LOSS'],
sample_weight_mode='temporal' if self.params['SAMPLE_WEIGHTS'] else None)
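A sketch of the params dictionary this method reads (keys inferred from the body above; values are placeholders):

params = {
    'OPTIMIZER': 'nadam',   # adam, rmsprop, nadam, adadelta, sgd, or any Keras optimizer expression
    'LR': 0.002,            # learning rate (placeholder)
    'CLIP_C': 1.0,          # gradient norm clipping (placeholder)
    'LOSS': 'categorical_crossentropy',
    'SAMPLE_WEIGHTS': True, # selects sample_weight_mode='temporal'
}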
Source: model.py, from the project Deep-Learning-para-diagnostico-a-partir-de-imagenes-Biomedicas (author: pacocp).
def create_model_RES():
inp = Input((110, 110, 3))
cnv1 = Conv2D(64, 3, 3, subsample=[2,2], activation='relu', border_mode='same')(inp)
r1 = Residual(64, 64, cnv1)
    # Example of a residual unit following a convolutional layer. NOTE: the residual above takes the
    # 64 output channels of the preceding Conv2D layer as the first argument to the Residual function.
r2 = Residual(64, 64, r1)
cnv2 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(r2)
r3 = Residual(64, 64, cnv2)
r4 = Residual(64, 64, r3)
cnv3 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(r4)
r5 = Residual(128, 128, cnv3)
r6 = Residual(128, 128, r5)
maxpool = MaxPooling2D(pool_size=(7, 7))(r6)
flatten = Flatten()(maxpool)
dense1 = Dense(128, activation='relu')(flatten)
out = Dense(2, activation='softmax')(dense1)
model = Model(input=inp, output=out)
model.compile(loss='categorical_crossentropy',
optimizer=Nadam(lr=1e-4), metrics=['accuracy'])
return model
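Residual is a project helper and is not shown here; a minimal Keras 1.x sketch of an identity-shortcut residual unit with a matching call signature (an assumption, not the project's actual implementation):

from keras.layers import Convolution2D, Activation, merge

def Residual(n_in, n_out, x):
    # Two 3x3 convolutions plus an identity shortcut. Assumes n_in == n_out,
    # which holds for every call in create_model_RES above.
    shortcut = x
    y = Convolution2D(n_out, 3, 3, border_mode='same', activation='relu')(x)
    y = Convolution2D(n_out, 3, 3, border_mode='same')(y)
    y = merge([shortcut, y], mode='sum')  # element-wise sum (Keras 1.x functional merge)
    return Activation('relu')(y)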
def test_nadam():
_test_optimizer(Nadam())
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
    # TODO: Soft targets? A float stdev to turn each target into a Gaussian over neighbouring bins.
    # TODO: Train only on the receptive field; the earliest outputs are computed from zero-padding.
    # TODO: Global conditioning?
    # TODO: Local conditioning?
_, nb_bins = input_shape
input_audio = Input(input_shape, name='audio_input')
model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)
out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)
out = Merge(mode='sum', name='merging_skips')(skip_connections)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
# https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
if last > 0:
out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)
out = Activation('softmax')(out)
if build:
model = Model(input_audio, out)
model.compile(Nadam(), 'sparse_categorical_crossentropy')
return model
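A hedged usage sketch, assuming the project's CausalAtrousConvolution1D and WavenetBlocks helpers. Inputs are one-hot (e.g. mu-law quantised) audio frames of shape (timesteps, nb_bins); targets are sparse bin indices to match sparse_categorical_crossentropy:

import numpy as np

timesteps, nb_bins = 1024, 256
model = Wavenet((timesteps, nb_bins), filters=32, depth=8, stacks=1)

bins = np.random.randint(0, nb_bins, size=(4, timesteps))  # illustrative quantised audio
X = np.eye(nb_bins, dtype='float32')[bins]                 # one-hot input, (4, timesteps, nb_bins)
y = bins[:, :, None]                                       # sparse targets, (4, timesteps, 1)
model.fit(X, y, batch_size=2, nb_epoch=1)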
def __init__(self, hidden_layer_shape=[64], weight_decay=1e-4,
             batch_normalization=True, activation='relu', save_fname=None,
             patience=6, lr=2e-3, min_lr=2e-6, verbose=2, mu=None,
             refit=False, gpu_list=None, optimizer=None, nb_epochs=1000,
             kernel_initializer='glorot_normal', lr_patience=3):
self.model = Sequential()
self.hidden = hidden_layer_shape
self.wd = weight_decay
self.bn = batch_normalization
self.activation = activation
self.fname = save_fname
self.patience = patience
self.lr = lr
self.min_lr = min_lr
self.verbose = verbose
self.mu = mu
self.epochs = nb_epochs
self.refit = refit
self.gpus = gpu_list
self.ki = kernel_initializer
self.lr_patience = lr_patience
    if optimizer is None:
        self.opt = Nadam(self.lr)
    else:
        self.opt = optimizer
    if self.refit:
        raise NotImplementedError('I have not implemented the refit functionality yet.')
def create_model(L, hidden_sizes=[4], hidden_act='tanh', act='sigmoid', loss='binary_crossentropy',
Z=True, X=False, learning_rate=0.002,
normcentererr_p=None, batchnorm=0):
in_dim = L**2 * (X+Z)
out_dim = 2*L**2 * (X+Z)
model = Sequential()
model.add(Dense(int(hidden_sizes[0]*out_dim), input_dim=in_dim, kernel_initializer='glorot_uniform'))
if batchnorm:
model.add(BatchNormalization(momentum=batchnorm))
model.add(Activation(hidden_act))
for s in hidden_sizes[1:]:
model.add(Dense(int(s*out_dim), kernel_initializer='glorot_uniform'))
if batchnorm:
model.add(BatchNormalization(momentum=batchnorm))
model.add(Activation(hidden_act))
model.add(Dense(out_dim, kernel_initializer='glorot_uniform'))
if batchnorm:
model.add(BatchNormalization(momentum=batchnorm))
model.add(Activation(act))
c = CodeCosts(L, ToricCode, Z, X, normcentererr_p)
losses = {'e_binary_crossentropy':c.e_binary_crossentropy,
's_binary_crossentropy':c.s_binary_crossentropy,
'se_binary_crossentropy':c.se_binary_crossentropy}
model.compile(loss=losses.get(loss,loss),
optimizer=Nadam(lr=learning_rate),
metrics=[c.triv_no_error, c.e_binary_crossentropy, c.s_binary_crossentropy]
)
return model
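A hedged construction sketch; CodeCosts and ToricCode come from the surrounding project. With the defaults (Z=True, X=False) the network maps the L*L Z-syndrome to a 2*L*L error estimate:

# Hypothetical call: distance-5 toric code, two hidden layers of 4x the output width.
model = create_model(L=5, hidden_sizes=[4, 4], hidden_act='tanh',
                     loss='s_binary_crossentropy', learning_rate=0.002)
model.summary()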
def __init__(self, word_index, embedding_matrix):
embedding_layer_c = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_C,
trainable=False)
embedding_layer_q = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_Q,
trainable=False)
embedding_layer_a = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_A,
trainable=False)
context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
embedded_context = embedding_layer_c(context)
embedded_question = embedding_layer_q(question)
embedded_answer = embedding_layer_a(answer)
l_lstm_c = Bidirectional(LSTM(60))(embedded_context)
l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)
concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
relu_c_q = Dense(100, activation='relu')(concat_c_q)
relu_c_q = Dropout(0.25)(relu_c_q)
concat_c_q_a = concatenate([l_lstm_a, relu_c_q], axis = 1)
softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
self.model = Model([question, answer, context], softmax_c_q_a)
opt = Nadam()
self.model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['acc'])
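These QA scorers rely on module-level constants and are built from a fitted tokenizer's word_index plus a pretrained embedding_matrix. A hedged construction sketch; the class name is hypothetical and the constants are placeholders for the project's configuration:

import numpy as np

EMBEDDING_DIM = 100  # placeholder
MAX_SEQUENCE_LENGTH_C, MAX_SEQUENCE_LENGTH_Q, MAX_SEQUENCE_LENGTH_A = 300, 30, 20  # placeholders

word_index = tokenizer.word_index  # from a fitted keras Tokenizer (assumed)
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM), dtype='float32')  # e.g. GloVe rows

qa = ContextQAModel(word_index, embedding_matrix)  # hypothetical name for the class above
qa.model.summary()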
def __init__(self, word_index, embedding_matrix):
embedding_layer_c = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_C,
trainable=False)
embedding_layer_q = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_Q,
trainable=False)
embedding_layer_a = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_A,
trainable=False)
context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
embedded_context = embedding_layer_c(context)
embedded_question = embedding_layer_q(question)
embedded_answer = embedding_layer_a(answer)
l_lstm_c = Bidirectional(LSTM(60, return_sequences=True))(embedded_context)
l_lstm_c = Bidirectional(LSTM(60))(l_lstm_c)
l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)
concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
relu_c_q = Dense(100, activation='relu')(concat_c_q)
relu_c_q = Dropout(0.25)(relu_c_q)
concat_c_q_a = concatenate([l_lstm_a, relu_c_q], axis = 1)
softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
self.model = Model([question, answer, context], softmax_c_q_a)
opt = Nadam()
self.model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['acc'])
def __init__(self, word_index, embedding_matrix):
embedding_layer_q = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_Q,
trainable=False)
embedding_layer_a = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_A,
trainable=False)
question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
embedded_question = embedding_layer_q(question)
embedded_answer = embedding_layer_a(answer)
l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)
concat_c_q_a = concatenate([l_lstm_a, l_lstm_q], axis = 1)
softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
self.model = Model([question, answer], softmax_c_q_a)
opt = Nadam()
self.model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['acc'])
def __init__(self, word_index, embedding_matrix):
embedding_layer_c = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_C,
trainable=False)
embedding_layer_q = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_Q,
trainable=False)
embedding_layer_a = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH_A,
trainable=False)
context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
embedded_context = embedding_layer_c(context)
embedded_question = embedding_layer_q(question)
embedded_answer = embedding_layer_a(answer)
l_lstm_c = Bidirectional(LSTM(60))(embedded_context)
l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)
concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
relu_c_q = Dense(100, activation='tanh')(concat_c_q)
concat_c_a = concatenate([l_lstm_a, l_lstm_c], axis=1)
relu_c_a = Dense(100, activation='tanh')(concat_c_a)
relu_c_q = Dropout(0.5)(relu_c_q)
relu_c_a = Dropout(0.5)(relu_c_a)
concat_c_q_a = merge([relu_c_a, relu_c_q], mode='cos')
softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
self.model = Model([question, answer, context], softmax_c_q_a)
opt = Nadam()
self.model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['acc'])
def MLP(opt='nadam'):
    X_raw = Input(shape=(LEN_RAW_INPUT,), name='input_raw')
    fc1 = BatchNormalization()(X_raw)
    fc1 = Dense(256)(fc1)
    fc1 = PReLU()(fc1)
    fc1 = Dropout(0.2)(fc1)
    fc1 = BatchNormalization()(fc1)
    fc1 = Dense(256)(fc1)
    fc1 = PReLU()(fc1)
    #fc1 = Dropout(0.2)(fc1)
    fc1 = BatchNormalization()(fc1)
    # Note: this auxiliary head is defined but never attached to the model below.
    auxiliary_output_dense = Dense(1, activation='sigmoid', name='aux_output_dense')(fc1)
    output_all = Dense(1, activation='sigmoid', name='output')(fc1)
    model = Model(input=X_raw, output=output_all)
    model.compile(optimizer=opt,
                  loss='binary_crossentropy')
    return model
#nadam=Nadam(lr=0.000)
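A hedged usage sketch for MLP; LEN_RAW_INPUT is a module-level constant in the original project (placeholder below), and opt may be an optimizer name or instance:

import numpy as np
from keras.optimizers import Nadam

LEN_RAW_INPUT = 128  # placeholder for the project's feature width

model = MLP(opt=Nadam(lr=2e-4))  # or MLP('nadam') for Keras defaults
X = np.random.rand(32, LEN_RAW_INPUT).astype('float32')
y = np.random.randint(0, 2, size=(32, 1))
model.fit(X, y, batch_size=16, nb_epoch=1)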
def build_keras_model(self):
input_ = layers.Input(shape=(self.input_dims_,))
#model = layers.noise.GaussianNoise(0.005)(input_)
model = layers.Dense(512, kernel_initializer='Orthogonal')(input_)
model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.1, seed=1)(model)
#model = layers.BatchNormalization()(model)
#model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.2)(model)
model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.1, seed=1)(model)
#model = layers.BatchNormalization()(model)
#model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.4)(model)
model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
#model = layers.BatchNormalization()(model)
#model = layers.advanced_activations.PReLU()(model)
model = layers.Activation('selu')(model)
model = layers.Dense(1, activation='sigmoid')(model)
model = models.Model(input_, model)
model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
#print(model.summary(line_length=120))
return model
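These build_keras_model variants differ only in layer widths and in which regularisers are commented in; each expects a host object exposing input_dims_ and the imports from keras import layers, models, optimizers. A minimal hedged harness with a hypothetical host class:

import numpy as np

class KerasMLP:  # hypothetical host; the real project class carries more state
    def __init__(self, input_dims):
        self.input_dims_ = input_dims

KerasMLP.build_keras_model = build_keras_model  # attach the method defined above

model = KerasMLP(input_dims=64).build_keras_model()
model.fit(np.random.rand(256, 64), np.random.randint(0, 2, size=(256, 1)),
          epochs=1, batch_size=32)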
def build_keras_model(self):
''' #example:
from keras import layers
from keras import models
from keras import optimizers
input_ = layers.Input(shape=(self.input_dims_,))
model = layers.Dense(256, kernel_initializer='Orthogonal')(input_)
#model = layers.BatchNormalization()(model)
model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.2, seed=1)(model)
#model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.4)(model)
model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
#model = layers.BatchNormalization()(model)
model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.1, seed=1)(model)
#model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.4)(model)
model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
#model = layers.BatchNormalization()(model)
model = layers.Activation('selu')(model)
#model = layers.advanced_activations.PReLU()(model)
model = layers.Dense(1, activation='sigmoid')(model)
model = models.Model(input_, model)
model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
#print(model.summary(line_length=120))
return model
'''
    raise NotImplementedError('implement this!')
def build_keras_model(self):
input_ = layers.Input(shape=(self.input_dims_,))
#model = layers.noise.GaussianNoise(0.005)(input_)
model = layers.Dense(256, kernel_initializer='Orthogonal')(input_)
model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.1, seed=1)(model)
#model = layers.BatchNormalization()(model)
#model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.2)(model)
model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.1, seed=1)(model)
#model = layers.BatchNormalization()(model)
#model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.4)(model)
model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
#model = layers.BatchNormalization()(model)
#model = layers.advanced_activations.PReLU()(model)
model = layers.Activation('selu')(model)
model = layers.Dense(1, activation='sigmoid')(model)
model = models.Model(input_, model)
model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
#print(model.summary(line_length=120))
return model
def build_keras_model(self):
input_ = layers.Input(shape=(self.input_dims_,))
model = layers.noise.GaussianNoise(0.005)(input_)
model = layers.Dense(1024, kernel_initializer='Orthogonal')(model)
#model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.1, seed=1)(model)
model = layers.BatchNormalization()(model)
model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.2)(model)
model = layers.Dense(512, kernel_initializer='Orthogonal')(model)
#model = layers.Activation('selu')(model)
#model = layers.noise.AlphaDropout(0.1, seed=1)(model)
model = layers.BatchNormalization()(model)
model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.4)(model)
model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
model = layers.BatchNormalization()(model)
model = layers.advanced_activations.PReLU()(model)
#model = layers.Activation('selu')(model)
model = layers.Dense(1, activation='sigmoid')(model)
model = models.Model(input_, model)
model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
#print(model.summary(line_length=120))
return model
def keras_mlp1(train2, y, test2, v, z):
from keras import layers
from keras import models
from keras import optimizers
cname = sys._getframe().f_code.co_name
num_splits = 9
scaler = preprocessing.RobustScaler()
train3 = scaler.fit_transform(train2)
test3 = scaler.transform(test2)
input_dims = train3.shape[1]
def build_model():
input_ = layers.Input(shape=(input_dims,))
model = layers.Dense(256, kernel_initializer='Orthogonal')(input_)
#model = layers.BatchNormalization()(model)
#model = layers.advanced_activations.PReLU()(model)
model = layers.Activation('selu')(model)
#model = layers.Dropout(0.7)(model)
model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
#model = layers.BatchNormalization()(model)
model = layers.Activation('selu')(model)
#model = layers.advanced_activations.PReLU()(model)
#model = layers.Dropout(0.9)(model)
model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
#model = layers.BatchNormalization()(model)
model = layers.Activation('selu')(model)
#model = layers.advanced_activations.PReLU()(model)
model = layers.Dense(1, activation='sigmoid')(model)
model = models.Model(input_, model)
model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
#print(model.summary(line_length=120))
return model
keras_common(train3, y, test3, v, z, num_splits, cname, build_model)
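keras_common is a project helper; judging from its arguments, it runs num_splits-fold cross-validation with build_model, writing out-of-fold predictions into v[cname] and averaged test predictions into z[cname]. A hedged sketch of such a loop, assuming v and z are pandas DataFrames and y is a numpy array:

import numpy as np
from sklearn.model_selection import StratifiedKFold

def keras_common(train3, y, test3, v, z, num_splits, cname, build_model):
    # Assumed behaviour, not the project's actual implementation.
    v[cname] = 0.0
    z[cname] = 0.0
    skf = StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=1)
    for train_idx, valid_idx in skf.split(train3, y):
        model = build_model()
        model.fit(train3[train_idx], y[train_idx], epochs=10, batch_size=128, verbose=0)
        v.loc[v.index[valid_idx], cname] = model.predict(train3[valid_idx]).ravel()
        z[cname] += model.predict(test3).ravel() / num_splits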