# Assumes Keras 1.x optimizers imported as `opt`, e.g.:
# from keras import optimizers as opt
def get_optimizer(args):
    clipvalue = 0   # 0 disables value clipping (older Keras applies it only when > 0)
    clipnorm = 10
    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    else:
        # Without this branch, an unknown name raised NameError on `return optimizer`.
        raise ValueError("Unknown algorithm: %s" % args.algorithm)
    return optimizer
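A minimal usage sketch, assuming `args` comes from argparse and `opt` is the Keras optimizers module; the `--algorithm` flag name is an assumption for illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--algorithm', default='adam')   # hypothetical flag name
args = parser.parse_args(['--algorithm', 'rmsprop'])
optimizer = get_optimizer(args)                      # -> opt.RMSprop(lr=0.001, ...)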
Example source code using the Python SGD class
def create_model(img_rows, img_cols):
    """Small two-class CNN (Keras 1.x API, channels-first input)."""
    model = Sequential()  # initialize model
    model.add(Convolution2D(4, 3, 3, border_mode='same', activation='relu', init='he_normal',
                            input_shape=(1, img_rows, img_cols)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Convolution2D(8, 3, 3, border_mode='same', activation='relu', init='he_normal'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(2))
    model.add(Activation('softmax'))
    adm = Adamax()
    #sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=adm, loss='categorical_crossentropy')
    return model
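A hedged usage sketch for create_model with dummy channels-first data; shapes and epoch counts are illustrative only:

import numpy as np

model = create_model(64, 64)
X = np.random.rand(8, 1, 64, 64).astype('float32')   # (batch, channels, rows, cols)
y = np.eye(2)[np.random.randint(0, 2, size=8)]       # one-hot labels, 2 classes
model.fit(X, y, nb_epoch=1, batch_size=4)            # Keras 1.x uses nb_epoch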
def setOptimizer(self, **kwargs):
    """
    Sets a new optimizer for the Translation_Model and recompiles it.
    :param **kwargs: unused; kept for interface compatibility.
    """
    # Compile differently depending on whether the model is 'Sequential' or 'Graph'.
    if self.verbose > 0:
        logging.info("Preparing optimizer and compiling.")
    optimizer_name = self.params['OPTIMIZER'].lower()
    if optimizer_name == 'adam':
        optimizer = Adam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
    elif optimizer_name == 'rmsprop':
        optimizer = RMSprop(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
    elif optimizer_name == 'nadam':
        optimizer = Nadam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
    elif optimizer_name == 'adadelta':
        optimizer = Adadelta(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
    elif optimizer_name == 'sgd':
        optimizer = SGD(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
    else:
        logging.info('\tWARNING: LR and gradient clipping are not applied for the chosen optimizer.')
        optimizer = eval(self.params['OPTIMIZER'])  # fallback; assumes a trusted config value
    self.model.compile(optimizer=optimizer, loss=self.params['LOSS'],
                       sample_weight_mode='temporal' if self.params['SAMPLE_WEIGHTS'] else None)
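The if/elif chain plus the eval() fallback can be replaced by a name-to-class mapping, which avoids evaluating config strings; a minimal sketch (the OPTIMIZERS dict and build_optimizer helper are assumptions, not part of the original class):

# Sketch: dict dispatch instead of if/elif + eval().
OPTIMIZERS = {'adam': Adam, 'rmsprop': RMSprop, 'nadam': Nadam,
              'adadelta': Adadelta, 'sgd': SGD}

def build_optimizer(name, lr, clipnorm):
    try:
        cls = OPTIMIZERS[name.lower()]
    except KeyError:
        raise ValueError("Unsupported optimizer: %s" % name)
    return cls(lr=lr, clipnorm=clipnorm)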
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.

    `input_shape` and `nb_class` are assumed to be module-level globals.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
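A hedged usage sketch; the array names (train_x, train_y, test_x, test_y) are illustrative and assume preprocessed data matching the global input_shape:

teacher, history = make_teacher_model((train_x, train_y), (test_x, test_y), nb_epoch=3)
score = teacher.evaluate(test_x, test_y, verbose=0)
print('teacher accuracy: %.4f' % score[1])  # metrics=['accuracy'] -> index 1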
def preds3d_baseline(width):
    learning_rate = 5e-5
    #optimizer = SGD(lr=learning_rate, momentum=0.9, decay=1e-3, nesterov=True)
    optimizer = Adam(lr=learning_rate)

    inputs = Input(shape=(1, 136, 168, 168))
    conv1 = Convolution3D(width, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Convolution3D(width*2, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv1)
    conv2 = Convolution3D(width*2, 3, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Convolution3D(width*4, 3, 3, 3, activation='relu', border_mode='same')(conv2)
    conv2 = BatchNormalization(axis=1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv2)
    conv3 = Convolution3D(width*4, 3, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Convolution3D(width*8, 3, 3, 3, activation='relu', border_mode='same')(conv3)
    conv3 = BatchNormalization(axis=1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv3)
    output = GlobalAveragePooling3D()(pool3)
    output = Dense(2, activation='softmax', name='predictions')(output)
    model3d = Model(inputs, output)
    model3d.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model3d
def obtain_compiled_vgg_16(vgg_weights_path):
model = vgg_16(vgg_weights_path)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
return model
def generate(batch_size, pretty=False):
    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator_weights')
    if pretty:
        # Over-sample 20x, then keep the batch_size images the discriminator
        # scores highest.
        discriminator = discriminator_model()
        discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
        discriminator.load_weights('discriminator_weights')
        noise = np.zeros((batch_size*20, 100))
        for i in range(batch_size*20):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        d_pret = discriminator.predict(generated_images, verbose=1)
        index = np.arange(0, batch_size*20)
        index.resize((batch_size*20, 1))
        # Pair each score with its image index, then sort by score, descending.
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        pretty_images = np.zeros((batch_size, 1) +
                                 generated_images.shape[2:], dtype=np.float32)
        for i in range(batch_size):
            idx = int(pre_with_index[i][1])
            pretty_images[i, 0, :, :] = generated_images[idx, 0, :, :]
        image = combine_images(pretty_images)
    else:
        noise = np.zeros((batch_size, 100))
        for i in range(batch_size):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        image = combine_images(generated_images)
    # Map from [-1, 1] back to [0, 255] pixel values.
    image = image*127.5 + 127.5
    Image.fromarray(image.astype(np.uint8)).save(
        "images/generated_image.png")
def generate(BATCH_SIZE, nice=False):
    (X_train, Y_train) = get_data('test')
    #print(np.shape(X_train))
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    Y_train = (Y_train.astype(np.float32) - 127.5) / 127.5
    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')
    if nice:
        discriminator = discriminator_model()
        discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
        discriminator.load_weights('discriminator')
        generated_images = generator.predict(X_train, verbose=1)
        d_pret = discriminator.predict(generated_images, verbose=1)
        # Note: this assumes X_train holds exactly BATCH_SIZE*20 samples;
        # otherwise np.append below fails on mismatched lengths.
        index = np.arange(0, BATCH_SIZE*20)
        index.resize((BATCH_SIZE*20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        nice_images = np.zeros((BATCH_SIZE, 1) + generated_images.shape[2:], dtype=np.float32)
        for i in range(BATCH_SIZE):
            idx = int(pre_with_index[i][1])
            nice_images[i, 0, :, :] = generated_images[idx, 0, :, :]
        image = combine_images(nice_images)
    else:
        generated_images = generator.predict(X_train)
        image = combine_images(generated_images)
    image = image*127.5 + 127.5
    image = np.swapaxes(image, 0, 2)
    cv2.imwrite('generated.png', image)
def generate(BATCH_SIZE, nice=False):
generator = generator_model()
generator.compile(loss='binary_crossentropy', optimizer="SGD")
generator.load_weights('generator')
if nice:
discriminator = discriminator_model()
discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
discriminator.load_weights('discriminator')
noise = np.zeros((BATCH_SIZE*20, 100))
for i in range(BATCH_SIZE*20):
noise[i, :] = np.random.uniform(-1, 1, 100)
generated_images = generator.predict(noise, verbose=1)
d_pret = discriminator.predict(generated_images, verbose=1)
index = np.arange(0, BATCH_SIZE*20)
index.resize((BATCH_SIZE*20, 1))
pre_with_index = list(np.append(d_pret, index, axis=1))
pre_with_index.sort(key=lambda x: x[0], reverse=True)
nice_images = np.zeros((BATCH_SIZE, 1) +
(generated_images.shape[2:]), dtype=np.float32)
for i in range(int(BATCH_SIZE)):
idx = int(pre_with_index[i][1])
nice_images[i, 0, :, :] = generated_images[idx, 0, :, :]
image = combine_images(nice_images)
else:
noise = np.zeros((BATCH_SIZE, 100))
for i in range(BATCH_SIZE):
noise[i, :] = np.random.uniform(-1, 1, 100)
generated_images = generator.predict(noise, verbose=1)
image = combine_images(generated_images)
image = image*127.5+127.5
Image.fromarray(image.astype(np.uint8)).save(
"generated_image.png")
def fhan2_max(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    # Hierarchical model: word-level GRU + max-pooling, then sentence-level
    # GRU + max-pooling (max-pooling here replaces the attention used in han2).
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs)
    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)
    Si = GlobalMaxPooling1D()(hij)
    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------
    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs', dtype='float32')
    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)
    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs)
    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)
    Vb = GlobalMaxPooling1D()(hi)
    v6 = Dense(1, activation="sigmoid", kernel_initializer='glorot_uniform', name="dense")(Vb)
    model = Model(inputs=[docInputs], outputs=[v6])
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
def han2(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='wordEmbedding')(wordInputs)
    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)
    alpha_its, Si = AttentionLayer(name='att1')(hij)
    #wordDrop = Dropout(DROPOUTPER, name='wordDrop')(Si)
    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------
    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs', dtype='float32')
    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)
    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking)
    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)
    alpha_s, Vb = AttentionLayer(name='att2')(hi)
    #sentDrop = Dropout(DROPOUTPER, name='sentDrop')(Vb)
    v6 = Dense(1, activation="sigmoid", kernel_initializer='he_normal', name="dense")(Vb)
    model = Model(inputs=[docInputs], outputs=[v6])
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
def create_ft_extractor(self, type_mod, weights_path):
    """Build the convnet feature extractor (currently stubbed out: returns None)."""
    # model = convnet(type_mod, weights_path=weights_path, heatmap=False,
    #                 W_regularizer=None,
    #                 activity_regularizer=None,
    #                 dense=False)
    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(optimizer=sgd, loss="mse")
    # print("Summary:", model.summary())
    model = None
    return model
def baseline_model():
    # create model
    input_shape = (1, 50, 50)
    model = Sequential()
    model.add(Conv2D(16, (3, 3),
                     activation='sigmoid',
                     strides=(1, 1),
                     data_format='channels_first',
                     padding='same',
                     input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    # input_shape is only needed on the first layer, so it is omitted below.
    model.add(Conv2D(48, kernel_size=(3, 3),
                     activation='sigmoid',
                     strides=(1, 1),
                     data_format="channels_first",
                     padding="same"))
    model.add(Conv2D(64, kernel_size=(3, 3),
                     activation='sigmoid',
                     strides=(1, 1),
                     data_format="channels_first",
                     padding="same"))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    model.add(Conv2D(64, kernel_size=(3, 3),
                     activation='sigmoid',
                     strides=(1, 1),
                     data_format="channels_first",
                     padding="same"))
    model.add(Flatten())
    model.add(Dense(64, activation='sigmoid'))
    model.add(Dense(68*2, activation='tanh'))  # 68*2 outputs, e.g. (x, y) for 68 landmark points
    # Compile model
    sgd = SGD(lr=1e-4, momentum=0.9, decay=1e-6, nesterov=False)
    model.compile(loss='mean_squared_error', optimizer=sgd)
    return model
layers_builder.py, from PSPNet-Keras-tensorflow by Vladkryvoruchko
def build_pspnet(nb_classes, resnet_layers, input_shape, activation='softmax'):
    """Build PSPNet."""
    print("Building a PSPNet based on ResNet %i expecting inputs of shape %s predicting %i classes" % (resnet_layers, input_shape, nb_classes))

    inp = Input((input_shape[0], input_shape[1], 3))
    res = ResNet(inp, layers=resnet_layers)
    psp = build_pyramid_pooling_module(res, input_shape)

    x = Conv2D(512, (3, 3), strides=(1, 1), padding="same", name="conv5_4",
               use_bias=False)(psp)
    x = BN(name="conv5_4_bn")(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)

    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="conv6")(x)
    # Upsample the logits back to the input resolution before the softmax.
    x = Lambda(Interp, arguments={'shape': (input_shape[0], input_shape[1])})(x)
    x = Activation('softmax')(x)

    model = Model(inputs=inp, outputs=x)

    # Solver (`learning_rate` is assumed to be defined at module level).
    sgd = SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
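PSPNet-style training is often paired with a "poly" learning-rate decay rather than a fixed SGD rate; a hedged sketch using Keras's LearningRateScheduler, where base_lr, power, and max_epoch are illustrative values not taken from this file:

from keras.callbacks import LearningRateScheduler

base_lr, power, max_epoch = 1e-2, 0.9, 100  # illustrative hyperparameters

def poly_decay(epoch):
    # lr falls from base_lr toward 0 over max_epoch epochs.
    return base_lr * (1.0 - float(epoch) / max_epoch) ** power

lr_callback = LearningRateScheduler(poly_decay)
# model.fit(..., callbacks=[lr_callback])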
step5_train_nodule_detector.py, from TC-Lung_nodules_detection by Shicoder
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None, features=False, mal=False) -> Model:
    # CUBE_SIZE, LEARN_RATE and the metric helpers (binary_accuracy, etc.)
    # are assumed to be module-level globals/imports.
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    #x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), border_mode="same")(x)
    x = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same', name='conv1', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), border_mode='valid', name='pool1')(x)
    # 2nd layer group
    x = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same', name='conv2', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool2')(x)
    #if USE_DROPOUT:
    #    x = Dropout(p=0.3)(x)
    # 3rd layer group
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3a', subsample=(1, 1, 1))(x)
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool3')(x)
    #if USE_DROPOUT:
    #    x = Dropout(p=0.4)(x)
    # 4th layer group
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4a', subsample=(1, 1, 1))(x)
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool4')(x)
    #if USE_DROPOUT:
    #    x = Dropout(p=0.5)(x)
    last64 = Convolution3D(64, 2, 2, 2, activation="relu", name="last_64")(x)
    # Two heads: nodule classification (sigmoid) and malignancy regression (linear).
    out_class = Convolution3D(1, 1, 1, 1, activation="sigmoid", name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)
    out_malignancy = Convolution3D(1, 1, 1, 1, activation=None, name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_malignancy")(out_malignancy)
    model = Model(input=inputs, output=[out_class, out_malignancy])
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    #model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error}, metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy"}, metrics={"out_class": [binary_accuracy, binary_crossentropy]})
    if features:
        # Re-wrap the graph to expose the 64-channel feature block instead of the heads.
        model = Model(input=inputs, output=[last64])
    model.summary(line_length=140)
    return model
def test_sgd():
sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
_test_optimizer(sgd)
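_test_optimizer is not shown in this excerpt; a minimal sketch of what such a helper might look like, modeled on the Keras 1.x test-suite pattern (entirely an assumption):

def _test_optimizer(optimizer):
    # Fit a tiny model and check the optimizer produces a finite loss.
    model = Sequential()
    model.add(Dense(10, input_shape=(4,), activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    X = np.random.rand(20, 4)
    y = np_utils.to_categorical(np.random.randint(0, 2, size=20), 2)
    history = model.fit(X, y, nb_epoch=2, verbose=0)
    assert np.isfinite(history.history['loss'][-1])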
def test_ReduceLROnPlateau():
    # train_samples, test_samples, input_dim, nb_class, nb_hidden and
    # batch_size are test-module globals.
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(nb_class, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon),
    # so the final LR is 0.1 * factor = 0.01.
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    # With epsilon=0, steady improvement means no plateau is detected,
    # so the LR stays at 0.1.
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
def _compile(self):
    """Compile self.q_vals."""
    if self._update_rule == "sgd":
        optimizer = SGD(lr=self._lr, momentum=self._momentum, nesterov=False)
    elif self._update_rule == "rmsprop":
        optimizer = RMSprop(lr=self._lr, rho=self._rho, epsilon=self._rms_epsilon)
    else:
        raise Exception('The update_rule ' + self._update_rule + ' is not implemented.')
    self.q_vals.compile(optimizer=optimizer, loss='mse')
def createModel(self):
model = Sequential()
model.add(Conv2D(16, (3, 3), strides=(2, 2), input_shape=(self.img_rows, self.img_cols, self.img_channels)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(16, (3, 3), strides=(2, 2)))
model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(self.output_size))
# model.add(Activation('softmax'))
# model.compile(RMSprop(lr=self.learningRate), 'MSE')
# sgd = SGD(lr=self.learningRate)
adam = Adam(lr=self.learningRate)
model.compile(loss='mse', optimizer=adam)
model.summary()
return model
deeplearning.py, from Q-A-Recommender-System-Machine-Learning by Yuanxiang-Wu
def model_Train(X_tr, Y_tr, arch, actfn='sigmoid', last_act='sigmoid', reg_coeff=0.0,
                num_epoch=100, batch_size=1000, sgd_lr=1e-5, sgd_decay=0.0, sgd_mom=0.0,
                sgd_Nesterov=False, EStop=False):
    call_ES = EarlyStopping(monitor='val_acc', patience=6, mode='auto')
    model = gen_Model(num_units=arch, actfn=actfn, reg_coeff=reg_coeff, last_act=last_act)
    # Use the SGD hyperparameters passed in; the original hardcoded
    # lr=0.05, decay=1e-6, momentum=0.9, nesterov=True and ignored them.
    sgd = SGD(lr=sgd_lr, decay=sgd_decay, momentum=sgd_mom, nesterov=sgd_Nesterov)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    if EStop:
        model.fit(X_tr, Y_tr, nb_epoch=num_epoch, batch_size=batch_size, callbacks=[call_ES],
                  validation_split=0.1, validation_data=None, shuffle=True)
    else:
        # (`show_accuracy` was dropped in Keras 1.0; accuracy comes from metrics.)
        model.fit(X_tr, Y_tr, nb_epoch=num_epoch, batch_size=batch_size, shuffle=True,
                  verbose=1, validation_split=0.2)
    return model
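A hedged usage sketch for model_Train; the architecture list and data shapes are made up for illustration and assume gen_Model builds a dense network from arch:

X_tr = np.random.rand(500, 20)
Y_tr = np_utils.to_categorical(np.random.randint(0, 3, size=500), 3)
model = model_Train(X_tr, Y_tr, arch=[20, 64, 3], actfn='relu', last_act='softmax',
                    num_epoch=20, batch_size=32, sgd_lr=0.05, sgd_mom=0.9,
                    sgd_Nesterov=True, EStop=True)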