def callbacks(self):
    """Assemble the training callbacks.

    :return: list of callbacks: TensorBoard, best-model ModelCheckpoint,
        per-epoch ModelCheckpoint and ReduceLROnPlateau.
    """
    cbs = []
    tb = TensorBoard(log_dir=self.log_dir,
                     write_graph=True,
                     write_images=True)
    cbs.append(tb)
    best_model_filename = self.model_name + '_best.h5'
    best_model = os.path.join(self.checkpoint_dir, best_model_filename)
    save_best = ModelCheckpoint(best_model, save_best_only=True)
    cbs.append(save_best)
    checkpoints = ModelCheckpoint(filepath=self.checkpoint_file, verbose=1)
    cbs.append(checkpoints)
    reduce_lr = ReduceLROnPlateau(patience=1, verbose=1)
    cbs.append(reduce_lr)
    return cbs
Python TensorBoard() example source code
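The snippets below are excerpts from different projects and omit their imports. A minimal, self-contained sketch of the pattern they all share (assuming Keras 2.x with the TensorFlow backend; the model and data here are placeholders):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard, ModelCheckpoint

# Toy data and model, purely for illustration.
x = np.random.rand(256, 10)
y = np.random.randint(0, 2, size=(256, 1))
model = Sequential([Dense(16, activation='relu', input_dim=10),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Log scalars and the graph to ./logs; keep the best weights by validation loss.
callbacks = [TensorBoard(log_dir='./logs'),
             ModelCheckpoint('best.h5', monitor='val_loss', save_best_only=True)]
model.fit(x, y, epochs=5, batch_size=32, validation_split=0.2, callbacks=callbacks)

# Inspect the run afterwards with:  tensorboard --logdir=./logs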
def lengthy_test(model, testrange=(5, 10, 20, 40, 80), epochs=100, verbose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name
    tensorboard = TensorBoard(log_dir=log_path,
                              write_graph=False,  # This eats a lot of space. Enable with caution!
                              # histogram_freq=1,
                              write_images=True,
                              batch_size=model.batch_size,
                              write_grads=True)
    model_saver = ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    # Measure accuracy before training, train, then measure again.
    for i in testrange:
        acc = test_model(model, sequence_length=i, verbose=verbose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))
    train_model(model, epochs=epochs, callbacks=callbacks, verbose=verbose)
    for i in testrange:
        acc = test_model(model, sequence_length=i, verbose=verbose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))
    return
def train_model(model, X, X_test, Y, Y_test):
    batch_size = 100
    epochs = 2
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0,
                                       save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True,
                                   write_images=False, embeddings_freq=0, embeddings_layer_names=None,
                                   embeddings_metadata=None))

    # Train on augmented (live-generated) data: better accuracy, longer training.
    # To train on the raw data instead, use:
    # model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False,
                                        featurewise_std_normalization=False, samplewise_std_normalization=False,
                                        zca_whitening=False, rotation_range=0, width_shift_range=0.1,
                                        height_shift_range=0.1, horizontal_flip=True, vertical_flip=False)
    generated_data.fit(X)
    model.fit_generator(generated_data.flow(X, Y, batch_size=batch_size),
                        steps_per_epoch=X.shape[0] // 6, epochs=epochs,
                        validation_data=(X_test, Y_test), callbacks=checkpoints)
    return model
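Note that steps_per_epoch counts batches, not samples: X.shape[0] // 6 batches of 100 samples means each reported epoch draws roughly batch_size / 6 times the dataset. If one epoch should mean one pass over X, the conventional value (a sketch, assuming numpy is imported as np) is:

steps_per_epoch = int(np.ceil(X.shape[0] / batch_size))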
def train_multilabel_bts(lang_db, imdb, pretrained, max_iters=1000, loss_func='squared_hinge', box_method='random'):
    # Create callback_list.
    dir_path = osp.join('output', 'bts_ckpt', imdb.name)
    tensor_path = osp.join(dir_path, 'log_dir')
    if not osp.exists(dir_path):
        os.makedirs(dir_path)
    if not osp.exists(tensor_path):
        os.makedirs(tensor_path)
    ckpt_save = osp.join(dir_path, lang_db.name + '_multi_label_fixed_' + 'weights-{epoch:02d}.hdf5')
    checkpoint = ModelCheckpoint(ckpt_save, monitor='loss', verbose=1, save_best_only=True)
    early_stop = EarlyStopping(monitor='loss', min_delta=0, patience=3, verbose=0, mode='auto')
    # NOTE: the callback logs to dir_path, not the tensor_path created above.
    tensorboard = TensorBoard(log_dir=dir_path, histogram_freq=2000, write_graph=True, write_images=False)
    callback_list = [checkpoint, early_stop, tensorboard]
    pretrained.fit_generator(load_multilabel_data(imdb, lang_db, pretrained, box_method),
                             steps_per_epoch=5000,
                             epochs=max_iters,
                             verbose=1,
                             callbacks=callback_list,
                             workers=1)
    pretrained.save(osp.join(dir_path, 'model_fixed' + imdb.name + '_' + lang_db.name + '_ML_' + box_method + '_' + loss_func + '.hdf5'))
def train():
    model = build_main_residual_network(BATCH_SIZE, MAX_TIME_STEP, INPUT_DIM, OUTPUT_DIM, loop_depth=DEPTH)
    # x_train / y_train are prepared elsewhere in this module.
    model.fit(x_train, y_train, validation_split=0.1, epochs=50,
              callbacks=[TensorBoard(log_dir='./residual_cnn_dir_deep_%s_all' % DEPTH)])

    # Spot-check a random training sample.
    import random
    randomIndex = random.randrange(SAMPLE_NUM)  # randint(0, SAMPLE_NUM) could index one past the end
    print('Selecting %s as the sample' % randomIndex)
    pred = model.predict(x_train[randomIndex:randomIndex + 1])
    print(pred)
    print(y_train[randomIndex])
    model.save(MODEL_PATH)
def _build_callbacks(self):
    """Build callback objects.

    Returns:
        A list containing the following callback objects:
        - TensorBoard
        - ModelCheckpoint
    """
    tensorboard_path = os.path.join(self.checkpoints_path, 'tensorboard')
    tensorboard = TensorBoard(log_dir=tensorboard_path)
    checkpoint_path = os.path.join(self.checkpoints_path, self.checkpoint_file_format)
    checkpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_best_only=self.save_best_only)
    return [tensorboard, checkpointer]
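checkpoint_file_format is an instance attribute set elsewhere; a plausible value (an assumption, not shown in this excerpt) uses the placeholders that ModelCheckpoint fills in at save time:

# Hypothetical value; {epoch} and {val_loss} are substituted by ModelCheckpoint.
self.checkpoint_file_format = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'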
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':  # Keras 1 private attribute; K.backend() in Keras 2
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']), histogram_freq=10)
        ret_callbacks.append(tensor_board)
    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'], patience=callback['patience'],
                                               verbose=callback['verbose'], mode=callback['mode']))
        elif callback['name'] == 'model_checkpoint':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath, verbose=callback['verbose'],
                                                 save_best_only=save_best, monitor=callback['monitor'],
                                                 mode=callback['mode']))
    return ret_callbacks, model_stored
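A hypothetical config_data illustrating the schema this function expects, with key names inferred from the lookups above:

config_data = {
    'tb_log_dir': 'run_01',
    'output_path': 'output',
    'output_basename': 'experiment',
    'callbacks': [
        {'name': 'early_stopping', 'monitor': 'val_loss', 'patience': 5, 'verbose': 1, 'mode': 'min'},
        {'name': 'model_checkpoint', 'monitor': 'val_loss', 'verbose': 1, 'save_best_only': True, 'mode': 'min'},
    ],
}
ret_callbacks, model_stored = get_callbacks(config_data, appendix='_run01')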
def train(train_generator, train_size, input_num, dims_num):
    print("Start Train Job!")
    start = time.time()
    inputs = InputLayer(input_shape=(input_num, dims_num), batch_size=batch_size)
    layer1 = Dense(100, activation="relu")
    layer2 = Dense(20, activation="relu")
    flatten = Flatten()
    layer3 = Dense(2, activation="softmax", name="Output")
    optimizer = Adam()
    model = Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(Dropout(0.5))
    model.add(layer2)
    model.add(Dropout(0.5))
    model.add(flatten)
    model.add(layer3)
    call = TensorBoard(log_dir=log_dir, write_grads=True, histogram_freq=1)
    model.compile(optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit_generator(train_generator, steps_per_epoch=train_size // batch_size, epochs=epochs_num, callbacks=[call])
    # model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end = time.time()
    print("Over train job in %f s" % (end - start))
def train(train_generator, train_size, input_num, dims_num):
    print("Start Train Job!")
    start = time.time()
    inputs = InputLayer(input_shape=(input_num, dims_num), batch_size=batch_size)
    layer1 = LSTM(128)
    output = Dense(2, activation="softmax", name="Output")
    optimizer = Adam()
    model = Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(Dropout(0.5))
    model.add(output)
    call = TensorBoard(log_dir=log_dir, write_grads=True, histogram_freq=1)
    model.compile(optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit_generator(train_generator, steps_per_epoch=train_size // batch_size, epochs=epochs_num, callbacks=[call])
    # model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end = time.time()
    print("Over train job in %f s" % (end - start))
def trainModel():
    # Create models
    print("Creating VAE...")
    vae, _, _ = getModels()
    vae.compile(optimizer='rmsprop', loss=VAELoss)

    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Train the VAE on dataset
    print("Training VAE...")
    runID = "VAE - ZZZ"
    tb = TensorBoard(log_dir='/tmp/logs/' + runID)
    vae.fit(X_train, X_train, shuffle=True, nb_epoch=nbEpoch, batch_size=batchSize,
            validation_data=(X_test, X_test), callbacks=[tb])

    # Serialize weights to HDF5
    print("Saving weights...")
    vae.save_weights(modelsPath + "model.h5")

# Generates images and plots
def train_discriminator(nsteps):
    mean_loss = 0.0
    for i in range(nsteps):  # was range(1, nsteps), one step short of the average's denominator
        # pick real samples
        batch_indices = np.random.randint(0, O_train.shape[0], args.batch_size)
        y_real = Y_train[batch_indices, :, :, :]
        # pick fake samples
        batch_indices = np.random.randint(0, O_train.shape[0], args.batch_size)
        o_in = O_train[batch_indices, :, :, :]
        t_in = T_train[batch_indices, :, :, :]
        y_in = Y_train[batch_indices, :, :, :]
        y_fake = generator.predict([o_in, t_in, y_in])[0]
        # train
        y_disc = np.vstack([y_real, y_fake])
        r = adversary.fit(y_disc, d_disc,
                          # callbacks=[TensorBoard(log_dir=args.tblog + '_D', write_graph=False)],
                          verbose=0)
        loss = r.history['loss'][0]
        mean_loss = mean_loss + loss
    return mean_loss / nsteps
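d_disc is defined outside this excerpt. Since y_disc stacks a real batch on top of a fake batch, a plausible construction (an assumption, not shown in the source) is the matching stack of one/zero labels:

# Hypothetical labels: 1 for the real half of y_disc, 0 for the fake half.
d_disc = np.vstack([np.ones((args.batch_size, 1)),
                    np.zeros((args.batch_size, 1))])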
def iterate_training(model, dataset, initial_epoch):
    """Iterative Training"""
    checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_CHECKPOINT_FILENAME,
                                 save_best_only=True)
    tensorboard = TensorBoard()
    csv_logger = CSVLogger(CSV_LOG_FILENAME)
    X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
    show_samples_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))
    train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
    validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)
    model.fit_generator(train_batch_generator,
                        samples_per_epoch=SAMPLES_PER_EPOCH,
                        nb_epoch=NUMBER_OF_EPOCHS,
                        validation_data=validation_batch_generator,
                        nb_val_samples=SAMPLES_PER_EPOCH,
                        callbacks=[checkpoint, tensorboard, csv_logger, show_samples_callback],
                        verbose=1,
                        initial_epoch=initial_epoch)
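samples_per_epoch, nb_epoch and nb_val_samples are Keras 1 arguments. Under Keras 2 the generator API counts batches rather than samples; a sketch of the equivalent call (assuming BATCH_SIZE divides the totals):

model.fit_generator(train_batch_generator,
                    steps_per_epoch=SAMPLES_PER_EPOCH // BATCH_SIZE,
                    epochs=NUMBER_OF_EPOCHS,
                    validation_data=validation_batch_generator,
                    validation_steps=SAMPLES_PER_EPOCH // BATCH_SIZE,
                    callbacks=[checkpoint, tensorboard, csv_logger, show_samples_callback],
                    verbose=1,
                    initial_epoch=initial_epoch)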
def train_model(model, X, X_test, Y, Y_test):
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints = []
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0,
                                       save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True,
                                   write_images=False, embeddings_freq=0, embeddings_layer_names=None,
                                   embeddings_metadata=None))
    model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test),
              shuffle=True, callbacks=checkpoints)
    return model
def create_callbacks(self, callback: Callable[[], None], tensor_board_log_directory: Path, net_directory: Path,
                     callback_step: int = 1, save_step: int = 1) -> List[Callback]:
    class CustomCallback(Callback):
        def on_epoch_end(self_callback, epoch, logs=()):
            if epoch % callback_step == 0:
                callback()
            if epoch % save_step == 0 and epoch > 0:
                mkdir(net_directory)
                self.predictive_net.save_weights(str(net_directory / self.model_file_name(epoch)))

    tensorboard_if_running_tensorflow = [TensorBoard(
        log_dir=str(tensor_board_log_directory), write_images=True)] if backend.backend() == 'tensorflow' else []
    return tensorboard_if_running_tensorflow + [CustomCallback()]
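A hypothetical call, assuming the method lives on a wrapper object net with a predictive_net attribute (all names illustrative):

from pathlib import Path

callbacks = net.create_callbacks(callback=lambda: print('checkpoint reached'),
                                 tensor_board_log_directory=Path('logs/run_01'),
                                 net_directory=Path('nets'),
                                 callback_step=1, save_step=5)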
def make_model(input_shape, nb_epochs=100, batch_size=128, lr=0.01, n_layers=1, n_hidden=16, rate_dropout=0.3):
    model_path = 'model.%s' % input_shape[0]
    wp = WindPuller(input_shape=input_shape, lr=lr, n_layers=n_layers, n_hidden=n_hidden, rate_dropout=rate_dropout)
    train_set, test_set = read_ultimate("./", input_shape)
    wp.fit(train_set.images, train_set.labels, batch_size=batch_size,
           nb_epoch=nb_epochs, shuffle=True, verbose=1,
           validation_data=(test_set.images, test_set.labels),
           callbacks=[TensorBoard(histogram_freq=1),
                      ModelCheckpoint(filepath=model_path + '.best', save_best_only=True, mode='min')])
    scores = wp.evaluate(test_set.images, test_set.labels, verbose=0)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    wp.model.save(model_path)
    saved_wp = wp.load_model(model_path)
    scores = saved_wp.evaluate(test_set.images, test_set.labels, verbose=0)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    pred = saved_wp.predict(test_set.images, 1024)
    # print(pred)
    # print(test_set.labels)
    pred = numpy.reshape(pred, [-1])
    result = numpy.array([pred, test_set.labels]).transpose()
    with open('output.' + str(input_shape[0]), 'w') as fp:
        for i in range(result.shape[0]):
            for val in result[i]:
                fp.write(str(val) + "\t")
            fp.write('\n')
def test_TensorBoard_with_ReduceLROnPlateau():
    import shutil
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
def get_callbacks(experiment_dir, checkpoint_monitor='val_acc'):
    callbacks = []

    # save model checkpoints
    filepath = os.path.join(experiment_dir,
                            'checkpoints',
                            'checkpoint-epoch_{epoch:03d}-val_acc_{val_acc:.3f}.hdf5')
    callbacks.append(ModelCheckpoint(filepath,
                                     monitor=checkpoint_monitor,
                                     verbose=1,
                                     save_best_only=False,
                                     mode='max'))
    callbacks.append(ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.5,
                                       patience=3,
                                       verbose=1,
                                       mode='auto',
                                       epsilon=0.0001,
                                       cooldown=0,
                                       min_lr=0))
    callbacks.append(TensorBoard(log_dir=os.path.join(experiment_dir, 'tensorboard-logs'),
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=False))
    return callbacks
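The epsilon argument of ReduceLROnPlateau was renamed min_delta in later Keras releases; on a newer install the same callback reads:

ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3,
                  verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)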
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0,
                                       save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True,
                                   write_images=False, embeddings_freq=0, embeddings_layer_names=None,
                                   embeddings_metadata=None))
    model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test),
              shuffle=True, callbacks=checkpoints)
    return model
def train():
    model = build_stateful_lstm_model(BATCH_SIZE, TIME_STEP, INPUT_DIM, OUTPUT_DIM, dropout=0.1)
    # model.fit(x_train, y_train, validation_data=(x_train[:10], y_train[:10]), epochs=5, callbacks=[TensorBoard()], batch_size=1)
    for index, y_dat in enumerate(y):
        print('Run test on %s' % index)
        model.fit(np.array([x[index]]), y_dat.reshape(1, 3),
                  validation_data=(np.array([x[index]]), y_dat.reshape(1, 3)), epochs=10, callbacks=[TensorBoard()])
        model.save(MODEL_PATH)
        x_pred = model.predict(np.array([x[index]]))
        print(x_pred)
        print(y_dat)
    model.save(MODEL_PATH)
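Each loop iteration above writes to TensorBoard's default ./logs directory, so curves from different samples end up interleaved in one run. Giving every sample its own log directory keeps them separate (a sketch):

# Inside the loop: one run directory per sample.
tb = TensorBoard(log_dir='./logs/sample_%03d' % index)
model.fit(np.array([x[index]]), y_dat.reshape(1, 3), epochs=10, callbacks=[tb])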
# Source: keras_detect_tool_wear/test_directed_timestep_LSTM.py (author: kidozh)
def train():
    model = build_real_stateful_lstm_model_with_normalization(BATCH_SIZE, TIME_STEP, INPUT_DIM, OUTPUT_DIM)
    # Truncate to a whole number of batches, as the stateful model's fixed batch size requires.
    model.fit(x_train[:SAMPLE_NUM // BATCH_SIZE * BATCH_SIZE],
              y_train[:SAMPLE_NUM // BATCH_SIZE * BATCH_SIZE],
              batch_size=BATCH_SIZE,
              validation_split=0,
              epochs=30, callbacks=[TensorBoard(log_dir='./stateful_lstm_fixed')])
    # for index, y_dat in enumerate(y):
    #     print('Run test on %s' % index)
    #     model.fit(np.array([x[index]]), np.array([y_dat.reshape(1, 3)]),
    #               validation_data=(np.array([x[index]]), np.array([y_dat.reshape(1, 3)])),
    #               epochs=100, callbacks=[TensorBoard()])
    #     model.save(MODEL_PATH)
    #     x_pred = model.predict(np.array([x[index]]))
    #     print(x_pred, x_pred.shape)
    #     print(np.array([y_dat.reshape(1, 3)]))

    # Spot-check a random training sample.
    import random
    randomIndex = random.randrange(SAMPLE_NUM)  # randint(0, SAMPLE_NUM) could index one past the end
    print('Selecting %s as the sample' % randomIndex)
    pred = model.predict(x_train[randomIndex:randomIndex + 1])
    print(pred)
    print(y_train[randomIndex])
    model.save(MODEL_PATH)
# Source: keras_detect_tool_wear/main_residual_network_freq.py (author: kidozh)
def train():
    print('Done')
    model = build_2d_main_residual_network(BATCH_SIZE, MAX_TIME_STEP, INPUT_DIM, 2, OUTPUT_DIM, loop_depth=DEPTH)
    # model = build_main_residual_network(BATCH_SIZE, MAX_TIME_STEP, INPUT_DIM, OUTPUT_DIM, loop_depth=DEPTH)
    # x_train / y_train are prepared elsewhere in this module.
    model.fit(x_train, y_train, validation_split=0.1, epochs=50,
              callbacks=[TensorBoard(log_dir='./residual_freq_cnn_dir_deep_%s_all' % DEPTH)])

    # Spot-check a random training sample.
    import random
    randomIndex = random.randrange(SAMPLE_NUM)  # randint(0, SAMPLE_NUM) could index one past the end
    print('Selecting %s as the sample' % randomIndex)
    pred = model.predict(x_train[randomIndex:randomIndex + 1])
    print(pred)
    print(y_train[randomIndex])
    model.save(MODEL_PATH)
def train():
    model = build_stateful_lstm_model_with_normalization(BATCH_SIZE, TIME_STEP, INPUT_DIM, OUTPUT_DIM, dropout=0.1)
    # model.fit(x_train, y_train, validation_data=(x_train[:10], y_train[:10]), epochs=5, callbacks=[TensorBoard()], batch_size=1)
    for index, y_dat in enumerate(y):
        print('Run test on %s' % index)
        model.fit(np.array([x[index]]), y_dat.reshape(1, 3),
                  validation_data=(np.array([x[index]]), y_dat.reshape(1, 3)), epochs=10, callbacks=[TensorBoard()])
        model.save(MODEL_PATH)
        x_pred = model.predict(np.array([x[index]]))
        print(x_pred)
        print(y_dat)
    model.save(MODEL_PATH)
def train_bts(lang_db, imdb, max_iters=1000, loss='squared_hinge'):
    # Define network
    model = define_network(lang_db.vector_size, loss)
    # model = load_model(osp.join('output', 'bts_ckpt', 'imagenet1k_train_bts', 'glove_wiki_300_hinge_weights-03.hdf5'))

    # Create callback_list.
    dir_path = osp.join('output', 'bts_ckpt', imdb.name)
    if not osp.exists(dir_path):
        os.makedirs(dir_path)
    log_dir = osp.join('output', 'bts_logs', imdb.name)
    if not osp.exists(log_dir):
        os.makedirs(log_dir)
    ckpt_save = osp.join(dir_path, lang_db.name + "_" + loss + "_weights-{epoch:02d}.hdf5")
    checkpoint = ModelCheckpoint(ckpt_save, monitor='val_loss', verbose=1, save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
    tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=False)
    callback_list = [checkpoint, early_stop, tensorboard]
    model.fit_generator(load_data(imdb, lang_db),
                        steps_per_epoch=5000,
                        epochs=max_iters,
                        verbose=1,
                        validation_data=imdb.load_val_data(lang_db),
                        validation_steps=20000,  # validation batches drawn per epoch (Keras counts batches, not images)
                        callbacks=callback_list,
                        workers=1)
    model.save(osp.join(dir_path, 'model_' + imdb.name + '_' + lang_db.name + '_' + loss + '_l2.hdf5'))
def conv_autoencode_mnist():
    (x_train, y_train), (x_test, y_test) = load_mnist(flatten=False)
    autoencoder = build_conv_autoencoder()
    autoencoder.summary()
    autoencoder.fit(x_train, x_train,
                    epochs=55,
                    batch_size=128,
                    shuffle=True,
                    validation_data=(x_test, x_test),
                    callbacks=[TensorBoard(log_dir='./tmp/autoencoder')])
    decoded_imgs = autoencoder.predict(x_test)
    plot_imgs_and_reconstructions(x_test, decoded_imgs, n=10)
def conv_autoencode_cifar():
    (x_train, y_train), (x_test, y_test) = load_cifar(flatten=False)
    autoencoder = build_conv_autoencoder(input_dim=(32, 32, 3))
    autoencoder.summary()
    autoencoder.fit(x_train, x_train,
                    epochs=25,
                    batch_size=64,
                    shuffle=True,
                    validation_data=(x_test, x_test),
                    callbacks=[TensorBoard(log_dir='./tmp/autoencoder')])
    decoded_imgs = autoencoder.predict(x_test)
    plot_imgs_and_reconstructions(x_test, decoded_imgs, n=10, shape=(32, 32, 3))
def train_model(model, X_1, X_2, Y):
    batch_size = 1
    epochs = 10
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0,
                                       save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True,
                                   write_images=False, embeddings_freq=0, embeddings_layer_names=None,
                                   embeddings_metadata=None))
    # Note: validation reuses the training data here, so val_loss tracks training fit.
    model.fit([X_1, X_2], Y, batch_size=batch_size, epochs=epochs,
              validation_data=([X_1, X_2], Y), shuffle=True, callbacks=checkpoints)
    return model
def train(self, train_batches, valid_batches, samples_per_epoch, nb_epoch, nb_val_samples, extra_callbacks=None):
    """Train the model.

    Automatically adds the following Keras callbacks:
        - ModelCheckpoint
        - EarlyStopping
        - TensorBoard

    Args:
        train_batches (Iterable[Batch]): an iterable of training Batches
        valid_batches (Iterable[Batch]): an iterable of validation Batches
        samples_per_epoch (int)
        nb_epoch (int): max number of epochs to train for
        nb_val_samples (int): number of samples for validation
        extra_callbacks (list): a list of additional Keras callbacks to run
    """
    checkpoint_path = join(self.checkpoint_dir, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
    checkpointer = ModelCheckpoint(checkpoint_path, verbose=1, save_best_only=False)
    early_stopper = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
    tboard = TensorBoard(self.tensorboard_dir, write_graph=False)
    callbacks = [checkpointer, early_stopper, tboard]
    if extra_callbacks:
        callbacks.extend(extra_callbacks)

    train = self._vectorized_batches(train_batches)
    valid = self._vectorized_batches(valid_batches)
    self.keras_model.fit_generator(train, samples_per_epoch, nb_epoch,
                                   callbacks=callbacks,
                                   validation_data=valid, nb_val_samples=nb_val_samples)
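A hypothetical call, with the surrounding object and batch iterables assumed for illustration:

trainer.train(train_batches, valid_batches,
              samples_per_epoch=50000, nb_epoch=20, nb_val_samples=5000,
              extra_callbacks=[CSVLogger('train_log.csv')])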