def run_parallel_test(data_generator):
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    model = Model([a, b], [a_2, b_2])
    model = make_parallel(model, 2)
    model.compile(optimizer, loss,
                  metrics=[],
                  loss_weights=loss_weights,
                  sample_weight_mode=None)

    # Record which epochs actually run; with initial_epoch=2 and epochs=5,
    # training should cover epochs 2, 3 and 4 only.
    trained_epochs = []
    tracker_cb = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: trained_epochs.append(epoch))
    model.fit_generator(data_generator(4),
                        steps_per_epoch=3,
                        epochs=5,
                        initial_epoch=2,
                        callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]
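The test above assumes a data_generator factory that is not shown. A minimal sketch (hypothetical, shaped to match the two inputs of size 3 and the two outputs of sizes 4 and 3):

import numpy as np

def data_generator(batch_size):
    # Hypothetical generator: endless ((x_a, x_b), (y_a, y_b)) batches
    # matching the two-input, two-output model above.
    while True:
        x = [np.random.random((batch_size, 3)),
             np.random.random((batch_size, 3))]
        y = [np.random.random((batch_size, 4)),
             np.random.random((batch_size, 3))]
        yield x, y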
def fit_model(self, logging_uuid, model=None, epochs=1000, batch_size=10):
    if model is not None:
        self.model = model
    X, y, _ = self.get_formulation_training_data()
    scaler = StandardScaler().fit(X)
    # Publish training progress to Redis after every epoch, and once more
    # when training finishes.
    lcb = LambdaCallback(
        on_epoch_end=lambda epoch, logs: r.set(
            logging_uuid, json.dumps({'model_state': 'training',
                                      'epoch': epoch,
                                      'epochs': epochs,
                                      'loss': logs['loss']})),
        on_train_end=lambda logs: r.set(
            logging_uuid, json.dumps({'model_state': 'training',
                                      'epoch': epochs,
                                      'epochs': epochs})))
    self.fit_history = self.model.fit(scaler.transform(X), y,
                                      epochs=epochs,
                                      batch_size=batch_size,
                                      verbose=0,
                                      callbacks=[lcb])
    return self.model, self.fit_history
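The snippet assumes a module-level Redis client r and the json module; a sketch of that context (host and port are hypothetical):

import json
import redis

# Hypothetical Redis client matching the r.set(...) calls above.
r = redis.StrictRedis(host='localhost', port=6379)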
def iterate_training(model, dataset, initial_epoch):
    """Iterative training."""
    checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_CHECKPOINT_FILENAME,
                                 save_best_only=True)
    tensorboard = TensorBoard()
    csv_logger = CSVLogger(CSV_LOG_FILENAME)

    X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
    show_samples_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))

    train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
    validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)

    # Keras 1-style arguments; Keras 2 replaced samples_per_epoch, nb_epoch
    # and nb_val_samples with steps_per_epoch, epochs and validation_steps.
    model.fit_generator(train_batch_generator,
                        samples_per_epoch=SAMPLES_PER_EPOCH,
                        nb_epoch=NUMBER_OF_EPOCHS,
                        validation_data=validation_batch_generator,
                        nb_val_samples=SAMPLES_PER_EPOCH,
                        callbacks=[checkpoint, tensorboard, csv_logger, show_samples_callback],
                        verbose=1,
                        initial_epoch=initial_epoch)
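The show_samples helper is external to this snippet; a hypothetical stub with the same signature:

def show_samples(model, dataset, epoch, logs, X_batch, y_batch):
    # Hypothetical stand-in: print a few dev-set predictions each epoch
    # for qualitative inspection.
    preds = model.predict(X_batch[:5])
    print('epoch {}: sample prediction shape {}'.format(epoch, preds.shape))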
def image_saver_callback(model, directory, epoch_interval=1, batch_interval=100, cmap='gray', render_videos=False):

    def save_image(weights, batch, layer_name, i):
        global current_epoch
        weight = str(i + 1).zfill(2)
        epoch = str(current_epoch).zfill(3)
        fold = os.path.join(directory, 'epoch_{}-layer_{}-weights_{}'.format(epoch, layer_name, weight))
        if not os.path.isdir(fold):
            os.makedirs(fold)
        name = os.path.join('{}'.format(fold),
                            '{}_{}x{}.png'.format(str(batch).zfill(9),
                                                  weights.shape[0], weights.shape[1]))
        plt.imsave(name, weights, cmap=cmap)

    def save_weight_images(batch, logs):
        global current_epoch
        if current_epoch % epoch_interval == 0 and batch % batch_interval == 0:
            for layer in model.layers:
                if len(layer.get_weights()) > 0:
                    for i, weights in enumerate(layer.get_weights()):
                        # imsave needs a 2-D array; promote 1-D bias vectors.
                        if len(weights.shape) < 2:
                            weights = np.expand_dims(weights, axis=0)
                        save_image(weights, batch, layer.name, i)

    def on_epoch_begin(epoch, logs):
        global current_epoch
        current_epoch = epoch

    def on_train_end(logs):
        src = os.path.dirname(os.path.abspath(__file__))
        cmd = os.path.join(src, '..', 'bin', 'create_image_sequence.sh')
        print(os.system('{} {}'.format(cmd, directory)))

    kwargs = dict()
    kwargs['on_batch_begin'] = save_weight_images
    kwargs['on_epoch_begin'] = on_epoch_begin
    if render_videos:
        kwargs['on_train_end'] = on_train_end
    return LambdaCallback(**kwargs)
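A usage sketch (model and data names here are placeholders):

# Save weight images under ./weight_images every epoch, every 100 batches.
cb = image_saver_callback(model, 'weight_images',
                          epoch_interval=1, batch_interval=100)
model.fit(X_train, y_train, epochs=5, callbacks=[cb])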
def __init__(self, path, parameters={}):
    import subprocess
    subprocess.call(["mkdir", "-p", path])
    self.path = path
    self.built = False
    self.loaded = False
    self.verbose = True
    self.parameters = parameters
    self.custom_log_functions = {}
    self.metrics = []
    import datetime
    self.callbacks = [LambdaCallback(on_epoch_end=self.bar_update),
                      keras.callbacks.TensorBoard(
                          log_dir=self.local('logs/{}'.format(datetime.datetime.now().isoformat())),
                          write_graph=False)]
def _build(self, input_shape):
    data_dim = np.prod(input_shape)
    self.gs = self.build_gs()
    self.gs2 = self.build_gs(N=data_dim)
    self.gs3 = self.build_gs(N=data_dim)

    _encoder = self.build_encoder(input_shape)
    _decoder = self.build_decoder(input_shape)

    x = Input(shape=input_shape)
    z = Sequential([flatten, *_encoder, self.gs])(x)
    y = Sequential([flatten,
                    *_decoder,
                    self.gs2,
                    Lambda(take_true),
                    Reshape(input_shape)])(z)

    z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
    y2 = Sequential([flatten,
                     *_decoder,
                     self.gs3,
                     Lambda(take_true),
                     Reshape(input_shape)])(z2)

    def rec(x, y):
        return bce(K.reshape(x, (K.shape(x)[0], data_dim,)),
                   K.reshape(y, (K.shape(x)[0], data_dim,)))

    def loss(x, y):
        return rec(x, y) + self.gs.loss() + self.gs2.loss()

    # Anneal the Gumbel-Softmax temperature of each sampler every epoch.
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs3.cool))
    self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)

    self.loss = loss
    self.metrics.append(rec)
    self.encoder = Model(x, z)
    self.decoder = Model(z2, y2)
    self.net = Model(x, y)
    self.autoencoder = self.net
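This snippet and the later _build variants rely on project helpers that are not shown (flatten, bce, take_true, build_gs). Hypothetical sketches of the first two, consistent with how they are called:

from keras.layers import Flatten
from keras import backend as K

# Hypothetical stand-ins: flatten as a shared Flatten layer, bce as mean
# binary cross-entropy over the flattened tensors.
flatten = Flatten()

def bce(true, pred):
    return K.mean(K.binary_crossentropy(true, pred))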
def test_LambdaCallback():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training
    # and be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    p.join()
    assert not p.is_alive()
def __enter__(self):
    chk = ModelCheckpoint(self.checkpoint, verbose=0, save_best_only=False,
                          save_weights_only=False, mode='auto')
    csv_logger = CSVLogger('training.log')
    snaps = LambdaCallback(on_epoch_end=lambda epoch, logs: self.snap(epoch))
    return [chk, csv_logger, snaps]
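Since __enter__ returns the callback list, the enclosing class is evidently used as a context manager; a hypothetical usage (TrainingSession is a placeholder name):

# Hypothetical usage: the context manager hands its callbacks to fit().
with TrainingSession('weights.h5') as cbks:
    model.fit(X_train, y_train, epochs=10, callbacks=cbks)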
def fit(self, data_stream,
        nvis=20,
        nbatch=128,
        niter=1000,
        opt=None,
        save_dir='./'):
    if opt is None:
        opt = Adam(lr=0.0001)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    ae = self.autoencoder
    ae.compile(optimizer=opt, loss='mse')
    vis_grid(next(data_stream()), (1, 20), '{}/sample.png'.format(save_dir))
    sampleX = transform(next(data_stream())[:nvis])
    vis_grid(inverse_transform(np.concatenate([sampleX, ae.predict(sampleX)], axis=0)),
             (2, 20), '{}/sample_generate.png'.format(save_dir))

    def vis_grid_f(epoch, logs):
        # Visualize reconstructions each epoch; checkpoint every 50 epochs.
        vis_grid(inverse_transform(np.concatenate([sampleX, ae.predict(sampleX)], axis=0)),
                 (2, 20), '{}/{}.png'.format(save_dir, epoch))
        if epoch % 50 == 0:
            ae.save_weights('{}/{}_ae_params.h5'.format(save_dir, epoch), overwrite=True)

    def transform_wrapper():
        for data in data_stream():
            yield transform(data), transform(data)

    ae.fit_generator(transform_wrapper(),
                     samples_per_epoch=nbatch,
                     nb_epoch=niter,
                     verbose=1,
                     callbacks=[LambdaCallback(on_epoch_end=vis_grid_f)])
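transform and inverse_transform are external helpers; a hypothetical pair matching the usual [-1, 1] image scaling:

# Hypothetical scaling helpers: map uint8 images into [-1, 1] and back.
def transform(x):
    return x / 127.5 - 1.0

def inverse_transform(x):
    return (x + 1.0) * 127.5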
def _build(self, input_shape):
    _encoder = self.build_encoder(input_shape)
    _decoder = self.build_decoder(input_shape)
    self.gs = self.build_gs()
    self.gs2 = self.build_gs()

    x = Input(shape=input_shape)
    z = Sequential([flatten, *_encoder, self.gs])(x)
    y = Sequential(_decoder)(flatten(z))

    z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
    y2 = Sequential(_decoder)(flatten(z2))
    w2 = Sequential([*_encoder, self.gs2])(flatten(y2))

    data_dim = np.prod(input_shape)

    def rec(x, y):
        # return K.mean(K.binary_crossentropy(x, y))
        return bce(K.reshape(x, (K.shape(x)[0], data_dim,)),
                   K.reshape(y, (K.shape(x)[0], data_dim,)))

    def loss(x, y):
        return rec(x, y) + self.gs.loss()

    # Anneal the Gumbel-Softmax temperatures every epoch.
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
    self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)

    self.loss = loss
    self.metrics.append(rec)
    self.encoder = Model(x, z)
    self.decoder = Model(z2, y2)
    self.autoencoder = Model(x, y)
    self.autodecoder = Model(z2, w2)
    self.net = self.autoencoder

    y2_downsample = Sequential([
        Reshape((*input_shape, 1)),
        MaxPooling2D((2, 2))
    ])(y2)
    shape = K.int_shape(y2_downsample)[1:3]
    self.decoder_downsample = Model(z2, Reshape(shape)(y2_downsample))
    self.features = Model(x, Sequential([flatten, *_encoder[:-2]])(x))

    # Step-decay learning rate: full lr for the first lr_epoch fraction of
    # full_epoch, then lr * 0.1.
    if 'lr_epoch' in self.parameters:
        ratio = self.parameters['lr_epoch']
    else:
        ratio = 0.5
    self.callbacks.append(
        LearningRateScheduler(lambda epoch: self.parameters['lr']
                              if epoch < self.parameters['full_epoch'] * ratio
                              else self.parameters['lr'] * 0.1))
    self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
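The same step-decay schedule as a standalone sketch (the names lr, full_epoch and ratio are taken from the snippet's parameters dict):

from keras.callbacks import LearningRateScheduler

def step_decay(lr, full_epoch, ratio=0.5):
    # Keep lr until ratio * full_epoch, then drop to a tenth.
    return LearningRateScheduler(
        lambda epoch: lr if epoch < full_epoch * ratio else lr * 0.1)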
def _build(self, input_shape):
    dim = np.prod(input_shape) // 2
    print("{} latent bits".format(dim))
    M, N = self.parameters['M'], self.parameters['N']

    x = Input(shape=input_shape)
    # Split each example into its first half (pre-state) and second half
    # (successor state).
    _pre = tf.slice(x, [0, 0], [-1, dim])
    _suc = tf.slice(x, [0, dim], [-1, dim])
    pre = wrap(x, _pre, name="pre")
    suc = wrap(x, _suc, name="suc")

    print("encoder")
    _encoder = self.build_encoder([dim])
    action_logit = ConditionalSequential(_encoder, pre, axis=1)(suc)

    gs = self.build_gs()
    action = gs(action_logit)

    print("decoder")
    _decoder = self.build_decoder([dim])
    suc_reconstruction = ConditionalSequential(_decoder, pre, axis=1)(flatten(action))
    y = Concatenate(axis=1)([pre, suc_reconstruction])

    action2 = Input(shape=(N, M))
    pre2 = Input(shape=(dim,))
    suc_reconstruction2 = ConditionalSequential(_decoder, pre2, axis=1)(flatten(action2))
    y2 = Concatenate(axis=1)([pre2, suc_reconstruction2])

    def rec(x, y):
        return bce(K.reshape(x, (K.shape(x)[0], dim * 2,)),
                   K.reshape(y, (K.shape(x)[0], dim * 2,)))

    def loss(x, y):
        kl_loss = gs.loss()
        reconstruction_loss = rec(x, y)
        return reconstruction_loss + kl_loss

    self.metrics.append(rec)
    self.callbacks.append(LambdaCallback(on_epoch_end=gs.cool))
    self.custom_log_functions['tau'] = lambda: K.get_value(gs.tau)
    self.loss = loss
    self.encoder = Model(x, [pre, action])
    self.decoder = Model([pre2, action2], y2)
    self.net = Model(x, y)
    self.autoencoder = self.net
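wrap() is another external helper; a hypothetical sketch consistent with its call sites, where it exposes an already-computed tensor as a Keras layer output:

from keras.layers import Lambda

def wrap(x, y, **kwargs):
    # Hypothetical: present the precomputed tensor y as a layer applied to x.
    return Lambda(lambda _: y, **kwargs)(x)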
def train(self, train_data, valid_data, batch_size, epochs, opt_name, lr, grad_clip):
    """Train the model."""
    def save_weight_on_epoch_end(epoch, e_logs):
        filename = "{}weight-epoch{}-{}-{}.h5".format(self.weight_path,
                                                      time.strftime("%Y-%m-%d-(%H-%M)", time.localtime()),
                                                      epoch,
                                                      e_logs['val_acc'])
        self.model.save_weights(filepath=filename)

    checkpointer = LambdaCallback(on_epoch_end=save_weight_on_epoch_end)
    # tensorboard = TensorBoard(log_dir="./logs", histogram_freq=1, write_images=True)
    earlystopping = EarlyStopping(monitor="val_loss", patience=3, verbose=1)

    # Preprocess the input sequences.
    questions_ok, documents_ok, context_mask, candidates_ok, y_true = self.preprocess_input_sequences(train_data)
    v_questions, v_documents, v_context_mask, v_candidates, v_y_true = self.preprocess_input_sequences(valid_data)

    if opt_name == "SGD":
        optimizer = SGD(lr=lr, decay=1e-6, clipvalue=grad_clip)
    elif opt_name == "ADAM":
        optimizer = Adam(lr=lr, clipvalue=grad_clip)
    else:
        raise NotImplementedError("Other optimizers are not implemented.")
    self.model.compile(optimizer=optimizer,
                       loss="categorical_crossentropy",
                       metrics=["accuracy"])

    # Load existing weights, if any, before training.
    self.load_weight()

    data = {"q_input": questions_ok,
            "d_input": documents_ok,
            "context_mask": context_mask,
            "candidates_bi": candidates_ok}
    v_data = {"q_input": v_questions,
              "d_input": v_documents,
              "context_mask": v_context_mask,
              "candidates_bi": v_candidates}
    logs = self.model.fit(x=data,
                          y=y_true,
                          batch_size=batch_size,
                          epochs=epochs,
                          validation_data=(v_data, v_y_true),
                          callbacks=[checkpointer, earlystopping])
def train():
    c_i = pickle.loads(open("c_i.pkl", "rb").read())
    i_c = {i: c for c, i in c_i.items()}
    xss = []
    yss = []
    for gi, pkl in enumerate(glob.glob("data/*.pkl")):
        if gi > 500:
            break
        o = pickle.loads(open(pkl, "rb").read())
        img = o["image"]
        kana = o["kana"]
        print(kana)
        xss.append(np.array(img))
        # One-hot encode up to 50 kana characters over a 128-symbol alphabet.
        ys = [[0. for i in range(128)] for j in range(50)]
        for i, k in enumerate(list(kana[:50])):
            try:
                ys[i][c_i[k]] = 1.
            except KeyError as e:
                print(e)
        yss.append(ys)

    Xs = np.array(xss)
    Ys = np.array(yss)
    print(Xs.shape)

    # optims = [Adam(lr=0.001), SGD(lr=0.01)]
    optims = [Adam(), SGD(), RMSprop()]
    if '--resume' in sys.argv:
        """
        optims = [ Adam(lr=0.001), \
                   Adam(lr=0.0005), \
                   Adam(lr=0.0001), \
                   Adam(lr=0.00005), \
                   SGD(lr=0.01), \
                   SGD(lr=0.005), \
                   SGD(lr=0.001), \
                   SGD(lr=0.0005), \
                 ]
        """
        model = sorted(glob.glob("models/*.h5")).pop(0)
        print("loaded model is ", model)
        t2i.load_weights(model)

    for i in range(2000):
        print_callback = LambdaCallback(on_epoch_end=callbacks)
        batch_size = random.choice([8])
        random_optim = random.choice(optims)
        print(random_optim)
        t2i.optimizer = random_optim
        t2i.fit(Xs, Ys, shuffle=True, batch_size=batch_size, epochs=20, callbacks=[print_callback])
        if i % 50 == 0:
            t2i.save("models/%9f_%09d.h5" % (buff['loss'], i))
            lossrate = buff["loss"]
            os.system("echo \"{} {}\" `date` >> loss.log".format(i, lossrate))
            print("saved ..")
        print("logs...", buff)
def train():
    c_i = pickle.loads(open("c_i.pkl", "rb").read())
    i_c = {i: c for c, i in c_i.items()}
    xss = []
    yss = []
    for gi, pkl in enumerate(glob.glob("data/*.pkl")):
        if gi > 3000:
            break
        o = pickle.loads(open(pkl, "rb").read())
        img = o["image"]
        kana = o["kana"]
        print(kana)
        xss.append(np.array(img))
        ys = [[0. for i in range(128)] for j in range(50)]
        for i, k in enumerate(list(kana[:50])):
            try:
                ys[i][c_i[k]] = 1.
            except KeyError as e:
                print(e)
        yss.append(ys)

    Xs = np.array(xss)
    Ys = np.array(yss)

    optims = [Adam(), SGD(), RMSprop()]
    if '--resume' in sys.argv:
        # optims = [Adam(lr=0.0005), SGD(lr=0.005)]
        model = sorted(glob.glob("models/*.h5")).pop(0)
        print("loaded model is ", model)
        t2i.load_weights(model)

    for i in range(2000):
        print_callback = LambdaCallback(on_epoch_end=callbacks)
        batch_size = random.choice([32, 64, 128])
        random_optim = random.choice(optims)
        print(random_optim)
        t2i.optimizer = random_optim
        t2i.fit(Xs, Ys, shuffle=True, batch_size=batch_size, epochs=5, callbacks=[print_callback])
        if i % 2 == 0:
            t2i.save("models/%9f_%09d.h5" % (buff['loss'], i))
            lossrate = buff["loss"]
            os.system("echo \"{} {}\" `date` >> loss.log".format(i, lossrate))
            print("saved ..")
        print("logs...", buff)