def save(self, path):
    import os
    import os.path as osp
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    # Create the target directory if it does not already exist.
    try:
        os.mkdir(path)
    except OSError:
        pass
    # Pickle the constructor arguments and the network weights separately.
    with open(osp.join(path, 'args.pickled'), 'wb') as f:
        pickle.dump((self._args, self._kwargs), f)
    with open(osp.join(path, 'weights.pickled'), 'wb') as f:
        pickle.dump(
            layers.get_all_param_values(self.outputs),
            f
        )
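The save method above writes the constructor arguments and the network weights to two separate pickle files; the matching loader is not part of this excerpt. A minimal sketch of one, assuming the class can be rebuilt from self._args / self._kwargs and that layers refers to lasagne.layers (the classmethod name load is an assumption):
@classmethod
def load(cls, path):
    # Hypothetical counterpart to save(): rebuild the model from the
    # pickled constructor arguments, then restore the saved weights.
    import os.path as osp
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(osp.join(path, 'args.pickled'), 'rb') as f:
        args, kwargs = pickle.load(f)
    model = cls(*args, **kwargs)
    with open(osp.join(path, 'weights.pickled'), 'rb') as f:
        layers.set_all_param_values(model.outputs, pickle.load(f))
    return model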
Python get_all_param_values() usage examples
def loadModel(filename):
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        data = pickle.load(f)
    # for training, we only want to load the model params
    net = data['net']
    params = l.get_all_param_values(net)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
    print "DONE!"
def save_model(self, save_path):
    data = L.get_all_param_values(self.network)
    with open(save_path, 'wb') as f:
        pickle.dump(data, f)
def __init__(self, output_layer):
    self.output_layer = output_layer
    self.layers = self._collect_layers()
    self.parameters = L.get_all_param_values(self.output_layer)
    self.stats_f = None
def saveParams(epoch, params=None):
    print "EXPORTING MODEL PARAMS...",
    if params is None:
        params = l.get_all_param_values(NET)
    net_filename = MODEL_PATH + "birdCLEF_" + RUN_NAME + "_model_params_epoch_" + str(epoch) + ".pkl"
    if not os.path.exists(MODEL_PATH):
        os.makedirs(MODEL_PATH)
    with open(net_filename, 'wb') as f:
        pickle.dump(params, f)
    print "DONE!"
def save_model(self, epoch=None):
    if epoch is not None:
        fname = self.conf.save_model.replace('%e', str(epoch).zfill(5))
    else:
        fname = self.conf.save_model.replace('%e', 'final')
    if self.conf.verbosity > 1:
        print "Saving model to", fname
    np.savez(fname, *get_all_param_values(self.autoencoder))
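This variant saves with np.savez, so reloading goes through np.load; a minimal sketch, assuming the positional arrays are stored as arr_0, arr_1, ... and that set_all_param_values is imported from lasagne.layers (the method name load_model is an assumption):
def load_model(self, fname):
    # Hypothetical counterpart to save_model(): np.savez stores positional
    # arguments as arr_0, arr_1, ..., so restore them in that order.
    # Note that np.savez appends '.npz' to fname if the extension is missing.
    with np.load(fname) as data:
        values = [data['arr_%d' % i] for i in range(len(data.files))]
    set_all_param_values(self.autoencoder, values)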
def save_model(self, save_path):
    data = L.get_all_param_values(self.network)
    with open(save_path, 'wb') as f:
        pickle.dump(data, f)
Source file: cnn_cascade_lasagne.py (project: Cascade-CNN-Face-Detection, author: gogolgrind)
def save_model(self, model_name=nn_name + '.npz'):
    sp.savez(model_name, *layers.get_all_param_values(self.net))
def melt(self):
    # Copy the live network's current weights into the frozen copy.
    ls.set_all_param_values(self.frozen_network, ls.get_all_param_values(self.network))
def save_params(self, filename, quiet=False):
    if not quiet:
        print "Saving network weights to " + filename + "..."
    self._prepare_for_save()
    params = get_all_param_values(self.approximator.network)
    pickle.dump(params, open(filename, "wb"))
    if not quiet:
        print "Saving finished."
# Loads network weights from the file
def get_network_architecture(self):
    return get_all_param_values(self.get_network())
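The "Loads network weights from the file" comment above belongs to a loader that is not shown in this excerpt. A minimal sketch of what a counterpart to save_params might look like, assuming the same approximator.network attribute (the name load_params is an assumption):
def load_params(self, filename, quiet=False):
    # Hypothetical inverse of save_params(): unpickle the parameter values
    # and push them back into the approximator's network.
    if not quiet:
        print "Loading network weights from " + filename + "..."
    with open(filename, "rb") as f:
        params = pickle.load(f)
    set_all_param_values(self.approximator.network, params)
    if not quiet:
        print "Loading finished."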
def save(self, filename=None, quiet=False):
    if filename is None:
        filename = self.params_file
    if not quiet:
        print "Saving qengine to " + filename + "..."
    self._prepare_for_save()
    network_params = get_all_param_values(self.approximator.network)
    params = [self.setup, network_params]
    pickle.dump(params, open(filename, "wb"))
    if not quiet:
        print "Saving finished."
def save_model(self, save_path):
    with open(save_path, 'wb') as f:
        # Dump the main network's parameters first, then one pickle
        # record per tracker, all into the same file.
        data = L.get_all_param_values(self.network)
        pkl.dump(data, f)
        for item in self.trackers:
            data = L.get_all_param_values(item)
            pkl.dump(data, f)
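Because this save_model dumps the main network followed by each tracker into the same file, a loader has to read the pickled records back in the same order; a minimal sketch under that assumption (the name load_model is hypothetical):
def load_model(self, load_path):
    # Hypothetical counterpart: pickle.load returns records in the order
    # they were dumped -- the main network first, then each tracker.
    with open(load_path, 'rb') as f:
        L.set_all_param_values(self.network, pkl.load(f))
        for item in self.trackers:
            L.set_all_param_values(item, pkl.load(f))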
def save_model(self, save_path):
    data = L.get_all_param_values(self.network)
    with open(save_path, 'wb') as f:
        pkl.dump(data, f)
def params(self, **tags):
    return layers.get_all_param_values(self.outputs, **tags)
def weights(self):
    return layers.get_all_param_values(self.outputs)
def build_instrument_model(self, n_vars, **kwargs):
    targets = TT.vector()
    instrument_vars = TT.matrix()

    instruments = layers.InputLayer((None, n_vars), instrument_vars)
    instruments = layers.DropoutLayer(instruments, p=0.2)

    dense_layer = layers.DenseLayer(instruments, kwargs['dense_size'], nonlinearity=nonlinearities.tanh)
    dense_layer = layers.DropoutLayer(dense_layer, p=0.2)

    for _ in xrange(kwargs['n_dense_layers'] - 1):
        dense_layer = layers.DenseLayer(dense_layer, kwargs['dense_size'], nonlinearity=nonlinearities.tanh)
        dense_layer = layers.DropoutLayer(dense_layer, p=0.5)

    self.instrument_output = layers.DenseLayer(dense_layer, 1, nonlinearity=nonlinearities.linear)
    init_params = layers.get_all_param_values(self.instrument_output)

    prediction = layers.get_output(self.instrument_output, deterministic=False)
    test_prediction = layers.get_output(self.instrument_output, deterministic=True)

    # flexible here, endog variable can be categorical, continuous, etc.
    l2_cost = regularization.regularize_network_params(self.instrument_output, regularization.l2)
    loss = objectives.squared_error(prediction.flatten(), targets.flatten()).mean() + 1e-4 * l2_cost
    loss_total = objectives.squared_error(prediction.flatten(), targets.flatten()).mean()

    params = layers.get_all_params(self.instrument_output, trainable=True)
    param_updates = updates.adadelta(loss, params)

    self._instrument_train_fn = theano.function(
        [
            targets,
            instrument_vars,
        ],
        loss,
        updates=param_updates
    )

    self._instrument_loss_fn = theano.function(
        [
            targets,
            instrument_vars,
        ],
        loss_total
    )

    self._instrument_output_fn = theano.function([instrument_vars], test_prediction)

    return init_params
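build_instrument_model snapshots the freshly initialized parameter values and returns them, which suggests the caller can later rewind the network to its starting point; a minimal usage sketch under that assumption, with model, targets, and Z as invented stand-ins for the estimator object, the target vector, and the instrument matrix:
# Hypothetical usage: train for a while, then restore the initial weights
# returned by build_instrument_model (e.g. before refitting on a new fold).
# Arrays are cast to float32 assuming Theano's floatX is float32.
init_params = model.build_instrument_model(Z.shape[1], dense_size=64,
                                           n_dense_layers=2)
for _ in range(200):
    model._instrument_train_fn(targets.astype('float32'), Z.astype('float32'))
layers.set_all_param_values(model.instrument_output, init_params)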
def build_treatment_model(self, n_vars, **kwargs):
    input_vars = TT.matrix()
    instrument_vars = TT.matrix()
    targets = TT.vector()

    inputs = layers.InputLayer((None, n_vars), input_vars)
    inputs = layers.DropoutLayer(inputs, p=0.2)

    dense_layer = layers.DenseLayer(inputs, 2 * kwargs['dense_size'], nonlinearity=nonlinearities.rectify)
    dense_layer = layers.batch_norm(dense_layer)
    dense_layer = layers.DropoutLayer(dense_layer, p=0.2)

    for _ in xrange(kwargs['n_dense_layers'] - 1):
        dense_layer = layers.DenseLayer(dense_layer, kwargs['dense_size'], nonlinearity=nonlinearities.rectify)
        dense_layer = layers.batch_norm(dense_layer)

    self.treatment_output = layers.DenseLayer(dense_layer, 1, nonlinearity=nonlinearities.linear)
    init_params = layers.get_all_param_values(self.treatment_output)

    prediction = layers.get_output(self.treatment_output, deterministic=False)
    test_prediction = layers.get_output(self.treatment_output, deterministic=True)

    l2_cost = regularization.regularize_network_params(self.treatment_output, regularization.l2)
    loss = gmm_loss(prediction, targets, instrument_vars) + 1e-4 * l2_cost

    params = layers.get_all_params(self.treatment_output, trainable=True)
    param_updates = updates.adadelta(loss, params)

    self._train_fn = theano.function(
        [
            input_vars,
            targets,
            instrument_vars,
        ],
        loss,
        updates=param_updates
    )

    self._loss_fn = theano.function(
        [
            input_vars,
            targets,
            instrument_vars,
        ],
        loss,
    )

    self._output_fn = theano.function(
        [
            input_vars,
        ],
        test_prediction,
    )

    return init_params
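A rough sketch of how the two builders might be driven together is given below; the actual training loop, data preparation, and the gmm_loss implementation are not part of this excerpt, and model, X, y, Z, endog, and n_epochs are invented names:
# Hypothetical driver: the instrument network fits the endogenous variable
# endog from the instruments Z (cf. the "endog variable" comment above),
# while the treatment network is trained under the GMM objective using Z.
model.build_instrument_model(Z.shape[1], dense_size=64, n_dense_layers=2)
model.build_treatment_model(X.shape[1], dense_size=64, n_dense_layers=2)
for _ in xrange(n_epochs):
    model._instrument_train_fn(endog, Z)   # (targets, instrument_vars)
    model._train_fn(X, y, Z)               # (input_vars, targets, instrument_vars)
print "final GMM loss:", model._loss_fn(X, y, Z)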