def load_model(self, epoch):
    """Restore shared and per-pair GAN parameters saved at the given epoch.

    Each checkpoint is an .npz archive whose arrays are keyed arr_0, arr_1,
    ... in layer-parameter order, as produced by np.savez.
    """

    def _restore(path, target_layer):
        # Read every array from the archive, in order, then push them into
        # the Lasagne layer stack rooted at target_layer.
        archive = np.load(path)
        values = [archive['arr_{}'.format(idx)]
                  for idx in range(len(archive.files))]
        LL.set_all_param_values(target_layer, values)

    out_dir = self.args.out_dir
    # Shared discriminator / generator weight layers.
    _restore(out_dir + '/Dweights_params_epoch{}.npz'.format(epoch),
             self.D_weights_layer)
    _restore(out_dir + '/Gweights_params_epoch{}.npz'.format(epoch),
             self.G_weights_layer)
    # One discriminator/generator pair per group.
    for i in range(self.args.ng):
        _restore(out_dir + '/disc%d_params_epoch%d.npz' % (i, epoch),
                 self.D_layers[i])
        _restore(out_dir + '/gen%d_params_epoch%d.npz' % (i, epoch),
                 self.G_layers[i])
Python examples of `set_all_param_values()` usage
def loadModel(filename):
    # Import parameters from a pickled checkpoint into the global NET.
    # The pickle holds a dict with a 'net' entry: a full Lasagne network
    # whose parameter values are extracted and copied into NET.
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        data = pickle.load(f)
    #for training, we only want to load the model params
    net = data['net']
    params = l.get_all_param_values(net)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        # Skip the output layer: drop the last layer of NET and the last
        # two arrays (presumably its W and b — TODO confirm).
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
    print "DONE!"
def load_model(self, load_path):
    """Load pickled parameter values from ``load_path`` into ``self.network``.

    Fix: the file is opened in binary mode (``'rb'``). Pickle data is
    binary; text mode fails outright on Python 3 and can corrupt reads on
    platforms with newline translation.
    """
    with open(load_path, 'rb') as f:
        data = pickle.load(f)
    L.set_all_param_values(self.network, data)
def loadParams(epoch, filename=None):
print "IMPORTING MODEL PARAMS...",
if filename == None:
net_filename = MODEL_PATH + "birdCLEF_" + RUN_NAME + "_model_params_epoch_" + str(epoch) + ".pkl"
else:
net_filename = MODEL_PATH + filename
with open(net_filename, 'rb') as f:
params = pickle.load(f)
if LOAD_OUTPUT_LAYER:
l.set_all_param_values(NET, params)
else:
l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
print "DONE!"
def loadParams(epoch, filename=None):
    # Load pickled parameter arrays into the global NET.
    # NOTE(review): 'epoch' is accepted but never used here, and despite the
    # default, 'filename' must not be None — the concatenation below would
    # raise TypeError. Verify against callers.
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        params = pickle.load(f)
    l.set_all_param_values(NET, params)
    print "DONE!"
#load params of trained model
def loadParams(epoch, filename=None):
    # Load pickled parameters into the global NET, optionally without the
    # output layer (e.g. for fine-tuning on a different label set).
    # NOTE(review): 'epoch' is unused and 'filename' must not be None
    # despite the default — the path concatenation would raise TypeError.
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        params = pickle.load(f)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        # Exclude the output layer: drop the last layer of NET and the last
        # two parameter arrays (presumably its W and b — TODO confirm).
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
    print "DONE!"
################ PREDICTION SAVE/LOAD ##################
def load_model(self, load_path):
    """Load pickled parameter values from ``load_path`` into ``self.network``.

    Fix: open in binary mode (``'rb'``) — pickle streams are binary; text
    mode breaks on Python 3 and can corrupt data where newline translation
    applies.
    """
    with open(load_path, 'rb') as f:
        data = pickle.load(f)
    L.set_all_param_values(self.network, data)
Source file: cnn_cascade_lasagne.py — project: Cascade-CNN-Face-Detection, author: gogolgrind (reads 22, bookmarks 0, likes 0, comments 0)
def load_model(self, model_name=nn_name + '.npz'):
    """Load saved parameter arrays from ``model_name`` into ``self.net``.

    Returns self so the call can be chained.
    """
    print(model_name, 'is loaded')
    with sp.load(model_name) as archive:
        # Arrays are stored as arr_0, arr_1, ... in layer-parameter order.
        values = [archive['arr_%d' % idx] for idx in range(len(archive.files))]
        layers.set_all_param_values(self.net, values)
    return self
def melt(self):
    """Copy the live network's current parameter values into the frozen copy."""
    current_values = ls.get_all_param_values(self.network)
    ls.set_all_param_values(self.frozen_network, current_values)
def load_model(self, load_path):
    """Load pickled parameters for the main network and each tracker.

    The file contains one pickle per network written back-to-back: the main
    network's parameters first, then one set per entry in ``self.trackers``,
    so the sequential ``pkl.load`` calls must share the same file handle.

    Fix: open in binary mode (``'rb'``) — pickle streams are binary; text
    mode breaks on Python 3.
    """
    with open(load_path, 'rb') as f:
        data = pkl.load(f)
        L.set_all_param_values(self.network, data)
        for item in self.trackers:
            data = pkl.load(f)
            L.set_all_param_values(item, data)
def load_model(self, load_path):
    """Load pickled parameter values from ``load_path`` into ``self.network``.

    Fix: open in binary mode (``'rb'``) — pickle streams are binary; text
    mode breaks on Python 3 and risks corruption with newline translation.
    """
    with open(load_path, 'rb') as f:
        data = pkl.load(f)
    L.set_all_param_values(self.network, data)
def reset_params(self):
    """Rewind both sub-networks to the parameter values captured at init."""
    targets = (
        (self.treatment_output, self.init_treatment_params),
        (self.instrument_output, self.init_instrument_params),
    )
    for output_layer, initial_values in targets:
        layers.set_all_param_values(output_layer, initial_values)
def weights(self, weights):
    # Push the given parameter arrays into every layer feeding self.outputs.
    # NOTE(review): presumably a @property setter paired with a 'weights'
    # getter — the decorator is not visible in this chunk; confirm.
    layers.set_all_param_values(self.outputs, weights)
def _sample_trained_minibatch_gan(params_file, n, batch_size, rs):
    # Rebuild the trained GAN generator graph, load its weights from
    # params_file, and draw n samples in minibatches of batch_size.
    # 'rs' is a numpy RandomState used to seed Theano's RNG so sampling
    # is reproducible.
    import lasagne
    from lasagne.init import Normal
    import lasagne.layers as ll
    import theano as th
    from theano.sandbox.rng_mrg import MRG_RandomStreams
    import theano.tensor as T
    import nn

    theano_rng = MRG_RandomStreams(rs.randint(2 ** 15))
    lasagne.random.set_rng(np.random.RandomState(rs.randint(2 ** 15)))

    # Generator input: 100-d uniform noise per sample.
    noise_dim = (batch_size, 100)
    noise = theano_rng.uniform(size=noise_dim)
    # Generator stack: dense -> reshape to (512, 4, 4) -> three deconv
    # stages upsampling to a (3, 32, 32) tanh image.
    ls = [ll.InputLayer(shape=noise_dim, input_var=noise)]
    ls.append(nn.batch_norm(
        ll.DenseLayer(ls[-1], num_units=4*4*512, W=Normal(0.05),
                      nonlinearity=nn.relu),
        g=None))
    ls.append(ll.ReshapeLayer(ls[-1], (batch_size,512,4,4)))
    ls.append(nn.batch_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size,256,8,8), (5,5), W=Normal(0.05),
                         nonlinearity=nn.relu),
        g=None)) # 4 -> 8
    ls.append(nn.batch_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size,128,16,16), (5,5), W=Normal(0.05),
                         nonlinearity=nn.relu),
        g=None)) # 8 -> 16
    ls.append(nn.weight_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size,3,32,32), (5,5), W=Normal(0.05),
                         nonlinearity=T.tanh),
        train_g=True, init_stdv=0.1)) # 16 -> 32
    gen_dat = ll.get_output(ls[-1])

    # The checkpoint stores exactly 9 trainable arrays (arr_0 .. arr_8).
    with np.load(params_file) as d:
        params = [d['arr_{}'.format(i)] for i in range(9)]
    ll.set_all_param_values(ls[-1], params, trainable=True)

    sample_batch = th.function(inputs=[], outputs=gen_dat)
    samps = []
    while len(samps) < n:
        samps.extend(sample_batch())
    # The loop may overshoot by up to batch_size - 1; trim to exactly n.
    samps = np.array(samps[:n])
    return samps
def __init__(self, args):
self.args = args
rng = np.random.RandomState(self.args.seed) # fixed random seeds
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
data_rng = np.random.RandomState(self.args.seed_data)
''' specify pre-trained generator E '''
self.enc_layers = [LL.InputLayer(shape=(None, 3, 32, 32), input_var=None)]
enc_layer_conv1 = dnn.Conv2DDNNLayer(self.enc_layers[-1], 64, (5,5), pad=0, stride=1, W=Normal(0.01), nonlinearity=nn.relu)
self.enc_layers.append(enc_layer_conv1)
enc_layer_pool1 = LL.MaxPool2DLayer(self.enc_layers[-1], pool_size=(2, 2))
self.enc_layers.append(enc_layer_pool1)
enc_layer_conv2 = dnn.Conv2DDNNLayer(self.enc_layers[-1], 128, (5,5), pad=0, stride=1, W=Normal(0.01), nonlinearity=nn.relu)
self.enc_layers.append(enc_layer_conv2)
enc_layer_pool2 = LL.MaxPool2DLayer(self.enc_layers[-1], pool_size=(2, 2))
self.enc_layers.append(enc_layer_pool2)
self.enc_layer_fc3 = LL.DenseLayer(self.enc_layers[-1], num_units=256, nonlinearity=T.nnet.relu)
self.enc_layers.append(self.enc_layer_fc3)
self.enc_layer_fc4 = LL.DenseLayer(self.enc_layers[-1], num_units=10, nonlinearity=T.nnet.softmax)
self.enc_layers.append(self.enc_layer_fc4)
''' load pretrained weights for encoder '''
weights_toload = np.load('pretrained/encoder.npz')
weights_list_toload = [weights_toload['arr_{}'.format(k)] for k in range(len(weights_toload.files))]
LL.set_all_param_values(self.enc_layers[-1], weights_list_toload)
''' input tensor variables '''
#self.G_weights
#self.D_weights
self.dummy_input = T.scalar()
self.G_layers = []
self.z = theano_rng.uniform(size=(self.args.batch_size, self.args.z0dim))
self.x = T.tensor4()
self.meanx = T.tensor3()
self.Gen_x = T.tensor4()
self.D_layers = []
self.D_layer_adv = []
self.D_layer_z_recon = []
self.gen_lr = T.scalar() # learning rate
self.disc_lr = T.scalar() # learning rate
self.y = T.ivector()
self.y_1hot = T.matrix()
self.Gen_x_list = []
self.y_recon_list = []
self.mincost = T.scalar()
#self.enc_layer_fc3 = self.get_enc_layer_fc3()
self.real_fc3 = LL.get_output(self.enc_layer_fc3, self.x, deterministic=True)