def prepare_style(self, scale=1.0):
"""Called each phase of the optimization, process the style image according to the scale, then run it
through the model to extract intermediate outputs (e.g. sem4_1) and turn them into patches.
"""
style_img = self.rescale_image(self.style_img_original, scale)
self.style_img = self.model.prepare_image(style_img)
style_map = self.rescale_image(self.style_map_original, scale)
self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32)
# Compile a function to run on the GPU to extract patches for all layers at once.
layer_outputs = zip(self.style_layers, self.model.get_outputs('sem', self.style_layers))
extractor = self.compile([self.model.tensor_img, self.model.tensor_map], self.do_extract_patches(layer_outputs))
result = extractor(self.style_img, self.style_map)
# Store the style patches layer by layer, trimmed to a multiple of the slice count and cast to 16-bit floats to save memory.
self.style_data = {}
for layer, *data in zip(self.style_layers, result[0::3], result[1::3], result[2::3]):
patches = data[0]
l = self.model.network['nn'+layer]
l.num_filters = patches.shape[0] // args.slices
self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\
+ [np.zeros((patches.shape[0],), dtype=np.float16)]
print(' - Style layer {}: {} patches in {:,}kb.'.format(layer, patches.shape, patches.size//1000))
def freezeParameters(net, single=True):
"""
Freeze parameters of a layer or a network so that they are not trainable
anymore
Parameters
----------
net: a network layer
single: whether to freeze only this layer or all of the layers below it as well
"""
all_layers = lasagne.layers.get_all_layers(net)
if single:
all_layers = [all_layers[-1]]
for layer in all_layers:
layer_params = layer.get_params()
for p in layer_params:
try:
layer.params[p].remove('trainable')
except KeyError:
pass
def unfreezeParameters(net, single=True):
"""
Unfreeze parameters of a layer or a network so that they become trainable
again
Parameters
----------
net: a network layer
single: whether to unfreeze only this layer or all of the layers below it as well
"""
all_layers = lasagne.layers.get_all_layers(net)
if single:
all_layers = [all_layers[-1]]
for layer in all_layers:
layer_params = layer.get_params()
for p in layer_params:
try:
layer.params[p].add('trainable')
except KeyError:
pass
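# A minimal, hypothetical usage sketch for the two helpers above: freeze the final
# dense layer of a small network, confirm that its parameters drop out of the
# trainable set, then make them trainable again. Layer sizes are illustrative only.
import lasagne

l_in = lasagne.layers.InputLayer((None, 1, 28, 28))
l_hid = lasagne.layers.DenseLayer(l_in, num_units=64)
l_out = lasagne.layers.DenseLayer(l_hid, num_units=10,
                                  nonlinearity=lasagne.nonlinearities.softmax)
freezeParameters(l_out, single=True)    # only l_out's W and b lose the 'trainable' tag
print(len(lasagne.layers.get_all_params(l_out, trainable=True)))   # 2 (l_hid only)
unfreezeParameters(l_out, single=True)  # l_out becomes trainable again
print(len(lasagne.layers.get_all_params(l_out, trainable=True)))   # 4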
def __init__(self, l_in, bgr_mean=np.array([103.939, 116.779, 123.68]),
data_format='bc01', **kwargs):
"""A Layer to normalize and convert images from RGB to BGR
This layer converts images from RGB to BGR to adapt to Caffe
that uses OpenCV, which uses BGR. It also subtracts the
per-pixel mean. From:
https://github.com/fvisin/reseg/blob/variable_size_images/vgg16.py
Parameters
----------
l_in : :class:``lasagne.layers.Layer``
The incoming layer, typically an
:class:``lasagne.layers.InputLayer``
bgr_mean : iterable of 3 ints
The mean of each channel. By default, the ImageNet
mean values are used.
data_format : str
The format of l_in, either `b01c` (batch, rows, cols,
channels) or `bc01` (batch, channels, rows, cols)
"""
super(RGBtoBGRLayer, self).__init__(l_in, **kwargs)
assert data_format in ['bc01', 'b01c']
self.l_in = l_in
floatX = theano.config.floatX
self.bgr_mean = bgr_mean.astype(floatX)
self.data_format = data_format
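# The snippet above only shows __init__ of RGBtoBGRLayer; a get_output_for consistent
# with the docstring (reverse the channel order RGB -> BGR, then subtract the
# per-channel mean) could look like this sketch. It is not necessarily identical to
# the upstream implementation linked above.
def get_output_for(self, input, **kwargs):
    if self.data_format == 'bc01':
        # channels are on axis 1: reverse them, then subtract a broadcastable mean
        input = input[:, ::-1, :, :]
        return input - self.bgr_mean[np.newaxis, :, np.newaxis, np.newaxis]
    else:  # 'b01c'
        # channels are on the last axis
        input = input[:, :, :, ::-1]
        return input - self.bgr_mean[np.newaxis, np.newaxis, np.newaxis, :]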
def get_output_for(self, input, deterministic=False, **kwargs):
def _phase_shift(input,r):
bsize,c,a,b = input.shape[0],1,self.output_shape[2]//r,self.output_shape[3]//r
X = T.reshape(input, (bsize,r,r,a,b))
X = T.transpose(X, (0, 3,4,1,2)) # bsize, a, b, r2,r1
X = T.split(x=X,splits_size=[1]*a,n_splits=a,axis=1) # a, [bsize, b, r, r]
X = [T.reshape(x,(bsize,b,r,r))for x in X]
X = T.concatenate(X,axis=2) # bsize, b, a*r, r
X = T.split(x=X,splits_size =[1]*b,n_splits=b,axis=1) # b, [bsize, a*r, r]
X = [T.reshape(x,(bsize,a*r,r))for x in X]
X = T.concatenate(X,axis=2) # bsize, a*r, b*r
return X.dimshuffle(0,'x',1,2)
Xc = T.split(x=input,splits_size =[input.shape[1]//self.c]*self.c,n_splits=self.c,axis=1)
return T.concatenate([_phase_shift(xc,self.r) for xc in Xc],axis=1)
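# For reference, the same sub-pixel ("pixel shuffle") rearrangement can be written
# with plain NumPy: the r*r channels of each group are interleaved into an output
# that is r times larger along both spatial axes. This is only a sanity sketch of
# the rearrangement performed by _phase_shift above, not part of the layer itself.
import numpy as np

def phase_shift_np(x, r):
    # x: (batch, r*r, a, b) -> (batch, 1, a*r, b*r)
    bsize, c, a, b = x.shape
    assert c == r * r
    x = x.reshape(bsize, r, r, a, b)
    x = x.transpose(0, 3, 4, 1, 2)   # (batch, a, b, r, r)
    x = x.transpose(0, 1, 3, 2, 4)   # (batch, a, r, b, r)
    return x.reshape(bsize, 1, a * r, b * r)

print(phase_shift_np(np.arange(2 * 4 * 3 * 3).reshape(2, 4, 3, 3), r=2).shape)  # (2, 1, 6, 6)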
# Multiscale Dilated Convolution Block
# This function (not a layer in and of itself, though you could make it one) returns a set of concatenated conv2d and dilatedconv2d layers.
# Each layer uses the same basic filter W, operating at a different dilation factor (or taken as the mean of W for the 1x1 conv).
# The channel-wise output of each layer is weighted by a set of coefficients, which are initialized to 1 / the total number of dilation scales,
# meaning that we're starting by taking an elementwise mean. These should be learnable parameters.
# NOTES: - I'm considering changing the variable names to be more descriptive, and look less like ridiculous academic code. It's on the to-do list.
# - I keep the bias and nonlinearity out of the default definition for this layer, as I expect it to be batchnormed and nonlinearized in the model config.
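# A minimal sketch of such a block, assuming 3x3 filters and padding each branch by
# its dilation factor so every branch keeps the input's spatial size. Branch weights
# start at 1 / num_scales so the block initially computes an elementwise mean, as
# described above; bias and nonlinearity are left to the surrounding model config.
# This is an illustrative construction, not the author's exact implementation (in
# particular, the 1x1 mean-of-W branch is omitted here).
import lasagne
from lasagne.init import Constant
from lasagne.layers import (DilatedConv2DLayer, ElemwiseSumLayer, PadLayer,
                            ScaleLayer)

def mdc_block_sketch(incoming, num_filters, dilations=(1, 2, 4)):
    branches = []
    for d in dilations:
        # pad by d so a 3x3 filter with dilation d preserves the spatial size
        padded = PadLayer(incoming, width=d)
        conv = DilatedConv2DLayer(padded, num_filters=num_filters, filter_size=(3, 3),
                                  dilation=(d, d), nonlinearity=None, b=None)
        # learnable per-channel coefficients, initialised to 1 / number of scales
        scaled = ScaleLayer(conv, scales=Constant(1.0 / len(dilations)))
        branches.append(scaled)
    # elementwise combination over all dilation scales (initially a mean)
    return ElemwiseSumLayer(branches)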
def cnn_build(self, max_epochs=20, batch_size=100,
learning_rate=0.001, momentum=0.9,
verbose=1):
"""Build the network"""
if batch_size is None:
self.net = NeuralNet(
layers=self.layers,
max_epochs=max_epochs,
update=lasagne.updates.nesterov_momentum,
update_learning_rate=learning_rate,
update_momentum=momentum,
regression=False,
verbose=verbose)
else:
# batch iterator
batch_iterator = self.gen_BatchIterator(batch_size=batch_size)
self.net = NeuralNet(
layers=self.layers,
batch_iterator_train=batch_iterator,
max_epochs=max_epochs,
update=lasagne.updates.nesterov_momentum,
update_learning_rate=learning_rate,
update_momentum=momentum,
regression=False,
verbose=verbose)
def do_match_patches(self, layer):
# Use nodes in the model to compute the normalized cross-correlation, using results from the
# nearest-neighbor layers called 'nn3_1' and 'nn4_1'.
dist = self.matcher_outputs[layer]
dist = dist.reshape((dist.shape[1], -1))
# Compute the score of each patch, taking into account statistics from previous iteration. This equalizes
# the chances of the patches being selected when the user requests more variety.
offset = self.matcher_history[layer].reshape((-1, 1))
scores = (dist - offset * args.variety)
# Pick the best style patches for each patch in the current image, the result is an array of indices.
# Also return the maximum value along both axes, used to compare slices and add patch variety.
return [scores.argmax(axis=0), scores.max(axis=0), dist.max(axis=1)]
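# The scoring above can be illustrated with plain NumPy on a toy distance matrix of
# shape (num_style_patches, num_image_patches). Here `variety` stands in for
# args.variety and `offset` for the per-style-patch history; names and sizes are
# purely illustrative.
import numpy as np

dist = np.random.rand(5, 8).astype(np.float32)    # cross-correlation per (style, image) patch pair
offset = np.random.rand(5, 1).astype(np.float32)  # how strongly each style patch scored before
variety = 0.1

scores = dist - offset * variety
best_idx = scores.argmax(axis=0)   # best style patch index for each image patch
best_val = scores.max(axis=0)      # its (penalised) score, used to compare slices
history = dist.max(axis=1)         # best raw score each style patch achieved
print(best_idx.shape, best_val.shape, history.shape)   # (8,) (8,) (5,)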
#------------------------------------------------------------------------------------------------------------------
# Error/Loss Functions
#------------------------------------------------------------------------------------------------------------------
def style_loss(self):
"""Returns a list of loss components as Theano expressions. Finds the best style patch for each patch in the
current image using normalized cross-correlation, then computes the mean squared error for all patches.
"""
style_loss = []
if args.style_weight == 0.0:
return style_loss
# Extract the patches from the current image, as well as their magnitude.
result = self.do_extract_patches(zip(self.style_layers, self.model.get_outputs('conv', self.style_layers)))
# Multiple style layers are optimized separately, usually conv3_1 and conv4_1 — semantic data not used here.
for l, matches, patches in zip(self.style_layers, self.tensor_matches, result[0::3]):
# Compute the mean squared error between the current patch and the best matching style patch.
# Ignore the last channels (from semantic map) so errors returned are indicative of image only.
loss = T.mean((patches - matches[:,:self.model.channels[l]]) ** 2.0)
style_loss.append(('style', l, args.style_weight * loss))
return style_loss
def get_objective(l1=0, l2=0.005):
def objective(layers, loss_function, target, aggregate=aggregate,
deterministic=False, get_output_kw=None):
if get_output_kw is None:
get_output_kw = {}
output_layer = layers[-1]
first_layer = layers[1]
network_output = lasagne.layers.get_output(
output_layer, deterministic=deterministic, **get_output_kw)
if not deterministic:
losses = loss_function(network_output, target) \
+ l2 * regularization.regularize_network_params(
output_layer, regularization.l2) \
+ l1 * regularization.regularize_layer_params(
output_layer, regularization.l1)
else:
losses = loss_function(network_output, target)
return aggregate(losses)
return objective
def exe_rnn(use_embedd, length, num_units, position, binominal):
batch_size = BATCH_SIZE
input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
target_var = T.ivector(name='targets')
layer_input = lasagne.layers.InputLayer(shape=(None, length, 1), input_var=input_var, name='input')
if use_embedd:
layer_position = construct_position_input(batch_size, length, num_units)
layer_input = lasagne.layers.concat([layer_input, layer_position], axis=2)
layer_rnn = RecurrentLayer(layer_input, num_units, nonlinearity=nonlinearities.tanh, only_return_final=True,
W_in_to_hid=lasagne.init.GlorotUniform(), W_hid_to_hid=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.), name='RNN')
# W = layer_rnn.W_hid_to_hid.sum()
# U = layer_rnn.W_in_to_hid.sum()
# b = layer_rnn.b.sum()
layer_output = DenseLayer(layer_rnn, num_units=1, nonlinearity=nonlinearities.sigmoid, name='output')
return train(layer_output, layer_rnn, input_var, target_var, batch_size, length, position, binominal)
def create_updates(loss, network, opt, learning_rate, momentum, beta1, beta2):
params = lasagne.layers.get_all_params(network, trainable=True)
grads = theano.grad(loss, params)
# if max_norm:
# names = ['crf.U', 'crf.W_h', 'crf.W_c', 'crf.b']
# constraints = [grad for param, grad in zip(params, grads) if param.name in names]
# assert len(constraints) == 4
# scaled_grads = total_norm_constraint(constraints, max_norm=max_norm)
# counter = 0
# for i in xrange(len(params)):
# param = params[i]
# if param.name in names:
# grads[i] = scaled_grads[counter]
# counter += 1
# assert counter == 4
if opt == 'adam':
updates = adam(grads, params=params, learning_rate=learning_rate, beta1=beta1, beta2=beta2)
elif opt == 'momentum':
updates = nesterov_momentum(grads, params=params, learning_rate=learning_rate, momentum=momentum)
else:
raise ValueError('unknown optimization algorithm: %s' % opt)
return updates
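# A hypothetical end-to-end use of create_updates: take a network's training loss,
# build the parameter updates, and compile a Theano training function. `network`,
# `input_var`, and `target_var` are assumed to be defined elsewhere.
import theano
import lasagne

prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
updates = create_updates(loss, network, opt='adam', learning_rate=1e-3,
                         momentum=0.9, beta1=0.9, beta2=0.999)
train_fn = theano.function([input_var, target_var], loss, updates=updates)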
def __init__(self):
self.network = collections.OrderedDict()
self.network['img'] = InputLayer((None, 3, None, None))
self.network['seed'] = InputLayer((None, 3, None, None))
config, params = self.load_model()
self.setup_generator(self.last_layer(), config)
if args.train:
concatenated = lasagne.layers.ConcatLayer([self.network['img'], self.network['out']], axis=0)
self.setup_perceptual(concatenated)
self.load_perceptual()
self.setup_discriminator()
self.load_generator(params)
self.compile()
#------------------------------------------------------------------------------------------------------------------
# Network Configuration
#------------------------------------------------------------------------------------------------------------------
def build_critic(input_var=None):
from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
DenseLayer)
try:
from lasagne.layers.dnn import batch_norm_dnn as batch_norm
except ImportError:
from lasagne.layers import batch_norm
from lasagne.nonlinearities import LeakyRectify
lrelu = LeakyRectify(0.2)
# input: (None, 1, 28, 28)
layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
# two convolutions
layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
nonlinearity=lrelu))
layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
nonlinearity=lrelu))
# fully-connected layer
layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
# output layer (linear)
layer = DenseLayer(layer, 1, nonlinearity=None)
print ("critic output:", layer.output_shape)
return layer
def build_model(self):
rng=np.random.RandomState(1234)
lasagne.random.set_rng(rng)
# Prepare Theano variables for inputs and targets
self.noise_var = T.matrix('noise')
self.input_var = T.tensor4('inputs')
# Create neural network model
generator = build_generator(self.noise_var)
critic = build_critic(self.input_var)
# Create expression for passing real data through the critic
self.real_out = lasagne.layers.get_output(critic)
# Create expression for passing fake data through the critic
self.fake_out = lasagne.layers.get_output(critic,
lasagne.layers.get_output(generator))
# Create update expressions for training
self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
self.generator = generator
self.critic = critic
def build_critic(input_var=None, verbose=False):
from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
DenseLayer)
try:
from lasagne.layers.dnn import batch_norm_dnn as batch_norm
except ImportError:
from lasagne.layers import batch_norm
from lasagne.nonlinearities import LeakyRectify, sigmoid
lrelu = LeakyRectify(0.2)
# input: (None, 3, 32, 32)
layer = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
# three convolutions
layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
nonlinearity=lrelu))
layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=2, pad='same',
nonlinearity=lrelu))
layer = batch_norm(Conv2DLayer(layer, 512, 5, stride=2, pad='same',
nonlinearity=lrelu))
# # fully-connected layer
# layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
# output layer (linear)
layer = DenseLayer(layer, 1, nonlinearity=None)
if verbose: print ("critic output:", layer.output_shape)
return layer
def build_model(self):
rng=np.random.RandomState(1234)
lasagne.random.set_rng(rng)
# Prepare Theano variables for inputs and targets
self.noise_var = T.matrix('noise')
self.input_var = T.tensor4('inputs')
# Create neural network model
generator = build_generator(self.noise_var,self.verbose)
critic = build_critic(self.input_var,self.verbose)
# Create expression for passing real data through the critic
self.real_out = lasagne.layers.get_output(critic)
# Create expression for passing fake data through the critic
self.fake_out = lasagne.layers.get_output(critic,
lasagne.layers.get_output(generator))
# Create update expressions for training
self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
self.generator = generator
self.critic = critic
def build_model(self):
import theano.tensor as T
self.x = T.ftensor4('x')
self.y = T.lvector('y')
self.lr = T.scalar('lr')
net = build_model_resnet50(input_shape=(None, 3, 224, 224))
if self.verbose: print('Total number of layers:', len(lasagne.layers.get_all_layers(net['prob'])))
self.output_layer = net['prob']
from lasagne.layers import get_output
self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
from lasagne.objectives import categorical_accuracy
self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
def compile_val(self):
if self.verbose: print('compiling validation function...')
import theano
from lasagne.layers import get_output
output_val = lasagne.layers.get_output(self.output_layer, self.x, deterministic=True)
from lasagne.objectives import categorical_accuracy, categorical_crossentropy
cost = categorical_crossentropy(output_val, self.y).mean()
error = 1-categorical_accuracy(output_val, self.y, top_k=1).mean()
error_top_5 = 1-categorical_accuracy(output_val, self.y, top_k=5).mean()
self.val_fn= theano.function([self.subb_ind], [cost,error,error_top_5], updates=[],
givens=[(self.x, self.shared_x_slice),
(self.y, self.shared_y_slice)]
)
def build_model(self):
import theano.tensor as T
self.x = T.ftensor4('x')
self.y = T.lvector('y')
self.lr = T.scalar('lr')
net = build_model_vgg16(input_shape=(None, 3, 224, 224), verbose=self.verbose)
self.output_layer = net['prob']
from lasagne.layers import get_output
self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
from lasagne.objectives import categorical_accuracy
self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
def compile_val(self):
if self.verbose: print('compiling validation function...')
import theano
from lasagne.layers import get_output
output_val = lasagne.layers.get_output(self.output_layer, self.x, deterministic=True)
from lasagne.objectives import categorical_accuracy, categorical_crossentropy
cost = categorical_crossentropy(output_val, self.y).mean()
error = 1-categorical_accuracy(output_val, self.y, top_k=1).mean()
error_top_5 = 1-categorical_accuracy(output_val, self.y, top_k=5).mean()
self.val_fn= theano.function([self.subb_ind], [cost,error,error_top_5], updates=[],
givens=[(self.x, self.shared_x_slice),
(self.y, self.shared_y_slice)]
)
def build_critic(input_var=None):
from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
DenseLayer)
try:
from lasagne.layers.dnn import batch_norm_dnn as batch_norm
except ImportError:
from lasagne.layers import batch_norm
from lasagne.nonlinearities import LeakyRectify
lrelu = LeakyRectify(0.2)
# input: (None, 1, 28, 28)
layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
# two convolutions
layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
nonlinearity=lrelu))
layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
nonlinearity=lrelu))
# fully-connected layer
layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
# output layer (linear and without bias)
layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
print ("critic output:", layer.output_shape)
return layer
def build_model(self):
rng=np.random.RandomState(1234)
lasagne.random.set_rng(rng)
# Prepare Theano variables for inputs and targets
self.noise_var = T.matrix('noise')
self.input_var = T.tensor4('inputs')
# Create neural network model
generator = build_generator(self.noise_var)
critic = build_critic(self.input_var)
# Create expression for passing real data through the critic
self.real_out = lasagne.layers.get_output(critic)
# Create expression for passing fake data through the critic
self.fake_out = lasagne.layers.get_output(critic,
lasagne.layers.get_output(generator))
# Create update expressions for training
self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
self.generator = generator
self.critic = critic
def build_model(self):
import theano.tensor as T
self.x = T.ftensor4('x')
self.y = T.lvector('y')
self.lr = T.scalar('lr')
net = build_model_resnet152(input_shape=(None, 3, 224, 224))
self.output_layer = net['prob']
from lasagne.layers import get_output
self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
from lasagne.objectives import categorical_accuracy
self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
def compile_val(self):
if self.verbose: print('compiling validation function...')
import theano
from lasagne.layers import get_output
output_val = lasagne.layers.get_output(self.output_layer, self.x, deterministic=True)
from lasagne.objectives import categorical_accuracy, categorical_crossentropy
cost = categorical_crossentropy(output_val, self.y).mean()
error = 1-categorical_accuracy(output_val, self.y, top_k=1).mean()
error_top_5 = 1-categorical_accuracy(output_val, self.y, top_k=5).mean()
self.val_fn= theano.function([self.subb_ind], [cost,error,error_top_5], updates=[],
givens=[(self.x, self.shared_x_slice),
(self.y, self.shared_y_slice)]
)
def build_BiRNN_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
precompute_input=True, num_filters=20, dropout=True, in_to_out=False):
# first get some necessary dimensions or parameters
conv_window = 3
_, sent_length, _ = incoming2.output_shape
# dropout before cnn?
if dropout:
incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
# construct convolution layer
cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
# infer the pool size for pooling (the pool should span all time steps of the cnn output)
_, _, pool_size = cnn_layer.output_shape
# construct max pool layer
pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
# reshape the layer to match rnn incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))
# finally, concatenate the two incoming layers together.
incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)
return build_BiRNN(incoming, num_units, mask=mask, grad_clipping=grad_clipping, nonlinearity=nonlinearity,
precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
def build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
peepholes=False, num_filters=20, dropout=True, in_to_out=False):
# first get some necessary dimensions or parameters
conv_window = 3
_, sent_length, _ = incoming2.output_shape
# dropout before cnn?
if dropout:
incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
# construct convolution layer
cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
# infer the pool size for pooling (the pool should span all time steps of the cnn output)
_, _, pool_size = cnn_layer.output_shape
# construct max pool layer
pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
# reshape the layer to match lstm incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))
# finally, concatenate the two incoming layers together.
incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)
return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
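# A hypothetical call of the helper above. Character features arrive flattened to
# (batch * sent_length, char_dim, max_char_len) so Conv1DLayer can slide over the
# characters of each word, while word features arrive as (batch, sent_length,
# word_dim); build_BiLSTM is assumed to be defined alongside this helper. All
# shapes and names below are illustrative only.
import lasagne

sent_length, max_char_len = 40, 20
char_in = lasagne.layers.InputLayer((None, 30, max_char_len), name='char')   # 30-dim char embeddings
word_in = lasagne.layers.InputLayer((None, sent_length, 100), name='word')   # 100-dim word embeddings
mask_in = lasagne.layers.InputLayer((None, sent_length), name='mask')
bi_lstm = build_BiLSTM_CNN(char_in, word_in, num_units=200, mask=mask_in,
                           grad_clipping=5, peepholes=False, num_filters=30,
                           dropout=True)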
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
# save model that will be created
self.model = model
self.n_batch = n_batch
self.n_lat = 100
self.n_dim = n_dim
self.n_chan = n_chan
# invoke parent constructor
Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)
# sample generation
Z = T.matrix(dtype=theano.config.floatX) # noise matrix
_, _, _, _, l_sample, l_p_z = self.network
sample = lasagne.layers.get_output(l_sample, {l_p_z : Z}, deterministic=True)
self.sample = theano.function([Z], sample, on_unused_input='warn')
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
# save model that will be created
self.model = model
self.n_sample = 1 # adjustable parameter, though 1 works best in practice
self.n_batch = n_batch
self.n_lat = 200
self.n_dim = n_dim
self.n_chan = n_chan
self.n_batch = n_batch
Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)
# sample generation
Z = T.matrix(dtype=theano.config.floatX) # noise matrix
l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
l_qz_mu, l_qz_logsigma, l_qa_mu, l_qa_logsigma, \
l_qa, l_qz, l_d = self.network
sample = lasagne.layers.get_output(l_px_mu, {l_qz : Z}, deterministic=True)
self.sample = theano.function([Z], sample, on_unused_input='warn')
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
# save model that will be created
self.model = model
self.n_batch = n_batch
self.n_lat = 100
self.n_dim = n_dim
self.n_chan = n_chan
self.n_batch = n_batch
# invoke parent constructor
Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)
# sample generation
Z = T.matrix(dtype=theano.config.floatX) # noise matrix
_, _, _, _, l_sample, l_p_z = self.network
sample = lasagne.layers.get_output(l_sample, {l_p_z : Z}, deterministic=True)
self.sample = theano.function([Z], sample, on_unused_input='warn')
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
# save model that will be created
self.model = model
self.n_sample = 1 # adjustable parameter, though 1 works best in practice
self.n_batch = n_batch
self.n_lat = 200
self.n_dim = n_dim
self.n_chan = n_chan
self.n_batch = n_batch
Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)
# sample generation
Z = T.matrix(dtype=theano.config.floatX) # noise matrix
l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
l_qz_mu, l_qz_logsigma, l_qa_mu, l_qa_logsigma, \
l_qa, l_qz = self.network
sample = lasagne.layers.get_output(l_px_mu, {l_qz : Z}, deterministic=True)
self.sample = theano.function([Z], sample, on_unused_input='warn')
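# Once the model is built, the compiled sampler above can be driven with a NumPy
# noise matrix of shape (num_samples, n_lat); `vae` below is a hypothetical
# instance of this class.
import numpy as np
import theano

z = np.random.normal(size=(16, vae.n_lat)).astype(theano.config.floatX)
samples = vae.sample(z)   # e.g. (16, n_chan, n_dim, n_dim) decoded means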