def _set_inverse_parameters(self, patterns=None):
self.trainable_layers = [self.inverse_map[l]
for l in L.get_all_layers(self.output_layer)
if type(l) in [L.Conv2DLayer, L.DenseLayer]]
if patterns is not None:
if type(patterns) is list:
patterns = patterns[0]
for i,layer in enumerate(self.trainable_layers):
pattern = patterns['A'][i]
if pattern.ndim == 4:
pattern = pattern.transpose(1,0,2,3)
elif pattern.ndim == 2:
pattern = pattern.T
layer.W.set_value(pattern)
else:
print("Patterns not given, explanation is random.")
def _set_inverse_parameters(self, patterns=None):
self.trainable_layers = [(l, self.inverse_map[l])
for l in L.get_all_layers(self.output_layer)
if type(l) in [L.Conv2DLayer, L.DenseLayer]]
if patterns is not None:
if type(patterns) is list:
patterns = patterns[0]
for i,layer in enumerate(self.trainable_layers):
param = layer[0].W.get_value()
pattern = patterns['A'][i]
if pattern.ndim == 4:
if layer[0].flip_filters:
param = param[:,:,::-1,::-1]
pattern = param*pattern
pattern = pattern.transpose(1,0,2,3)
elif pattern.ndim == 2:
pattern = param*pattern
pattern = pattern.T
layer[1].W.set_value(pattern)
else:
print("Patterns not given, explanation is random.")
def loadModel(filename):
print "IMPORTING MODEL PARAMS...",
net_filename = MODEL_PATH + filename
with open(net_filename, 'rb') as f:
data = pickle.load(f)
#for training, we only want to load the model params
net = data['net']
params = l.get_all_param_values(net)
if LOAD_OUTPUT_LAYER:
l.set_all_param_values(NET, params)
else:
l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
print "DONE!"
def _set_inverse_parameters(self, patterns=None):
for l in L.get_all_layers(self.output_layer):
if type(l) is L.Conv2DLayer:
W = l.W.get_value()
if l.flip_filters:
W = W[:,:,::-1,::-1]
W = W.transpose(1,0,2,3)
self.inverse_map[l].W.set_value(W)
elif type(l) is L.DenseLayer:
self.inverse_map[l].W.set_value(l.W.get_value().T)
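Why the transpose is the right inverse here: a dense layer computes y = x.dot(W), and propagating a signal back through it is y.dot(W.T), i.e. the gradient of the forward map. A quick numpy check, independent of Lasagne:

import numpy as np

x = np.random.randn(1, 784)
W = np.random.randn(784, 256)
y = x.dot(W)               # forward pass of a linear, bias-free dense layer
r = y.dot(W.T)             # backward pass performed by the inverse layer
assert r.shape == x.shape  # the signal lands back in input space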
def __init__(self, output_layer, patterns=None, **kwargs):
__check_cpu_flaw__()
layers = L.get_all_layers(output_layer)
self.layers = layers
self.input_layer = layers[0]
self.output_layer = layers[-1]
self._init_explain_function(patterns=patterns, **kwargs)
def _init_network(self, patterns=None, **kwargs):
self._remove_softmax()
self.relevance_values = T.matrix()
self._construct_layer_maps()
tmp = self._invert_layer_recursion(self.input_layer, None)
self.explain_output_layer = tmp
# Call in any case. Patterns are not always needed.
self._set_inverse_parameters(patterns=patterns)
#print("\n\n\nNetwork")
#for l in get_all_layers(self.explain_output_layer):
# print(type(l), get_output_shape(l))
#print("\n\n\n")
def _collect_layers(self):
self.all_layers = L.get_all_layers(self.output_layer)
ret = [l for l in self.all_layers if
type(l) in [L.DenseLayer, L.Conv2DLayer]]
return ret
def loadParams(epoch, filename=None):
print "IMPORTING MODEL PARAMS...",
    if filename is None:
net_filename = MODEL_PATH + "birdCLEF_" + RUN_NAME + "_model_params_epoch_" + str(epoch) + ".pkl"
else:
net_filename = MODEL_PATH + filename
with open(net_filename, 'rb') as f:
params = pickle.load(f)
if LOAD_OUTPUT_LAYER:
l.set_all_param_values(NET, params)
else:
l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
print "DONE!"
def getPredictionFunction(net):
net_output = l.get_output(net, deterministic=True)
print "COMPILING THEANO TEST FUNCTION...",
start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
print "DONE! (", int(time.time() - start), "s )"
return test_net
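Usage sketch, assuming the globals used elsewhere in this file (NET, IM_DIM, IM_SIZE, NUM_CLASSES) and float32 input shaped like the InputLayer:

import numpy as np

test_net = getPredictionFunction(NET)
batch = np.zeros((8, IM_DIM, IM_SIZE[1], IM_SIZE[0]), dtype=np.float32)  # dummy batch
probs = test_net(batch)       # (8, NUM_CLASSES) class probabilities
preds = probs.argmax(axis=1)  # hard labels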
################# PREDICTION POOLING ####################
def get_equivalent_input_padding(layer, layers_args=None):
    """Compute the equivalent padding in the input layer.

    Walks the chain of convolutional and pooling layers down to the first
    InputLayer, collecting each layer's padding and stride, and expresses
    them as a single equivalent padding applied to the layer immediately
    before the chain of layers under consideration.
    """
    # Use None instead of a mutable default: a default list would be shared
    # (and keep growing) across calls.
    if layers_args is None:
        layers_args = []
# Initialize the DynamicPadding layers
lasagne.layers.get_output(layer)
# Loop through conv and pool to collect data
all_layers = get_all_layers(layer)
# while(not isinstance(layer, (InputLayer))):
for layer in all_layers:
# Note: stride is numerical, but pad *could* be symbolic
try:
pad, stride = (layer.pad, layer.stride)
if isinstance(pad, int):
pad = pad, pad
if isinstance(stride, int):
stride = stride, stride
layers_args.append((pad, stride))
        except AttributeError:
pass
# Loop backward to compute the equivalent padding in the input
# layer
tot_pad = T.zeros(2)
pad_factor = T.ones(2)
while(layers_args):
pad, stride = layers_args.pop()
tot_pad += pad * pad_factor
pad_factor *= stride
return tot_pad
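Tracing the backward loop on a concrete chain makes the accumulation clearer: for two convolutions with pad=1 and stride=2 each, the loop pops the deeper layer first (tot_pad = 1*1 = 1, pad_factor becomes 2) and then adds the first layer's contribution 1*2, so the equivalent input padding is 3 per spatial dimension.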
def name_layers(layers, name, use_start_as_input=True):
if use_start_as_input:
layers_to_name = ll.get_all_layers(layers[-1], treat_as_input=[layers[0]])
else:
layers_to_name = ll.get_all_layers(layers[-1])
for i, layer in enumerate(layers_to_name):
this_name = name + '_' + str(i)
layer.name = this_name
params = layer.get_params()
for param in params:
param.name += '_' + this_name
def find_bn_updates(layers):
"""
Return a list of tuples of all the bn_layers in this list of layers
"""
bn_updates = []
layers = ll.get_all_layers(layers)
for layer in layers:
if hasattr(layer, 'bn_updates'):
bn_updates.extend(layer.bn_updates)
return bn_updates
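Usage sketch: the collected pairs are appended to the optimizer updates when compiling the training function. Note that `bn_updates` is an attribute of the custom batch-norm layers used in this project, not of stock Lasagne; loss, params, x_var, y_var and network_layers are placeholders here:

import theano
import lasagne

updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
updates = list(updates.items()) + find_bn_updates(network_layers)
train_fn = theano.function([x_var, y_var], loss, updates=updates)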
# Source: cnn_cascade_lasagne.py (project: Cascade-CNN-Face-Detection, author: gogolgrind)
def __freeze__(self):
for layer in layers.get_all_layers(self.net):
for param in layer.params:
layer.params[param].discard('trainable')
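Lasagne stores parameter tags as sets in layer.params, so discarding 'trainable' makes get_all_params(..., trainable=True) skip those parameters, which is exactly what freezes the sub-network. A quick check, with `model` standing in for an instance of this class:

import lasagne.layers as layers

model.__freeze__()
assert layers.get_all_params(model.net, trainable=True) == []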
def predict_classify(target_var, target_labels, model_path):
#redefine model
target_var = T.imatrix('y')
target_labels = target_var
dnn_strategy = model_path.split('/')[-1].split('_')[0]
network = get_model_by_strategy(dnn_strategy)
#load params
params = []
with open(model_path, 'r') as f:
lines = f.readlines()
for line in lines:
params.append(np.array(json.loads(line)))
set_all_param_values(network, params)
predict_prediction = get_output(network, deterministic=True)
predict_acc = binary_accuracy(predict_prediction, target_labels).mean()
input_layer = get_all_layers(network)[0]
predict = theano.function([input_layer.input_var, target_var],[predict_prediction, predict_acc])
X, labels, values, _ = load_dataset('../../data/test')
predict_prediction, predict_acc = predict(X, labels)
sys.stdout.write(" predict accuracy:\t\t\t{} %\n".format(predict_acc * 100))
#output predict result
with open('../../data/prediction', 'w') as f:
for ix in xrange(len(labels)):
line = str(labels[ix]) + '\t' + str(values[ix]) + '\t' + str(predict_prediction[ix][0]) + '\n'
f.write(line)
sys.stdout.flush()
def predict_regress(model_path):
#redefine model
target_var = T.fmatrix('y')
target_labels = T.switch(T.gt(target_var, 0), 1, 0)
dnn_strategy = model_path.split('/')[-1].split('_')[0]
network = get_model_by_strategy(dnn_strategy)
#load params
params = []
with open(model_path, 'r') as f:
lines = f.readlines()
for line in lines:
params.append(np.array(json.loads(line)))
set_all_param_values(network, params)
predict_prediction = get_output(network, deterministic=True)
predict_labels = T.switch(T.gt(predict_prediction, 0), 1, 0)
predict_acc = binary_accuracy(predict_labels, target_labels, threshold=0).mean()
input_layer = get_all_layers(network)[0]
predict = theano.function([input_layer.input_var, target_var],[predict_prediction, predict_acc])
X, y, labels, values, _, _, _, _, _, _ = load_dataset('../../data/test')
predict_prediction, predict_acc = predict(X, y)
sys.stdout.write(" predict accuracy:\t\t\t{} %\n".format(predict_acc * 100))
#output predict result
with open('../../data/prediction', 'w') as f:
for ix in xrange(len(labels)):
line = str(labels[ix]) + '\t' + str(values[ix]) + '\t' + str(predict_prediction[ix][0]) + '\n'
f.write(line)
sys.stdout.flush()
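Both predict functions expect the model file to hold one JSON-encoded parameter array per line; a minimal sketch of the writer side (function name and call site are assumptions):

import json
from lasagne.layers import get_all_param_values

def save_params(network, model_path):
    with open(model_path, 'w') as f:
        for param in get_all_param_values(network):
            # one parameter tensor per line, as a nested JSON list
            f.write(json.dumps(param.tolist()) + '\n')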
def description(self):
def get_number_of_params(l):
return np.sum([
np.prod(param.get_value().shape)
for param in l.get_params()
])
def describe_layer(l):
        return '%s\n output shape: %s\n number of params: %s' % (l, l.output_shape, get_number_of_params(l))
return '%s\n%s' % (
str(self),
'\n'.join([describe_layer(l) for l in layers.get_all_layers(self.outputs)])
)
def buildModel(mtype=1):
print "BUILDING MODEL TYPE", mtype, "..."
#default settings (Model 1)
filters = 64
first_stride = 2
last_filter_multiplier = 16
#specific model type settings (see working notes for details)
if mtype == 2:
first_stride = 1
elif mtype == 3:
filters = 32
last_filter_multiplier = 8
#input layer
net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))
#conv layers
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
if mtype == 2:
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)
#dense layers
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.DropoutLayer(net, DROPOUT)
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.DropoutLayer(net, DROPOUT)
#Classification Layer
if MULTI_LABEL:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
else:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))
print "...DONE!"
#model stats
print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
print "MODEL HAS", l.count_params(net), "PARAMS"
return net
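All three model types downsample by an overall factor of 64 (mtype 3 only narrows the filter counts): mtype 1 and 3 use a stride-2 first conv plus five 2x2 poolings, while mtype 2 keeps stride 1 but adds a sixth conv/pool stage, so IM_SIZE should be divisible accordingly. A quick sanity check with a hypothetical 512x256 input:

im_w, im_h = 512, 256          # hypothetical input size
print im_w // 64, im_h // 64   # final feature map would be 8 x 4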
def buildModel(mtype=1):
print "BUILDING MODEL TYPE", mtype, "..."
#default settings (Model 1)
filters = 64
first_stride = 2
last_filter_multiplier = 16
#specific model type settings (see working notes for details)
if mtype == 2:
first_stride = 1
elif mtype == 3:
filters = 32
last_filter_multiplier = 8
#input layer
net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))
#conv layers
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
if mtype == 2:
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)
#dense layers
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
#Classification Layer
if MULTI_LABEL:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
else:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))
print "...DONE!"
#model stats
print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
print "MODEL HAS", l.count_params(net), "PARAMS"
return net
def get_discriminator(self):
''' specify discriminator D0 '''
"""
disc0_layers = [LL.InputLayer(shape=(self.args.batch_size, 3, 32, 32))]
disc0_layers.append(LL.GaussianNoiseLayer(disc0_layers[-1], sigma=0.05))
disc0_layers.append(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu))
disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 16x16
disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu)))
disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 8x8
disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=0, W=Normal(0.02), nonlinearity=nn.lrelu))) # 6x6
disc0_layer_shared = LL.NINLayer(disc0_layers[-1], num_units=192, W=Normal(0.02), nonlinearity=nn.lrelu) # 6x6
disc0_layers.append(disc0_layer_shared)
disc0_layer_z_recon = LL.DenseLayer(disc0_layer_shared, num_units=50, W=Normal(0.02), nonlinearity=None)
disc0_layers.append(disc0_layer_z_recon) # also need to recover z from x
disc0_layers.append(LL.GlobalPoolLayer(disc0_layer_shared))
disc0_layer_adv = LL.DenseLayer(disc0_layers[-1], num_units=10, W=Normal(0.02), nonlinearity=None)
disc0_layers.append(disc0_layer_adv)
return disc0_layers, disc0_layer_adv, disc0_layer_z_recon
"""
disc_x_layers = [LL.InputLayer(shape=(None, 3, 32, 32))]
disc_x_layers.append(LL.GaussianNoiseLayer(disc_x_layers[-1], sigma=0.2))
disc_x_layers.append(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=0, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers_shared = LL.NINLayer(disc_x_layers[-1], num_units=192, W=Normal(0.01), nonlinearity=nn.lrelu)
disc_x_layers.append(disc_x_layers_shared)
disc_x_layer_z_recon = LL.DenseLayer(disc_x_layers_shared, num_units=self.args.z0dim, nonlinearity=None)
disc_x_layers.append(disc_x_layer_z_recon) # also need to recover z from x
# disc_x_layers.append(nn.MinibatchLayer(disc_x_layers_shared, num_kernels=100))
disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers_shared))
disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
disc_x_layers.append(disc_x_layer_adv)
#output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
#output_before_softmax_gen = LL.get_output(disc_x_layer_adv, gen_x, deterministic=False)
# temp = LL.get_output(gen_x_layers[-1], deterministic=False, init=True)
# temp = LL.get_output(disc_x_layers[-1], x, deterministic=False, init=True)
# init_updates = [u for l in LL.get_all_layers(gen_x_layers)+LL.get_all_layers(disc_x_layers) for u in getattr(l,'init_updates',[])]
return disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon
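Usage sketch, mirroring the commented-out lines above: the returned handles are meant for LL.get_output, e.g. to obtain the pre-softmax class scores and the reconstructed z for a real batch x (placeholder variable):

disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon = self.get_discriminator()
output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
z_recon_x = LL.get_output(disc_x_layer_z_recon, x, deterministic=False)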
def buildModel():
print "BUILDING MODEL TYPE..."
#default settings
filters = 64
first_stride = 2
last_filter_multiplier = 16
#input layer
net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))
#conv layers
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)
#dense layers
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.DropoutLayer(net, DROPOUT)
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.DropoutLayer(net, DROPOUT)
#Classification Layer
if MULTI_LABEL:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
else:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))
print "...DONE!"
#model stats
print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
print "MODEL HAS", l.count_params(net), "PARAMS"
return net