import theano
from lasagne.init import GlorotUniform
from lasagne.layers import InputLayer, DenseLayer, GaussianNoiseLayer
from lasagne.nonlinearities import sigmoid, linear


def build_encoder_layers(input_size, encode_size, sigma=0.5):
    """
    Build a denoising autoencoder with a Gaussian noise layer and tied weights.

    :param input_size: dimensionality of the input
    :param encode_size: dimensionality of the encoding
    :param sigma: standard deviation of the Gaussian corruption noise
    :return: shared weight matrix of the encoder, list of layer specs
    """
    # Encoder and decoder share the same weight matrix (the decoder uses W.T).
    W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))
    layers = [
        (InputLayer, {'shape': (None, input_size)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': encode_size, 'nonlinearity': sigmoid, 'W': W}),
        (DenseLayer, {'name': 'decoder', 'num_units': input_size, 'nonlinearity': linear, 'W': W.T}),
    ]
    return W, layers
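
# Usage sketch (assumption: the (LayerClass, kwargs) specs above follow the
# format of nolearn's NeuralNet wrapper; data and sizes are placeholders).
# Fitting X against itself trains the net to reconstruct the clean input
# from its noise-corrupted version, i.e. as a denoising autoencoder.
import numpy as np
from nolearn.lasagne import NeuralNet

W, layers = build_encoder_layers(input_size=784, encode_size=64)
ae = NeuralNet(layers=layers, regression=True,
               update_learning_rate=0.01, max_epochs=20)
X = np.random.rand(1000, 784).astype(np.float32)  # placeholder data
ae.fit(X, X)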
def build_bottleneck_layer(input_size, encode_size, sigma=0.3):
    """
    Build a linear bottleneck autoencoder with tied weights and Gaussian corruption.

    :return: shared weight matrix of the encoder, list of layer specs
    """
    W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))
    layers = [
        (InputLayer, {'shape': (None, input_size)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': encode_size, 'nonlinearity': linear, 'W': W}),
        (DenseLayer, {'name': 'decoder', 'num_units': input_size, 'nonlinearity': linear, 'W': W.T}),
    ]
    return W, layers
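
# Returning the shared matrix W makes the learned projection reusable after
# pretraining. A minimal sketch (ignores the encoder bias that DenseLayer
# adds by default):
W, layers = build_bottleneck_layer(input_size=784, encode_size=32)
# ... train the autoencoder as above, then project clean data:
codes = X.dot(W.get_value())  # shape: (n_samples, 32)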
def get_discriminator(self):
    """
    Specify the discriminator D(x).

    Returns the full layer list, the adversarial output layer, and the layer
    that reconstructs the latent code z from the input x. Here `LL` is
    lasagne.layers, `dnn` is lasagne.layers.dnn, `Normal` is lasagne.init.Normal,
    and `nn` is the project's helper module providing `lrelu` and `batch_norm`.
    """
    # An earlier variant (disc0) with lighter noise and dropout, kept for reference:
    # disc0_layers = [LL.InputLayer(shape=(self.args.batch_size, 3, 32, 32))]
    # disc0_layers.append(LL.GaussianNoiseLayer(disc0_layers[-1], sigma=0.05))
    # disc0_layers.append(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu))
    # disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu)))  # 16x16
    # disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
    # disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu)))
    # disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu)))  # 8x8
    # disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
    # disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=0, W=Normal(0.02), nonlinearity=nn.lrelu)))  # 6x6
    # disc0_layer_shared = LL.NINLayer(disc0_layers[-1], num_units=192, W=Normal(0.02), nonlinearity=nn.lrelu)  # 6x6
    # disc0_layers.append(disc0_layer_shared)
    # disc0_layer_z_recon = LL.DenseLayer(disc0_layer_shared, num_units=50, W=Normal(0.02), nonlinearity=None)
    # disc0_layers.append(disc0_layer_z_recon)  # also need to recover z from x
    # disc0_layers.append(LL.GlobalPoolLayer(disc0_layer_shared))
    # disc0_layer_adv = LL.DenseLayer(disc0_layers[-1], num_units=10, W=Normal(0.02), nonlinearity=None)
    # disc0_layers.append(disc0_layer_adv)
    # return disc0_layers, disc0_layer_adv, disc0_layer_z_recon

    disc_x_layers = [LL.InputLayer(shape=(None, 3, 32, 32))]
    disc_x_layers.append(LL.GaussianNoiseLayer(disc_x_layers[-1], sigma=0.2))
    disc_x_layers.append(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu))
    disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))  # 16x16
    disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
    disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu)))
    disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))  # 8x8
    disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
    disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=0, W=Normal(0.01), nonlinearity=nn.lrelu)))  # 6x6
    disc_x_layers_shared = LL.NINLayer(disc_x_layers[-1], num_units=192, W=Normal(0.01), nonlinearity=nn.lrelu)
    disc_x_layers.append(disc_x_layers_shared)
    disc_x_layer_z_recon = LL.DenseLayer(disc_x_layers_shared, num_units=self.args.z0dim, nonlinearity=None)
    disc_x_layers.append(disc_x_layer_z_recon)  # also need to recover z from x
    # disc_x_layers.append(nn.MinibatchLayer(disc_x_layers_shared, num_kernels=100))
    disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers_shared))
    disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
    disc_x_layers.append(disc_x_layer_adv)
    # output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
    # output_before_softmax_gen = LL.get_output(disc_x_layer_adv, gen_x, deterministic=False)
    return disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon
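
# Consumption sketch for the three return values (the symbolic input x, the
# `model` instance, and the loss wiring are assumptions, not from the snippet):
import theano.tensor as T
import lasagne.layers as LL

disc_layers, disc_adv, disc_z_recon = model.get_discriminator()
x = T.tensor4('x')  # real images, shape (batch, 3, 32, 32)
logits = LL.get_output(disc_adv, x, deterministic=False)     # adversarial/class logits
z_hat = LL.get_output(disc_z_recon, x, deterministic=False)  # recovered latent code
# e.g. recon_loss = T.mean(T.square(z_hat - z_true)) ties z_hat to the true z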
import lasagne
from lasagne.layers import (InputLayer, GaussianNoiseLayer, Conv2DLayer,
                            MaxPool2DLayer, DropoutLayer, NINLayer,
                            GlobalPoolLayer, DenseLayer)


def build_network():
    """Build a CIFAR-10 classifier: three conv blocks with weight normalization (WN)."""
    conv_defs = {
        'W': lasagne.init.HeNormal('relu'),
        'b': lasagne.init.Constant(0.0),
        'filter_size': (3, 3),
        'stride': (1, 1),
        'nonlinearity': lasagne.nonlinearities.LeakyRectify(0.1)
    }
    nin_defs = {
        'W': lasagne.init.HeNormal('relu'),
        'b': lasagne.init.Constant(0.0),
        'nonlinearity': lasagne.nonlinearities.LeakyRectify(0.1)
    }
    dense_defs = {
        'W': lasagne.init.HeNormal(1.0),
        'b': lasagne.init.Constant(0.0),
        'nonlinearity': lasagne.nonlinearities.softmax
    }
    wn_defs = {
        'momentum': .999
    }

    net = InputLayer        (name='input',  shape=(None, 3, 32, 32))
    net = GaussianNoiseLayer(net, name='noise',  sigma=.15)
    net = WN(Conv2DLayer    (net, name='conv1a', num_filters=128, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv1b', num_filters=128, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv1c', num_filters=128, pad='same', **conv_defs), **wn_defs)
    net = MaxPool2DLayer    (net, name='pool1',  pool_size=(2, 2))
    net = DropoutLayer      (net, name='drop1',  p=.5)
    net = WN(Conv2DLayer    (net, name='conv2a', num_filters=256, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv2b', num_filters=256, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv2c', num_filters=256, pad='same', **conv_defs), **wn_defs)
    net = MaxPool2DLayer    (net, name='pool2',  pool_size=(2, 2))
    net = DropoutLayer      (net, name='drop2',  p=.5)
    net = WN(Conv2DLayer    (net, name='conv3a', num_filters=512, pad=0, **conv_defs), **wn_defs)
    net = WN(NINLayer       (net, name='conv3b', num_units=256, **nin_defs), **wn_defs)
    net = WN(NINLayer       (net, name='conv3c', num_units=128, **nin_defs), **wn_defs)
    net = GlobalPoolLayer   (net, name='pool3')
    net = WN(DenseLayer     (net, name='dense',  num_units=10, **dense_defs), **wn_defs)
    return net
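
# WN above is assumed to be a weight-normalization wrapper (in the style of
# Salimans & Kingma) defined elsewhere in the source project; the 'momentum'
# key feeds its running-average update. Compiling predictions from the built
# network is standard Lasagne, a minimal sketch:
import theano
import theano.tensor as T

net = build_network()
x = T.tensor4('x')
# deterministic=True disables the Gaussian noise and dropout layers at test time
probs = lasagne.layers.get_output(net, x, deterministic=True)
predict_fn = theano.function([x], T.argmax(probs, axis=1))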