def load_dbn(path='models/oulu_ae.mat'):
"""
    Load pretrained DBN parameters from a .mat file
    :param path: path to the .mat file containing the pretrained weights
    :return: weights, biases, layer sizes and nonlinearities of the pretrained network
    """
    # read the weights and biases saved from the pretraining run
nn = sio.loadmat(path)
w1 = nn['w1']
w2 = nn['w2']
w3 = nn['w3']
w4 = nn['w4']
b1 = nn['b1'][0]
b2 = nn['b2'][0]
b3 = nn['b3'][0]
b4 = nn['b4'][0]
weights = [w1, w2, w3, w4]
biases = [b1, b2, b3, b4]
shapes = [2000, 1000, 500, 50]
nonlinearities = [rectify, rectify, rectify, linear]
return weights, biases, shapes, nonlinearities
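A quick usage sketch (the shape check assumes the .mat stores each weight matrix as (num_inputs, num_units), which is what DenseLayer expects):

weights, biases, shapes, nonlinearities = load_dbn('models/oulu_ae.mat')
# layer widths line up with the sizes above: 2000, 1000, 500, 50
assert [w.shape[1] for w in weights] == shapes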
def extract_encoder(dbn):
dbn_layers = dbn.get_all_layers()
encoder = NeuralNet(
layers=[
(InputLayer, {'name': 'input', 'shape': dbn_layers[0].shape}),
(DenseLayer, {'name': 'l1', 'num_units': dbn_layers[1].num_units, 'nonlinearity': sigmoid,
'W': dbn_layers[1].W, 'b': dbn_layers[1].b}),
(DenseLayer, {'name': 'l2', 'num_units': dbn_layers[2].num_units, 'nonlinearity': sigmoid,
'W': dbn_layers[2].W, 'b': dbn_layers[2].b}),
(DenseLayer, {'name': 'l3', 'num_units': dbn_layers[3].num_units, 'nonlinearity': sigmoid,
'W': dbn_layers[3].W, 'b': dbn_layers[3].b}),
(DenseLayer, {'name': 'l4', 'num_units': dbn_layers[4].num_units, 'nonlinearity': linear,
'W': dbn_layers[4].W, 'b': dbn_layers[4].b}),
],
update=adadelta,
update_learning_rate=0.01,
objective_l2=0.005,
verbose=1,
regression=True
)
encoder.initialize()
return encoder
def extract_encoder(dbn):
dbn_layers = dbn.get_all_layers()
encoder = NeuralNet(
layers=[
(InputLayer, {'name': 'input', 'shape': dbn_layers[0].shape}),
(DenseLayer, {'name': 'l1', 'num_units': dbn_layers[1].num_units, 'nonlinearity': sigmoid,
'W': dbn_layers[1].W, 'b': dbn_layers[1].b}),
(DenseLayer, {'name': 'l2', 'num_units': dbn_layers[2].num_units, 'nonlinearity': sigmoid,
'W': dbn_layers[2].W, 'b': dbn_layers[2].b}),
(DenseLayer, {'name': 'l3', 'num_units': dbn_layers[3].num_units, 'nonlinearity': sigmoid,
'W': dbn_layers[3].W, 'b': dbn_layers[3].b}),
(DenseLayer, {'name': 'l4', 'num_units': dbn_layers[4].num_units, 'nonlinearity': linear,
'W': dbn_layers[4].W, 'b': dbn_layers[4].b}),
],
update=nesterov_momentum,
update_learning_rate=0.001,
update_momentum=0.5,
objective_l2=0.005,
verbose=1,
regression=True
)
encoder.initialize()
return encoder
def compile_encoder(encoderpath=None):
    # build the input and encoder, either from a pickled model or from pretrained DBN weights
if encoderpath:
l_encoder = pickle.load(open(encoderpath, 'rb'))
input_var = las.layers.get_all_layers(l_encoder)[0].input_var
visualize_layer(las.layers.get_all_layers(l_encoder)[2], 40, 30)
else:
input_var = T.matrix('input', dtype='float32')
weights, biases = autoencoder.load_dbn()
en_activations = [sigmoid, sigmoid, sigmoid, linear]
en_layersizes = [2000, 1000, 500, 50]
l_input = InputLayer((None, 1200), input_var, name='input')
l_encoder = autoencoder.create_model(l_input, weights[:4], biases[:4], en_activations, en_layersizes)
print_network(l_encoder)
encoded_features = las.layers.get_output(l_encoder)
encode_fn = theano.function([input_var], encoded_features, allow_input_downcast=True)
return encode_fn
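A minimal sketch of calling the compiled encoder; the batch size and random data are placeholders, and the 1200-dim frames match the (None, 1200) input layer above:

import numpy as np

encode = compile_encoder()  # or compile_encoder('models/encoder.pkl') to reuse a pickled model
X = np.random.rand(32, 1200).astype('float32')  # 32 flattened 1200-dim frames
codes = encode(X)  # -> (32, 50) bottleneck features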
def build_encoder_layers(input_size, encode_size, sigma=0.5):
"""
    builds a denoising autoencoder with a Gaussian noise (corruption) layer and tied encoder/decoder weights
    :param input_size: input size
    :param encode_size: encoded (bottleneck) size
    :param sigma: Gaussian noise standard deviation
    :return: shared encoder weight matrix, layer definitions of the autoencoder
"""
W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))
layers = [
(InputLayer, {'shape': (None, input_size)}),
(GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
(DenseLayer, {'name': 'encoder', 'num_units': encode_size, 'nonlinearity': sigmoid, 'W': W}),
(DenseLayer, {'name': 'decoder', 'num_units': input_size, 'nonlinearity': linear, 'W': W.T}),
]
return W, layers
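A training sketch, assuming nolearn's NeuralNet and lasagne's nesterov_momentum are in scope and X_train is a hypothetical float32 design matrix; an autoencoder simply fits inputs against themselves:

W, layers = build_encoder_layers(input_size=1200, encode_size=50)
ae = NeuralNet(layers=layers, max_epochs=10,
               update=nesterov_momentum, update_learning_rate=0.001, update_momentum=0.5,
               regression=True, verbose=1)
ae.fit(X_train, X_train)  # reconstruct the uncorrupted input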
def extract_weights(ae):
weights = []
biases = []
shapes = [2000, 1000, 500, 50]
nonlinearities = [rectify, rectify, rectify, linear]
ae_layers = ae.get_all_layers()
weights.append(ae_layers[1].W.astype('float32'))
weights.append(ae_layers[2].W.astype('float32'))
weights.append(ae_layers[3].W.astype('float32'))
weights.append(ae_layers[4].W.astype('float32'))
biases.append(ae_layers[1].b.astype('float32'))
biases.append(ae_layers[2].b.astype('float32'))
biases.append(ae_layers[3].b.astype('float32'))
biases.append(ae_layers[4].b.astype('float32'))
return weights, biases, shapes, nonlinearities
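The extracted parameters can be pushed straight back into an encoder stack; note that .astype('float32') on a Theano shared variable returns a symbolic cast, which DenseLayer accepts for its W and b arguments. A sketch with a hypothetical trained autoencoder:

weights, biases, shapes, nonlinearities = extract_weights(trained_ae)
l_in = InputLayer((None, 1200), name='input')
encoder = create_pretrained_encoder(weights, biases, l_in)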
def residual_block(resnet_in, num_styles=None, num_filters=None, filter_size=3, stride=1):
    if num_filters is None:
num_filters = resnet_in.output_shape[1]
conv1 = style_conv_block(resnet_in, num_styles, num_filters, filter_size, stride)
conv2 = style_conv_block(conv1, num_styles, num_filters, filter_size, stride, linear)
res_block = ElemwiseSumLayer([conv2, resnet_in])
return res_block
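A stacking sketch; ElemwiseSumLayer requires matching shapes, so chained blocks keep num_filters at the incoming channel count (feature_extractor stands in for any conv feature-map layer, and style_conv_block is assumed to accept num_styles=None):

net = feature_extractor  # hypothetical layer with output shape (batch, C, H, W)
for _ in range(3):
    net = residual_block(net)  # num_filters defaults to the incoming channels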
def create_pretrained_encoder(weights, biases, incoming):
l_1 = DenseLayer(incoming, 2000, W=weights[0], b=biases[0], nonlinearity=sigmoid, name='fc1')
l_2 = DenseLayer(l_1, 1000, W=weights[1], b=biases[1], nonlinearity=sigmoid, name='fc2')
l_3 = DenseLayer(l_2, 500, W=weights[2], b=biases[2], nonlinearity=sigmoid, name='fc3')
l_4 = DenseLayer(l_3, 50, W=weights[3], b=biases[3], nonlinearity=linear, name='encoder')
return l_4
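A wiring sketch that combines this with load_dbn above; input_var and the 1200-dim input shape mirror the other snippets here:

input_var = T.matrix('input', dtype='float32')
l_in = InputLayer((None, 1200), input_var, name='input')
weights, biases, _, _ = load_dbn()
encoder = create_pretrained_encoder(weights, biases, l_in)
features = las.layers.get_output(encoder)  # symbolic (batch, 50) encoding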
def load_finetuned_dbn(path):
"""
    Load a fine-tuned Deep Belief Net from file
:param path: path to deep belief net parameters
:return: deep belief net
"""
dbn = NeuralNet(
layers=[
('input', las.layers.InputLayer),
('l1', las.layers.DenseLayer),
('l2', las.layers.DenseLayer),
('l3', las.layers.DenseLayer),
('l4', las.layers.DenseLayer),
('l5', las.layers.DenseLayer),
('l6', las.layers.DenseLayer),
('l7', las.layers.DenseLayer),
('output', las.layers.DenseLayer)
],
input_shape=(None, 1200),
l1_num_units=2000, l1_nonlinearity=sigmoid,
l2_num_units=1000, l2_nonlinearity=sigmoid,
l3_num_units=500, l3_nonlinearity=sigmoid,
l4_num_units=50, l4_nonlinearity=linear,
l5_num_units=500, l5_nonlinearity=sigmoid,
l6_num_units=1000, l6_nonlinearity=sigmoid,
l7_num_units=2000, l7_nonlinearity=sigmoid,
output_num_units=1200, output_nonlinearity=linear,
update=nesterov_momentum,
update_learning_rate=0.001,
update_momentum=0.5,
objective_l2=0.005,
verbose=1,
regression=True
)
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        dbn.load_params_from(pretrained_nn)
    return dbn
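Restoring the network and reusing just the encoder half, with a hypothetical parameter path; extract_encoder above snips off the first four layers as a standalone nolearn network:

dbn = load_finetuned_dbn('models/finetuned_dbn.pkl')
encoder = extract_encoder(dbn)
codes = encoder.predict(X)  # (batch, 50) bottleneck activations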
def create_pretrained_encoder(weights, biases, incoming):
l_1 = DenseLayer(incoming, 1000, W=weights[0], b=biases[0], nonlinearity=sigmoid, name='fc1')
l_2 = DenseLayer(l_1, 1000, W=weights[1], b=biases[1], nonlinearity=sigmoid, name='fc2')
l_3 = DenseLayer(l_2, 1000, W=weights[2], b=biases[2], nonlinearity=sigmoid, name='fc3')
l_4 = DenseLayer(l_3, 50, W=weights[3], b=biases[3], nonlinearity=linear, name='bottleneck')
return l_4
def create_model(input_var, input_shape, options):
conv_num_filters1 = 100
conv_num_filters2 = 150
conv_num_filters3 = 200
filter_size1 = 5
filter_size2 = 5
filter_size3 = 3
pool_size = 2
encode_size = options['BOTTLENECK']
dense_mid_size = options['DENSE']
pad_in = 'valid'
pad_out = 'full'
scaled_tanh = create_scaled_tanh()
    l_input = InputLayer(shape=input_shape, input_var=input_var, name='input')
    conv2d1 = Conv2DLayer(l_input, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
maxpool2d2 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d2')
conv2d3 = Conv2DLayer(maxpool2d2, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d3', nonlinearity=scaled_tanh)
maxpool2d4 = MaxPool2DLayer(conv2d3, pool_size=pool_size, name='maxpool2d4', pad=(1,0))
conv2d5 = Conv2DLayer(maxpool2d4, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d5', nonlinearity=scaled_tanh)
    reshape6 = ReshapeLayer(conv2d5, shape=([0], -1), name='reshape6')  # flatten to (batch, 3000) for a 30x40 input
reshape6_output = reshape6.output_shape[1]
dense7 = DenseLayer(reshape6, num_units=dense_mid_size, name='dense7', nonlinearity=scaled_tanh)
bottleneck = DenseLayer(dense7, num_units=encode_size, name='bottleneck', nonlinearity=linear)
# print_network(bottleneck)
dense8 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense8', nonlinearity=linear)
dense9 = DenseLayer(dense8, num_units=reshape6_output, W=dense7.W.T, nonlinearity=scaled_tanh, name='dense9')
    reshape10 = ReshapeLayer(dense9, shape=([0], conv_num_filters3, 3, 5), name='reshape10')  # (batch, 200, 3, 5)
deconv2d11 = Deconv2DLayer(reshape10, conv2d5.input_shape[1], conv2d5.filter_size, stride=conv2d5.stride,
W=conv2d5.W, flip_filters=not conv2d5.flip_filters, name='deconv2d11', nonlinearity=scaled_tanh)
upscale2d12 = Upscale2DLayer(deconv2d11, scale_factor=pool_size, name='upscale2d12')
deconv2d13 = Deconv2DLayer(upscale2d12, conv2d3.input_shape[1], conv2d3.filter_size, stride=conv2d3.stride,
W=conv2d3.W, flip_filters=not conv2d3.flip_filters, name='deconv2d13', nonlinearity=scaled_tanh)
upscale2d14 = Upscale2DLayer(deconv2d13, scale_factor=pool_size, name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
print_network(reshape16)
return reshape16
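A usage sketch; a 30x40 single-channel input makes the hard-coded (200, 3, 5) reshape line up exactly (and 30*40 = 1200, the flattened frame size used throughout), while the DENSE value here is an assumption:

input_var = T.tensor4('input', dtype='float32')
net = create_model(input_var, (None, 1, 30, 40), {'BOTTLENECK': 50, 'DENSE': 300})
recon = las.layers.get_output(net)  # (batch, 1200) reconstruction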