def load_finetuned_dbn(path):
    """
    Load a fine-tuned Deep Belief Net from file.

    :param path: path to a pickled file holding the trained parameters
    :return: a NeuralNet with the symmetric
        1200-2000-1000-500-50-500-1000-2000-1200 autoencoder topology,
        with parameters restored from ``path``
    """
    # Mirror-symmetric dense autoencoder: sigmoid layers around a
    # 50-unit linear bottleneck (l4); linear reconstruction output.
    dbn = NeuralNet(
        layers=[
            ('input', las.layers.InputLayer),
            ('l1', las.layers.DenseLayer),
            ('l2', las.layers.DenseLayer),
            ('l3', las.layers.DenseLayer),
            ('l4', las.layers.DenseLayer),
            ('l5', las.layers.DenseLayer),
            ('l6', las.layers.DenseLayer),
            ('l7', las.layers.DenseLayer),
            ('output', las.layers.DenseLayer)
        ],
        input_shape=(None, 1200),
        l1_num_units=2000, l1_nonlinearity=sigmoid,
        l2_num_units=1000, l2_nonlinearity=sigmoid,
        l3_num_units=500, l3_nonlinearity=sigmoid,
        l4_num_units=50, l4_nonlinearity=linear,
        l5_num_units=500, l5_nonlinearity=sigmoid,
        l6_num_units=1000, l6_nonlinearity=sigmoid,
        l7_num_units=2000, l7_nonlinearity=sigmoid,
        output_num_units=1200, output_nonlinearity=linear,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        # Reuse the object we just unpickled instead of re-reading the
        # file from disk: load_params_from accepts a NeuralNet / list of
        # parameter values as well as a filename.
        dbn.load_params_from(pretrained_nn)
    return dbn
# Collected example sources for linear() usage (scraped page header)
def create_pretrained_encoder(weights, biases, incoming):
    """
    Stack four pretrained dense layers on top of ``incoming``.

    :param weights: per-layer weight matrices (first four are used)
    :param biases: per-layer bias vectors (first four are used)
    :param incoming: input layer to build upon
    :return: the 50-unit linear bottleneck layer ('bottleneck')
    """
    layer_specs = [
        (2000, sigmoid, 'fc1'),
        (1000, sigmoid, 'fc2'),
        (500, sigmoid, 'fc3'),
        (50, linear, 'bottleneck'),
    ]
    network = incoming
    for (units, activation, layer_name), W, b in zip(layer_specs, weights, biases):
        network = DenseLayer(network, units, W=W, b=b,
                             nonlinearity=activation, name=layer_name)
    return network
def load_finetuned_dbn(path):
    """
    Load a fine-tuned Deep Belief Net from file.

    :param path: path to a pickled file holding the trained parameters
    :return: a NeuralNet with the symmetric
        1200-2000-1000-500-50-500-1000-2000-1200 autoencoder topology,
        with parameters restored from ``path``
    """
    # Mirror-symmetric dense autoencoder: sigmoid layers around a
    # 50-unit linear bottleneck (l4); linear reconstruction output.
    dbn = NeuralNet(
        layers=[
            ('input', las.layers.InputLayer),
            ('l1', las.layers.DenseLayer),
            ('l2', las.layers.DenseLayer),
            ('l3', las.layers.DenseLayer),
            ('l4', las.layers.DenseLayer),
            ('l5', las.layers.DenseLayer),
            ('l6', las.layers.DenseLayer),
            ('l7', las.layers.DenseLayer),
            ('output', las.layers.DenseLayer)
        ],
        input_shape=(None, 1200),
        l1_num_units=2000, l1_nonlinearity=sigmoid,
        l2_num_units=1000, l2_nonlinearity=sigmoid,
        l3_num_units=500, l3_nonlinearity=sigmoid,
        l4_num_units=50, l4_nonlinearity=linear,
        l5_num_units=500, l5_nonlinearity=sigmoid,
        l6_num_units=1000, l6_nonlinearity=sigmoid,
        l7_num_units=2000, l7_nonlinearity=sigmoid,
        output_num_units=1200, output_nonlinearity=linear,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        # Reuse the object we just unpickled instead of re-reading the
        # file from disk: load_params_from accepts a NeuralNet / list of
        # parameter values as well as a filename.
        dbn.load_params_from(pretrained_nn)
    return dbn
def create_pretrained_encoder(weights, biases, incoming):
    """
    Build the pretrained 2000-1000-500-50 encoder stack on ``incoming``.

    :param weights: per-layer weight matrices (indices 0..3 used)
    :param biases: per-layer bias vectors (indices 0..3 used)
    :param incoming: input layer to attach the encoder to
    :return: the final 50-unit linear layer ('bottleneck')
    """
    enc = DenseLayer(incoming, 2000, W=weights[0], b=biases[0],
                     nonlinearity=sigmoid, name='fc1')
    enc = DenseLayer(enc, 1000, W=weights[1], b=biases[1],
                     nonlinearity=sigmoid, name='fc2')
    enc = DenseLayer(enc, 500, W=weights[2], b=biases[2],
                     nonlinearity=sigmoid, name='fc3')
    enc = DenseLayer(enc, 50, W=weights[3], b=biases[3],
                     nonlinearity=linear, name='bottleneck')
    return enc
def compile_delta_features():
    """
    Compile a Theano function that maps raw 1200-dim frame sequences to
    their delta features.

    The pretrained DBN encoder (first 4 layers) reduces each frame to a
    50-dim bottleneck code; a DeltaLayer then appends windowed delta
    coefficients, and only those deltas are sliced out.

    :return: theano function (input [batch, seq, 1200], window scalar)
        -> delta features flattened to 2-D
    """
    # create symbolic inputs: a float32 batch of sequences and an
    # integer delta-window size.
    input_var = T.tensor3('input', dtype='float32')
    win_var = T.iscalar('theta')
    weights, biases = autoencoder.load_dbn()
    '''
    activations = [sigmoid, sigmoid, sigmoid, linear, sigmoid, sigmoid, sigmoid, linear]
    layersizes = [2000, 1000, 500, 50, 500, 1000, 2000, 1200]
    ae = autoencoder.create_model(l_input, weights, biases, activations, layersizes)
    print_network(ae)
    reconstruct = las.layers.get_output(ae)
    reconstruction_fn = theano.function([input_var], reconstruct, allow_input_downcast=True)
    recon_img = reconstruction_fn(test_data_resized)
    visualize_reconstruction(test_data_resized[225:250], recon_img[225:250])
    '''
    # NOTE(review): the triple-quoted block above is dead code kept as a
    # reference for full-autoencoder reconstruction/visualization.
    l_input = InputLayer((None, None, 1200), input_var, name='input')
    # Batch size and sequence length are kept symbolic so variable-length
    # sequences can be reshaped back after the per-frame encoder.
    symbolic_batchsize = l_input.input_var.shape[0]
    symbolic_seqlen = l_input.input_var.shape[1]
    # Encoder half only: sigmoid, sigmoid, sigmoid, then linear bottleneck.
    en_activations = [sigmoid, sigmoid, sigmoid, linear]
    en_layersizes = [2000, 1000, 500, 50]
    # Collapse (batch, seq) into one axis so DenseLayers see 2-D input.
    l_reshape1 = ReshapeLayer(l_input, (-1, l_input.shape[-1]), name='reshape1')
    l_encoder = autoencoder.create_model(l_reshape1, weights[:4], biases[:4], en_activations, en_layersizes)
    encoder_len = las.layers.get_output_shape(l_encoder)[-1]
    # Restore the (batch, seq, features) layout for the delta computation.
    l_reshape2 = ReshapeLayer(l_encoder, (symbolic_batchsize, symbolic_seqlen, encoder_len), name='reshape2')
    l_delta = DeltaLayer(l_reshape2, win_var, name='delta')
    # DeltaLayer concatenates [original | deltas]; the first 50 features
    # are the bottleneck codes, so slice them off to keep only deltas.
    l_slice = SliceLayer(l_delta, indices=slice(50, None), axis=-1, name='slice')  # extract the delta coefficients
    l_reshape3 = ReshapeLayer(l_slice, (-1, l_slice.output_shape[-1]), name='reshape3')
    print_network(l_reshape3)
    delta_features = las.layers.get_output(l_reshape3)
    delta_fn = theano.function([input_var, win_var], delta_features, allow_input_downcast=True)
    return delta_fn
def build_bottleneck_layer(input_size, encode_size, sigma=0.3):
    """
    Create the layer spec for a tied-weight denoising bottleneck autoencoder.

    :param input_size: dimensionality of the input vectors
    :param encode_size: dimensionality of the bottleneck code
    :param sigma: std-dev of the Gaussian corruption noise
    :return: (shared weight matrix, layer definition list for NeuralNet)
    """
    # One shared weight matrix: the decoder uses its transpose (tied weights).
    shared_W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))
    layer_defs = [
        (InputLayer, {'shape': (None, input_size)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': encode_size,
                      'nonlinearity': linear, 'W': shared_W}),
        (DenseLayer, {'name': 'decoder', 'num_units': input_size,
                      'nonlinearity': linear, 'W': shared_W.T}),
    ]
    return shared_W, layer_defs
def create_decoder(input_size, decode_size, weights):
    """
    Build a single-layer linear decoder network.

    :param input_size: dimensionality of the (encoded) input
    :param decode_size: dimensionality of the reconstruction
    :param weights: weight matrix to initialize the decoder layer with
    :return: an unfitted NeuralNet regression model
    """
    net = NeuralNet(
        layers=[
            (InputLayer, {'shape': (None, input_size)}),
            (DenseLayer, {'name': 'decoder', 'num_units': decode_size,
                          'nonlinearity': linear, 'W': weights}),
        ],
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )
    return net
def load_finetuned_dbn(path):
    """
    Load a fine-tuned Deep Belief Net from file.

    :param path: path to a pickled file holding the trained parameters
    :return: a NeuralNet with the symmetric
        1200-2000-1000-500-50-500-1000-2000-1200 autoencoder topology,
        with parameters restored from ``path``
    """
    # Mirror-symmetric dense autoencoder: sigmoid layers around a
    # 50-unit linear bottleneck (l4); linear reconstruction output.
    dbn = NeuralNet(
        layers=[
            ('input', las.layers.InputLayer),
            ('l1', las.layers.DenseLayer),
            ('l2', las.layers.DenseLayer),
            ('l3', las.layers.DenseLayer),
            ('l4', las.layers.DenseLayer),
            ('l5', las.layers.DenseLayer),
            ('l6', las.layers.DenseLayer),
            ('l7', las.layers.DenseLayer),
            ('output', las.layers.DenseLayer)
        ],
        input_shape=(None, 1200),
        l1_num_units=2000, l1_nonlinearity=sigmoid,
        l2_num_units=1000, l2_nonlinearity=sigmoid,
        l3_num_units=500, l3_nonlinearity=sigmoid,
        l4_num_units=50, l4_nonlinearity=linear,
        l5_num_units=500, l5_nonlinearity=sigmoid,
        l6_num_units=1000, l6_nonlinearity=sigmoid,
        l7_num_units=2000, l7_nonlinearity=sigmoid,
        output_num_units=1200, output_nonlinearity=linear,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        # Reuse the object we just unpickled instead of re-reading the
        # file from disk: load_params_from accepts a NeuralNet / list of
        # parameter values as well as a filename.
        dbn.load_params_from(pretrained_nn)
    return dbn
def create_pretrained_encoder(weights, biases, incoming):
    """
    Assemble the pretrained dense encoder ending in a linear bottleneck.

    :param weights: per-layer weight matrices (first four are used)
    :param biases: per-layer bias vectors (first four are used)
    :param incoming: layer to build the encoder on top of
    :return: the 50-unit linear layer ('bottleneck')
    """
    specs = [
        (2000, sigmoid, 'fc1'),
        (1000, sigmoid, 'fc2'),
        (500, sigmoid, 'fc3'),
        (50, linear, 'bottleneck'),
    ]
    out = incoming
    for idx, (units, act, layer_name) in enumerate(specs):
        out = DenseLayer(out, units, W=weights[idx], b=biases[idx],
                         nonlinearity=act, name=layer_name)
    return out
def create_pretrained_encoder(weights, biases, incoming):
    """
    Assemble the pretrained dense encoder; final linear layer is named
    'encoder'.

    :param weights: per-layer weight matrices (first four are used)
    :param biases: per-layer bias vectors (first four are used)
    :param incoming: layer to build the encoder on top of
    :return: the 50-unit linear layer ('encoder')
    """
    specs = [
        (2000, sigmoid, 'fc1'),
        (1000, sigmoid, 'fc2'),
        (500, sigmoid, 'fc3'),
        (50, linear, 'encoder'),
    ]
    out = incoming
    for idx, (units, act, layer_name) in enumerate(specs):
        out = DenseLayer(out, units, W=weights[idx], b=biases[idx],
                         nonlinearity=act, name=layer_name)
    return out
def create_pretrained_encoder(weights, biases, incoming):
    """
    Build the 2000-1000-500-50 pretrained encoder stack; the last layer
    is linear and named 'encoder'.

    :param weights: per-layer weight matrices (indices 0..3 used)
    :param biases: per-layer bias vectors (indices 0..3 used)
    :param incoming: input layer to attach the encoder to
    :return: the final 50-unit linear layer ('encoder')
    """
    net = DenseLayer(incoming, 2000, W=weights[0], b=biases[0],
                     nonlinearity=sigmoid, name='fc1')
    net = DenseLayer(net, 1000, W=weights[1], b=biases[1],
                     nonlinearity=sigmoid, name='fc2')
    net = DenseLayer(net, 500, W=weights[2], b=biases[2],
                     nonlinearity=sigmoid, name='fc3')
    net = DenseLayer(net, 50, W=weights[3], b=biases[3],
                     nonlinearity=linear, name='encoder')
    return net
def create_pretrained_encoder(weights, biases, incoming):
    """
    Stack the four pretrained encoder layers; output layer is linear and
    named 'encoder'.

    :param weights: per-layer weight matrices (indices 0..3 used)
    :param biases: per-layer bias vectors (indices 0..3 used)
    :param incoming: input layer to attach the encoder to
    :return: the final 50-unit linear layer ('encoder')
    """
    sizes = (2000, 1000, 500, 50)
    acts = (sigmoid, sigmoid, sigmoid, linear)
    tags = ('fc1', 'fc2', 'fc3', 'encoder')
    layer = incoming
    for i in range(4):
        layer = DenseLayer(layer, sizes[i], W=weights[i], b=biases[i],
                           nonlinearity=acts[i], name=tags[i])
    return layer
def create_model(incoming, options):
    """
    Build a convolutional autoencoder with tied encoder/decoder weights.

    Encoder: 3 conv+maxpool stages -> flatten -> dense -> linear bottleneck.
    Decoder: mirrors the encoder using transposed weights (W.T for dense
    layers, shared W for the deconvolutions).

    :param incoming: input layer (image batch)
    :param options: dict with 'BOTTLENECK' (code size) and 'DENSE'
        (mid dense layer size)
    :return: (flattened reconstruction layer, bottleneck layer)
    """
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()
    # --- encoder ---
    conv2d1 = Conv2DLayer(incoming, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d3 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d3')
    bn2 = BatchNormLayer(maxpool2d3, name='batchnorm2')
    conv2d4 = Conv2DLayer(bn2, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d4', nonlinearity=scaled_tanh)
    maxpool2d6 = MaxPool2DLayer(conv2d4, pool_size=pool_size, name='maxpool2d6', pad=(1,0))
    bn3 = BatchNormLayer(maxpool2d6, name='batchnorm3')
    conv2d7 = Conv2DLayer(bn3, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d7', nonlinearity=scaled_tanh)
    reshape9 = ReshapeLayer(conv2d7, shape=([0], -1), name='reshape9')  # 3000
    # Remember the flattened size so the decoder can reshape back.
    reshape9_output = reshape9.output_shape[1]
    bn8 = BatchNormLayer(reshape9, name='batchnorm8')
    dense10 = DenseLayer(bn8, num_units=dense_mid_size, name='dense10', nonlinearity=scaled_tanh)
    bn11 = BatchNormLayer(dense10, name='batchnorm11')
    bottleneck = DenseLayer(bn11, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    # --- decoder (tied weights: dense layers reuse W.T, deconvs reuse W) ---
    dense12 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense12', nonlinearity=linear)
    dense13 = DenseLayer(dense12, num_units=reshape9_output, W=dense10.W.T, nonlinearity=scaled_tanh, name='dense13')
    reshape14 = ReshapeLayer(dense13, shape=([0], conv_num_filters3, 3, 5), name='reshape14')  # 32 x 4 x 7
    # NOTE(review): the variable name `deconv2d19` is used for two
    # different layers (here and below). The graph is still wired
    # correctly because upscale2d16 consumes this first one before the
    # rebind, but the naming is misleading.
    deconv2d19 = Deconv2DLayer(reshape14, conv2d7.input_shape[1], conv2d7.filter_size, stride=conv2d7.stride,
                               W=conv2d7.W, flip_filters=not conv2d7.flip_filters, name='deconv2d19', nonlinearity=scaled_tanh)
    upscale2d16 = Upscale2DLayer(deconv2d19, scale_factor=pool_size, name='upscale2d16')
    deconv2d17 = Deconv2DLayer(upscale2d16, conv2d4.input_shape[1], conv2d4.filter_size, stride=conv2d4.stride,
                               W=conv2d4.W, flip_filters=not conv2d4.flip_filters, name='deconv2d17', nonlinearity=scaled_tanh)
    upscale2d18 = Upscale2DLayer(deconv2d17, scale_factor=pool_size, name='upscale2d18')
    # NOTE(review): this last layer's name 'deconv2d14' does not match
    # the variable name `deconv2d19` — presumably a copy/paste leftover.
    deconv2d19 = Deconv2DLayer(upscale2d18, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d14', nonlinearity=scaled_tanh)
    reshape20 = ReshapeLayer(deconv2d19, ([0], -1), name='reshape20')
    return reshape20, bottleneck
def create_pretrained_encoder(weights, biases, names, incoming):
    """
    Build the 2000-1000-500-50 pretrained encoder with caller-supplied
    layer names.

    :param weights: per-layer weight matrices (first four are used)
    :param biases: per-layer bias vectors (first four are used)
    :param names: layer names, one per dense layer (first four are used)
    :param incoming: layer to build the encoder on top of
    :return: the final 50-unit linear layer
    """
    sizes = (2000, 1000, 500, 50)
    activations = (sigmoid, sigmoid, sigmoid, linear)
    net = incoming
    for units, act, W, b, layer_name in zip(sizes, activations, weights, biases, names):
        net = DenseLayer(net, units, W=W, b=b, nonlinearity=act, name=layer_name)
    return net
def create_pretrained_encoder(weights, biases, incoming):
    """
    Build the pretrained encoder stack ending in a linear 'bottleneck'.

    :param weights: per-layer weight matrices (indices 0..3 used)
    :param biases: per-layer bias vectors (indices 0..3 used)
    :param incoming: input layer to attach the encoder to
    :return: the 50-unit linear layer ('bottleneck')
    """
    fc1 = DenseLayer(incoming, 2000, W=weights[0], b=biases[0],
                     nonlinearity=sigmoid, name='fc1')
    fc2 = DenseLayer(fc1, 1000, W=weights[1], b=biases[1],
                     nonlinearity=sigmoid, name='fc2')
    fc3 = DenseLayer(fc2, 500, W=weights[2], b=biases[2],
                     nonlinearity=sigmoid, name='fc3')
    bottleneck = DenseLayer(fc3, 50, W=weights[3], b=biases[3],
                            nonlinearity=linear, name='bottleneck')
    return bottleneck
def create_pretrained_encoder(weights, biases, incoming):
    """
    Stack four pretrained dense layers, finishing with a linear
    'bottleneck'.

    :param weights: per-layer weight matrices (first four are used)
    :param biases: per-layer bias vectors (first four are used)
    :param incoming: layer to build the encoder on top of
    :return: the 50-unit linear layer ('bottleneck')
    """
    plan = [
        (2000, sigmoid, 'fc1'),
        (1000, sigmoid, 'fc2'),
        (500, sigmoid, 'fc3'),
        (50, linear, 'bottleneck'),
    ]
    current = incoming
    for (units, act, tag), W, b in zip(plan, weights, biases):
        current = DenseLayer(current, units, W=W, b=b,
                             nonlinearity=act, name=tag)
    return current
def create_pretrained_encoder(weights, biases, names, incoming):
    """
    Build the pretrained encoder with caller-supplied layer names.

    :param weights: per-layer weight matrices (indices 0..3 used)
    :param biases: per-layer bias vectors (indices 0..3 used)
    :param names: layer names (indices 0..3 used)
    :param incoming: layer to build the encoder on top of
    :return: the final 50-unit linear layer
    """
    sizes = (2000, 1000, 500, 50)
    acts = (sigmoid, sigmoid, sigmoid, linear)
    out = incoming
    for i in range(4):
        out = DenseLayer(out, sizes[i], W=weights[i], b=biases[i],
                         nonlinearity=acts[i], name=names[i])
    return out
def create_pretrained_encoder(weights, biases, names, incoming):
    """
    Assemble the 2000-1000-500-50 pretrained encoder using the supplied
    per-layer names.

    :param weights: per-layer weight matrices (first four are used)
    :param biases: per-layer bias vectors (first four are used)
    :param names: names for the four dense layers
    :param incoming: layer to build the encoder on top of
    :return: the final 50-unit linear layer
    """
    stack = incoming
    spec = zip((2000, 1000, 500, 50),
               (sigmoid, sigmoid, sigmoid, linear),
               weights, biases, names)
    for units, act, W, b, tag in spec:
        stack = DenseLayer(stack, units, W=W, b=b, nonlinearity=act, name=tag)
    return stack
def create_pretrained_encoder(weights, biases, incoming):
    """
    Build the pretrained encoder ending in the linear 'bottleneck' layer.

    :param weights: per-layer weight matrices (indices 0..3 used)
    :param biases: per-layer bias vectors (indices 0..3 used)
    :param incoming: input layer to attach the encoder to
    :return: the 50-unit linear layer ('bottleneck')
    """
    hidden = DenseLayer(incoming, 2000, W=weights[0], b=biases[0],
                        nonlinearity=sigmoid, name='fc1')
    hidden = DenseLayer(hidden, 1000, W=weights[1], b=biases[1],
                        nonlinearity=sigmoid, name='fc2')
    hidden = DenseLayer(hidden, 500, W=weights[2], b=biases[2],
                        nonlinearity=sigmoid, name='fc3')
    return DenseLayer(hidden, 50, W=weights[3], b=biases[3],
                      nonlinearity=linear, name='bottleneck')
def upsample(input_layer, **kwargs):
    """
    2x learned upsampling via transposed convolution.

    Fixed configuration: 2 filters, 4x4 kernel, stride 2, crop 1, no
    bias, linear activation. Extra keyword arguments (e.g. a name or
    initial W) are forwarded to Deconv2DLayer.

    :param input_layer: layer to upsample
    :return: the Deconv2DLayer
    """
    return Deconv2DLayer(
        input_layer, num_filters=2, filter_size=4, stride=2,
        crop=1, b=None, nonlinearity=linear, flip_filters=True, **kwargs)