def extract_encoder(dbn):
    dbn_layers = dbn.get_all_layers()
    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': dbn_layers[0].shape}),
            (DenseLayer, {'name': 'l1', 'num_units': dbn_layers[1].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[1].W, 'b': dbn_layers[1].b}),
            (DenseLayer, {'name': 'l2', 'num_units': dbn_layers[2].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[2].W, 'b': dbn_layers[2].b}),
            (DenseLayer, {'name': 'l3', 'num_units': dbn_layers[3].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[3].W, 'b': dbn_layers[3].b}),
            (DenseLayer, {'name': 'l4', 'num_units': dbn_layers[4].num_units, 'nonlinearity': linear,
                          'W': dbn_layers[4].W, 'b': dbn_layers[4].b}),
        ],
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder
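A minimal usage sketch for the helper above, assuming dbn is an already-trained nolearn.lasagne NeuralNet whose layers 1 to 4 form the encoder stack; the batch size and the 1200-dimensional input below are made-up placeholders, not values from the original project:

import numpy as np

# Hypothetical usage: copy the DBN's encoder half and run a forward pass.
encoder = extract_encoder(dbn)
X = np.random.rand(32, 1200).astype('float32')   # placeholder batch; the real width comes from dbn_layers[0].shape
codes = encoder.predict(X)                       # bottleneck activations from layer 'l4'
print(codes.shape)

Because the pretrained W and b shared variables are passed straight into the new layers and regression=True, calling initialize() should be enough to compile the prediction function; no further fitting is needed before predict.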
def compile_encoder(encoderpath=None):
    # create input
    if encoderpath:
        l_encoder = pickle.load(open(encoderpath, 'rb'))
        input_var = las.layers.get_all_layers(l_encoder)[0].input_var
        visualize_layer(las.layers.get_all_layers(l_encoder)[2], 40, 30)
    else:
        input_var = T.matrix('input', dtype='float32')
        weights, biases = autoencoder.load_dbn()
        en_activations = [sigmoid, sigmoid, sigmoid, linear]
        en_layersizes = [2000, 1000, 500, 50]
        l_input = InputLayer((None, 1200), input_var, name='input')
        l_encoder = autoencoder.create_model(l_input, weights[:4], biases[:4], en_activations, en_layersizes)
    print_network(l_encoder)
    encoded_features = las.layers.get_output(l_encoder)
    encode_fn = theano.function([input_var], encoded_features, allow_input_downcast=True)
    return encode_fn
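A short sketch of calling the compiled function; it assumes the DBN weights load successfully, and the pickle path and random batch are hypothetical:

import numpy as np

# Hypothetical usage of compile_encoder (path and data are placeholders).
encode = compile_encoder()                        # or compile_encoder('models/encoder.pkl')
batch = np.random.rand(8, 1200).astype('float32')
features = encode(batch)                          # 50-dimensional encodings, one row per sample
print(features.shape)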
def extract_encoder(network, inputshape, start, end):
    layers = las.layers.get_all_layers(network)
    weights = []
    biases = []
    activations = []
    layersizes = []
    for l in layers[start:end]:
        weights.append(l.W)
        biases.append(l.b)
        activations.append(l.nonlinearity)
        layersizes.append(l.num_units)
    input = T.matrix('input', dtype='float32')
    encoder = InputLayer(inputshape, input, name='input')
    encoder = autoencoder.create_pretrained_encoder(encoder, weights, biases, activations, layersizes)
    return encoder
def build_encoder_layers(input_size, encode_size, sigma=0.5):
    """
    Builds an autoencoder with a Gaussian noise layer.
    :param input_size: input size
    :param encode_size: encoded size
    :param sigma: Gaussian noise standard deviation
    :return: weights of the encoder layer, layer specification of the denoising autoencoder
    """
    W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))
    layers = [
        (InputLayer, {'shape': (None, input_size)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': encode_size, 'nonlinearity': sigmoid, 'W': W}),
        (DenseLayer, {'name': 'decoder', 'num_units': input_size, 'nonlinearity': linear, 'W': W.T}),  # tied weights
    ]
    return W, layers
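The layer list is in the (class, kwargs) format that nolearn's NeuralNet accepts, so the denoising autoencoder can be trained by reconstructing its own input. A hedged sketch, with illustrative sizes and optimizer settings that are not from the original project:

import numpy as np
from nolearn.lasagne import NeuralNet
from lasagne.updates import nesterov_momentum

# Sketch: train the tied-weight denoising autoencoder built above.
W, layers = build_encoder_layers(input_size=784, encode_size=64, sigma=0.3)
ae = NeuralNet(
    layers=layers,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    regression=True,             # reconstruct the clean input
    max_epochs=5,
    verbose=1,
)
X = np.random.rand(256, 784).astype('float32')
ae.fit(X, X)                     # denoising: noisy forward pass, clean target
encoder_weights = W.get_value()  # learned encoder weights, shape (784, 64)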
def extract_encoder(dbn):
    dbn_layers = dbn.get_all_layers()
    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': dbn_layers[0].shape}),
            (DenseLayer, {'name': 'l1', 'num_units': dbn_layers[1].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[1].W, 'b': dbn_layers[1].b}),
            (DenseLayer, {'name': 'l2', 'num_units': dbn_layers[2].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[2].W, 'b': dbn_layers[2].b}),
            (DenseLayer, {'name': 'l3', 'num_units': dbn_layers[3].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[3].W, 'b': dbn_layers[3].b}),
            (DenseLayer, {'name': 'l4', 'num_units': dbn_layers[4].num_units, 'nonlinearity': linear,
                          'W': dbn_layers[4].W, 'b': dbn_layers[4].b}),
        ],
        update=adadelta,
        update_learning_rate=0.01,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder
def build_model(self):
    '''
    Build the Acoustic Event Net model
    :return:
    '''
    # "A" architecture, 41 classes
    nonlin = lasagne.nonlinearities.rectify
    net = {}
    net['input'] = InputLayer((None, feat_shape[0], feat_shape[1], feat_shape[2]))  # (channel, time, frequency)
    # ----------- 1st layer group ---------------
    net['conv1a'] = ConvLayer(net['input'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
    net['conv1b'] = ConvLayer(net['conv1a'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
    net['pool1'] = MaxPool2DLayer(net['conv1b'], pool_size=(1, 2))  # (time, freq)
    # ----------- 2nd layer group ---------------
    net['conv2a'] = ConvLayer(net['pool1'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
    net['conv2b'] = ConvLayer(net['conv2a'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
    net['pool2'] = MaxPool2DLayer(net['conv2b'], pool_size=(2, 2))  # (time, freq)
    # ----------- fully connected layer group ---------------
    net['fc5'] = DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlin)
    net['fc6'] = DenseLayer(net['fc5'], num_units=1024, nonlinearity=nonlin)
    net['prob'] = DenseLayer(net['fc6'], num_units=41, nonlinearity=lasagne.nonlinearities.softmax)
    return net
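A sketch of compiling a prediction function from the returned layer dictionary; it assumes the module-level feat_shape and the lasagne imports used above, and the batch of four random spectrogram patches is a placeholder:

import numpy as np
import theano
import lasagne

# Hypothetical: forward pass through the Acoustic Event Net defined above.
net = self.build_model()                         # assumes the enclosing class instance
input_var = net['input'].input_var               # the InputLayer creates its own symbolic input
probs = lasagne.layers.get_output(net['prob'], deterministic=True)
predict_fn = theano.function([input_var], probs)

x = np.random.rand(4, feat_shape[0], feat_shape[1], feat_shape[2]).astype('float32')
print(predict_fn(x).shape)                       # (4, 41) class probabilities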
def build_cnn(self):
    # Building the network
    layer_in = InputLayer(shape=(None, 784), input_var=self.input_var)
    # Hidden layer
    layer = DenseLayer(
        layer_in,
        num_units=self.hidden_size,
        W=lasagne.init.Uniform(
            range=(-np.sqrt(6. / (784 + self.hidden_size)),
                   np.sqrt(6. / (784 + self.hidden_size)))),
        nonlinearity=tanh,
    )
    # LR layer
    layer = DenseLayer(
        layer,
        num_units=self.output_size,
        W=lasagne.init.Constant(0.),
        nonlinearity=softmax,
    )
    return layer
def init_virtual(self):
    self.seq_con = T.matrix('seq_cont', 'int32')
    self.seq_con_mask = T.matrix('seq_cont_mask', floatX)
    self.seq_que = T.matrix('seq_quest', 'int32')
    self.seq_que_mask = T.matrix('seq_quest_mask', floatX)
    self.vars = [
        self.seq_con,
        self.seq_con_mask,
        self.seq_que,
        self.seq_que_mask] + self.vars
    self.in_con = InputLayer((None, None), self.seq_con)
    self.in_con_mask = InputLayer((None, None), self.seq_con_mask)
    self.in_que = InputLayer((None, None), self.seq_que)
    self.in_que_mask = InputLayer((None, None), self.seq_que_mask)
def build_multi_dssm(input_var=None, num_samples=None, num_entries=6, num_ngrams=42**3, num_hid1=300, num_hid2=300, num_out=128):
    """Builds a DSSM structure in a Lasagne/Theano way.

    The built DSSM is the neural network that computes the projection of only one paper.
    The input ``input_var`` should have two dimensions: (``num_samples * num_entries``, ``num_ngrams``).
    The output is then computed in a batch way: one paper at a time, but all papers from the same sample
    in the dataset are grouped (cited papers, citing papers and ``num_entries - 2`` irrelevant papers).

    Args:
        input_var (:class:`theano.tensor.TensorType` or None): symbolic input variable of the DSSM
        num_samples (int): the number of samples in the batch input dataset (number of rows)
        num_entries (int): the number of compared papers in the DSSM structure
        num_ngrams (int): the size of the vocabulary
        num_hid1 (int): the number of units in the first hidden layer
        num_hid2 (int): the number of units in the second hidden layer
        num_out (int): the number of units in the output layer

    Returns:
        :class:`lasagne.layers.Layer`: the output layer of the DSSM
    """
    assert (num_entries > 2)

    # Initialise input layer
    if num_samples is None:
        num_rows = None
    else:
        num_rows = num_samples * num_entries
    l_in = layers.InputLayer(shape=(num_rows, num_ngrams), input_var=input_var)

    # Initialise the hidden and output layers of the DSSM
    l_hid1 = layers.DenseLayer(l_in, num_units=num_hid1, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())
    l_hid2 = layers.DenseLayer(l_hid1, num_units=num_hid2, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())
    l_out = layers.DenseLayer(l_hid2, num_units=num_out, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())
    l_out = layers.ExpressionLayer(l_out, lambda X: X / X.norm(2), output_shape='auto')

    return l_out
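A brief usage sketch; the tiny vocabulary and hidden sizes below are purely illustrative (the defaults are 42**3 n-grams and 300/300/128 units), and the batch has num_samples * num_entries rows as the docstring requires:

import numpy as np
import theano
import theano.tensor as T
from lasagne import layers

# Hypothetical: project a toy batch through the DSSM tower.
input_var = T.matrix('dssm_input')
l_out = build_multi_dssm(input_var=input_var, num_samples=None, num_entries=6,
                         num_ngrams=500, num_hid1=64, num_hid2=64, num_out=32)
project = theano.function([input_var], layers.get_output(l_out))

batch = np.random.rand(2 * 6, 500).astype(theano.config.floatX)  # 2 samples x 6 papers each
print(project(batch).shape)                                      # (12, 32)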
def concatenate(net, in_layer, concat_h, concat_vars, pos, nb_concat_features):
    """
    Auxiliary function that checks whether we should concatenate the output of
    a layer `in_layer` of a network `net` to a tensor in `concat_vars`.

    Parameters
    ----------
    net: dictionary containing the layers of a network
    in_layer: name of a layer in `net`
    concat_h: list of layers to concatenate
    concat_vars: list of variables (tensors) to concatenate
    pos: position in the lists `concat_h` and `concat_vars` we want to check
    nb_concat_features: number of features in the layer we want to concatenate
    """
    if pos < len(concat_h) and concat_h[pos] == 'input':
        concat_h[pos] = in_layer

    # If this is the layer we want to concatenate, create an InputLayer with the
    # tensor we want to concatenate and a ConcatLayer that does the job afterwards.
    if in_layer in concat_h:
        net[in_layer + '_h'] = InputLayer((None, nb_concat_features, None, None), concat_vars[pos])
        net[in_layer + '_concat'] = ConcatLayer((net[in_layer + '_h'],
                                                 net[in_layer]), axis=1, cropping=None)
        pos += 1
        out = in_layer + '_concat'

        laySize = net[out].output_shape
        n_cl = laySize[1]
        print('Number of feature maps (concat):', n_cl)
    else:
        out = in_layer

    if concat_h and pos <= len(concat_h) and concat_h[pos-1] == 'noisy_input':
        concat_h[pos-1] = 'input'

    return pos, out
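In the original project this helper is called while building larger segmentation networks, but it can be exercised on its own. A self-contained toy call, with a hypothetical two-layer net and 16 extra feature maps to merge in:

import theano.tensor as T
from lasagne.layers import InputLayer, Conv2DLayer, ConcatLayer

# Hypothetical toy network for concatenate(); shapes and names are illustrative.
net = {}
net['input'] = InputLayer((None, 3, None, None))
net['conv1'] = Conv2DLayer(net['input'], num_filters=8, filter_size=3, pad='same')

h_var = T.tensor4('h')             # tensor holding the extra feature maps
concat_h = ['input']               # 'input' marks the first eligible layer for concatenation
pos, out = concatenate(net, 'conv1', concat_h, [h_var], pos=0, nb_concat_features=16)
print(out, net[out].output_shape)  # conv1_concat (None, 24, None, None)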
def concatenate_end2end(net, in_layer, concat_h, layer_h, pos, nb_concat_features):
    """
    Auxiliary function that checks whether we should concatenate the output of
    a layer `in_layer` of a network `net` to one of the layers in `layer_h`.

    Parameters
    ----------
    net: dictionary containing the layers of a network
    in_layer: name of a layer in `net`
    concat_h: list of layers to concatenate
    layer_h: list of layers whose outputs we want to concatenate
    pos: position in the lists `concat_h` and `layer_h` we want to check
    nb_concat_features: number of features in the layer we want to concatenate (not used in this variant)
    """
    if pos < len(concat_h) and concat_h[pos] == 'input':
        concat_h[pos] = in_layer

    # If this is the layer we want to concatenate, reuse the corresponding layer
    # from `layer_h` and add a ConcatLayer that does the job afterwards.
    if in_layer in concat_h:
        net[in_layer + '_h'] = layer_h[pos]
        net[in_layer + '_concat'] = ConcatLayer((net[in_layer + '_h'],
                                                 net[in_layer]), axis=1, cropping=None)
        pos += 1
        out = in_layer + '_concat'

        laySize = net[out].output_shape
        n_cl = laySize[1]
        print('Number of feature maps (concat):', n_cl)
    else:
        out = in_layer

    if concat_h and pos <= len(concat_h) and concat_h[pos-1] == 'noisy_input':
        concat_h[pos-1] = 'input'

    return pos, out
Source: convolutional_neural_network.py (project: kaggle-breast-cancer-prediction, author: sirCamp)
def CNN(n_epochs):
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),     # Convolutional layer. Params defined below
            ('pool1', layers.MaxPool2DLayer),  # Like downsampling, for execution speed
            ('conv2', layers.Conv2DLayer),
            ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, 6, 5),
        conv1_num_filters=8,
        conv1_filter_size=(3, 3),
        conv1_nonlinearity=lasagne.nonlinearities.rectify,
        pool1_pool_size=(2, 2),
        conv2_num_filters=12,
        conv2_filter_size=(1, 1),
        conv2_nonlinearity=lasagne.nonlinearities.rectify,
        hidden3_num_units=1000,
        output_num_units=2,
        output_nonlinearity=lasagne.nonlinearities.softmax,
        update_learning_rate=0.0001,
        update_momentum=0.9,
        max_epochs=n_epochs,
        verbose=0,
    )
    return net1
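A toy training sketch for the net returned above; the random arrays only match the declared input_shape of (None, 1, 6, 5) and stand in for the project's real breast-cancer features:

import numpy as np

# Hypothetical fit/predict round trip with placeholder data.
net = CNN(n_epochs=10)
X = np.random.rand(100, 1, 6, 5).astype('float32')
y = np.random.randint(0, 2, size=100).astype('int32')
net.fit(X, y)
preds = net.predict(X)           # predicted class labels (0 or 1)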
def _invert_InputLayer(self, layer, feeder):
    assert isinstance(layer, L.InputLayer)
    return feeder
def _invert_layer(self, layer, feeder):
    layer_type = type(layer)

    if L.get_output_shape(feeder) != L.get_output_shape(layer):
        feeder = L.ReshapeLayer(feeder, (-1,) + L.get_output_shape(layer)[1:])

    if layer_type is L.InputLayer:
        return self._invert_InputLayer(layer, feeder)
    elif layer_type is L.FlattenLayer:
        return self._invert_FlattenLayer(layer, feeder)
    elif layer_type is L.DenseLayer:
        return self._invert_DenseLayer(layer, feeder)
    elif layer_type is L.Conv2DLayer:
        return self._invert_Conv2DLayer(layer, feeder)
    elif layer_type is L.DropoutLayer:
        return self._invert_DropoutLayer(layer, feeder)
    elif layer_type in [L.MaxPool2DLayer, L.MaxPool1DLayer]:
        return self._invert_MaxPoolingLayer(layer, feeder)
    elif layer_type is L.PadLayer:
        return self._invert_PadLayer(layer, feeder)
    elif layer_type is L.SliceLayer:
        return self._invert_SliceLayer(layer, feeder)
    elif layer_type is L.LocalResponseNormalization2DLayer:
        return self._invert_LocalResponseNormalisation2DLayer(layer, feeder)
    elif layer_type is L.GlobalPoolLayer:
        return self._invert_GlobalPoolLayer(layer, feeder)
    else:
        return self._invert_UnknownLayer(layer, feeder)
def _construct_layer_maps(self):
    layers = L.get_all_layers(self.output_layer)
    # Store inverse layers to enable merging.
    self.inverse_map = {l: None for l in layers}
    # Store the layers a specific layer feeds.
    self.output_map = {l: [] for l in layers}
    for layer in layers:
        if type(layer) is not L.InputLayer:
            if isinstance(layer, L.MergeLayer):
                for feeder in layer.input_layers:
                    self.output_map[feeder].append(layer)
            else:
                self.output_map[layer.input_layer].append(layer)
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network
#random search to initialize the weights
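A hedged sketch of compiling and running the network above; the input width is l + 1024 = 2024 columns, and the random batch is a placeholder for the real encoded sequences:

import numpy as np
import theano
import lasagne

# Hypothetical forward pass through create_network().
network = create_network()
input_var = lasagne.layers.get_all_layers(network)[0].input_var  # the InputLayer's symbolic input
output = lasagne.layers.get_output(network, deterministic=True)  # disable dropout at test time
predict_fn = theano.function([input_var], output)

x = np.random.rand(2, 1, 4, 2024).astype(theano.config.floatX)   # 1000-wide sequence + 1024 extra features
print(predict_fn(x).shape)                                       # (2, 2) softmax probabilities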
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network
#random search to initialize the weights
def addInputLayer(self, **kwargs):
    self.input_layer = InputLayer(name="input", **kwargs)
    self.input_layer.inp_ndim = len(kwargs["shape"])