def highway_conv3(incoming, nonlinearity=nn.nonlinearities.rectify, **kwargs):
    wh = nn.init.Orthogonal('relu')
    bh = nn.init.Constant(0.0)
    wt = nn.init.Orthogonal('relu')
    bt = nn.init.Constant(-2.)  # negative gate bias: the layer initially passes its input through
    num_filters = incoming.output_shape[1]
    # H: transform path
    l_h = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wh, b=bh,
                         nonlinearity=nonlinearity)
    # T: transform gate
    l_t = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wt, b=bt,
                         nonlinearity=T.nnet.sigmoid)
    return HighwayLayer(gate=l_t, input1=l_h, input2=incoming)
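The HighwayLayer used above is not defined in this excerpt. A minimal sketch of such a merge layer, assuming `import lasagne as nn` and the standard highway combination y = t*h + (1 - t)*x, could look like:

class HighwayLayer(nn.layers.MergeLayer):
    def __init__(self, gate, input1, input2, **kwargs):
        super(HighwayLayer, self).__init__([gate, input1, input2], **kwargs)

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]  # all three inputs share one shape

    def get_output_for(self, inputs, **kwargs):
        t, h, x = inputs
        # blend the transformed path h with the untouched input x via gate t
        return t * h + (1 - t) * x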
def __init__(self, incoming, num_filters, filter_size, stride=1,
             pad=0, untie_biases=False,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, flip_filters=True,
             convolution=conv.conv1d_mc0, **kwargs):
    if isinstance(incoming, tuple):
        input_shape = incoming
    else:
        input_shape = incoming.output_shape
    # Retrieve the supplied name, if it exists; otherwise use ''
    if 'name' in kwargs:
        basename = kwargs['name'] + '.'
        # Create a separate version of kwargs for the contained layers
        # which does not include 'name'
        layer_kwargs = dict((key, arg) for key, arg in kwargs.items() if key != 'name')
    else:
        basename = ''
        layer_kwargs = kwargs
    self.conv1d = Conv1DLayer(InputLayer((None,) + input_shape[2:]), num_filters, filter_size, stride, pad,
                              untie_biases, W, b, nonlinearity, flip_filters, convolution,
                              name=basename + "conv1d", **layer_kwargs)
    self.W = self.conv1d.W
    self.b = self.conv1d.b
    super(ConvTimeStep1DLayer, self).__init__(incoming, **kwargs)
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape
    self.input_shape = incoming_vertex.output_shape
    incomings = [incoming_vertex, incoming_edge]
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__(incomings, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
    self.num_filters = num_filters
    self.filter_size = filter_size
    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
             b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
    num_inputs = int(np.prod(self.input_shape[1:]))
    self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
    if b_h is None:
        self.b_h = None
    else:
        self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)
    self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
    if b_t is None:
        self.b_t = None
    else:
        self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
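Only the constructor appears in this excerpt. A get_output_for consistent with it, assuming the usual highway wiring (a sigmoid transform gate blends the transformed and untransformed input), might read:

def get_output_for(self, input, **kwargs):
    if input.ndim > 2:
        # flatten trailing axes, as DenseLayer does
        input = input.flatten(2)
    h = T.dot(input, self.W_h)
    if self.b_h is not None:
        h = h + self.b_h
    h = self.nonlinearity(h)
    t = T.dot(input, self.W_t)
    if self.b_t is not None:
        t = t + self.b_t
    t = nonlinearities.sigmoid(t)
    return t * h + (1 - t) * input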
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
**kwargs):
super(CustomDense, self).__init__(incoming, **kwargs)
self.nonlinearity = (nonlinearities.identity if nonlinearity is None
else nonlinearity)
self.num_units = num_units
num_inputs = self.input_shape[-1]
self.W = self.add_param(W, (num_inputs, num_units), name="W")
if b is None:
self.b = None
else:
self.b = self.add_param(b, (num_units,), name="b",
regularizable=False)
def build_mlp(input_var=None):
    l_in = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    l_hid1 = DenseLayer(
        l_in, num_units=500,
        nonlinearity=rectify,
        W=lasagne.init.GlorotUniform())
    l_hid1_drop = DropoutLayer(l_hid1, p=0.4)
    l_hid2 = DenseLayer(
        l_hid1_drop, num_units=300,
        nonlinearity=rectify)
    l_hid2_drop = DropoutLayer(l_hid2, p=0.4)
    l_out = DenseLayer(
        l_hid2_drop, num_units=10,
        nonlinearity=softmax)
    return l_out
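Standard Theano/Lasagne boilerplate to compile and train this network might look like the following; variable names are illustrative:

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = build_mlp(input_var)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params)
train_fn = theano.function([input_var, target_var], loss, updates=updates)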
Source: cnn_cascade_lasagne.py — project: Cascade-CNN-Face-Detection, author: gogolgrind
def __build_48_net__(self):
    network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)
    network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=1, nonlinearity=relu)
    network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
    network = layers.batch_norm(network)
    network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=1, nonlinearity=relu)
    network = layers.batch_norm(network)
    network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
    network = layers.Conv2DLayer(network, num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=relu)
    network = layers.batch_norm(network)
    network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
    network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
    network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
    return network
def network_classifier(self, input_var):
    network = {}
    network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
    network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
    network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
    network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
    network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
    network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
    network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
    network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
    network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
    network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
    network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')
    return network
def build_net(nz=10):
    # nz = size of latent code
    # N.B. using batch_norm applies bn before the non-linearity!
    F = 32
    enc = InputLayer(shape=(None, 1, 28, 28))
    enc = Conv2DLayer(incoming=enc, num_filters=F*2, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=1, nonlinearity=lrelu(0.2), pad=2)
    enc = reshape(incoming=enc, shape=(-1, F*4*7*7))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)
    # Generator network
    dec = InputLayer(shape=(None, nz))
    dec = DenseLayer(incoming=dec, num_units=F*4*7*7)
    dec = reshape(incoming=dec, shape=(-1, F*4, 7, 7))
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=1, filter_size=3, stride=1, nonlinearity=sigmoid, crop=1)
    return enc, dec
def load_dbn(path='models/oulu_ae.mat'):
    """
    Load a pretrained DBN from path.
    :param path: path to the .mat dbn
    :return: weights, biases, layer shapes and nonlinearities of the pretrained deep belief network
    """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]
    weights = [w1, w2, w3, w4]
    biases = [b1, b2, b3, b4]
    shapes = [2000, 1000, 500, 50]
    nonlinearities = [rectify, rectify, rectify, linear]
    return weights, biases, shapes, nonlinearities
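A minimal sketch of how the returned lists might be stacked into a Lasagne network; the input width here is a hypothetical placeholder:

from lasagne.layers import InputLayer, DenseLayer

def build_pretrained_encoder(weights, biases, shapes, nonlinearities,
                             input_shape=(None, 1200)):  # hypothetical input width
    net = InputLayer(shape=input_shape)
    # each pretrained weight matrix/bias vector seeds one dense layer
    for W, b, n_units, fn in zip(weights, biases, shapes, nonlinearities):
        net = DenseLayer(net, num_units=n_units, W=W, b=b, nonlinearity=fn)
    return net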
def test_load_params(self):
    window = T.iscalar('theta')
    inputs1 = T.tensor3('inputs1', dtype='float32')
    mask = T.matrix('mask', dtype='uint8')
    network = deltanet_majority_vote.load_saved_model('../oulu/results/best_models/1stream_mfcc_w3s3.6.pkl',
                                                      ([500, 200, 100, 50], [rectify, rectify, rectify, linear]),
                                                      (None, None, 91), inputs1, (None, None), mask,
                                                      250, window, 10)
    d = deltanet_majority_vote.extract_encoder_weights(network, ['fc1', 'fc2', 'fc3', 'bottleneck'],
                                                       [('w1', 'b1'), ('w2', 'b2'), ('w3', 'b3'), ('w4', 'b4')])
    b = deltanet_majority_vote.extract_lstm_weights(network, ['f_blstm1', 'b_blstm1'],
                                                    ['flstm', 'blstm'])
    expected_keys = ['w1', 'w2', 'w3', 'w4', 'b1', 'b2', 'b3', 'b4']
    keys = d.keys()
    for k in keys:
        assert k in expected_keys
        assert type(d[k]) == np.ndarray
    save_mat(d, '../oulu/models/oulu_1stream_mfcc_w3s3.mat')
def extract_weights(ae):
    weights = []
    biases = []
    shapes = [2000, 1000, 500, 50]
    nonlinearities = [rectify, rectify, rectify, linear]
    ae_layers = ae.get_all_layers()
    weights.append(ae_layers[1].W.astype('float32'))
    weights.append(ae_layers[2].W.astype('float32'))
    weights.append(ae_layers[3].W.astype('float32'))
    weights.append(ae_layers[4].W.astype('float32'))
    biases.append(ae_layers[1].b.astype('float32'))
    biases.append(ae_layers[2].b.astype('float32'))
    biases.append(ae_layers[3].b.astype('float32'))
    biases.append(ae_layers[4].b.astype('float32'))
    return weights, biases, shapes, nonlinearities
def __init__(self, incoming, num_units,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, name=None, **kwargs):
    """
    An extension of a regular dense layer that enables weight sharing between two tied hidden layers.
    To tie two layers, initialize the first with an initialization function for the weights, and pass
    the weight matrix of the first as W to the second.
    :param incoming: the input layer of this layer
    :param num_units: output size
    :param W: weight initialization; can be an initialization function or a given matrix
    :param b: bias initialization
    :param nonlinearity: nonlinearity function
    :param name: string
    :param kwargs:
    """
    super(TiedDenseLayer, self).__init__(incoming, num_units, W, b, nonlinearity, name=name)
    if not isinstance(W, lasagne.init.Initializer):
        self.params[self.W].remove('trainable')
        self.params[self.W].remove('regularizable')
    if self.b is not None and not isinstance(b, lasagne.init.Initializer):
        self.params[self.b].remove('trainable')
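Per that contract, tying an encoder/decoder pair might look like this sketch; shapes and names are illustrative, and it relies on Lasagne's add_param accepting Theano expressions such as a transposed weight:

l_in = lasagne.layers.InputLayer(shape=(None, 784))
l_enc = TiedDenseLayer(l_in, num_units=500, W=lasagne.init.GlorotUniform(), name='enc')
# the decoder reuses the encoder's weights (transposed), so they are
# marked neither trainable nor regularizable in this layer
l_dec = TiedDenseLayer(l_enc, num_units=784, W=l_enc.W.T, name='dec')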
def __init__(self, incoming, nonlinearity=nonlinearities.rectify,
             **kwargs):
    super(NonlinearityLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
def InceptionUpscaleLayer(incoming, param_dict, block_name):
    branch = [0] * len(param_dict)
    # Loop across branches
    for i, dict in enumerate(param_dict):
        for j, style in enumerate(dict['style']):  # Loop up branch
            branch[i] = TC2D(
                incoming=branch[i] if j else incoming,
                num_filters=dict['num_filters'][j],
                filter_size=dict['filter_size'][j],
                crop=dict['pad'][j] if 'pad' in dict else None,
                stride=dict['stride'][j],
                W=initmethod('relu'),
                nonlinearity=dict['nonlinearity'][j],
                name=block_name + '_' + str(i) + '_' + str(j)) if style == 'convolutional' \
                else NL(
                    incoming=lasagne.layers.dnn.Pool2DDNNLayer(
                        incoming=lasagne.layers.Upscale2DLayer(
                            incoming=incoming if j == 0 else branch[i],
                            scale_factor=dict['stride'][j]),
                        pool_size=dict['filter_size'][j],
                        stride=[1, 1],
                        mode=dict['mode'][j],
                        pad=dict['pad'][j],
                        name=block_name + '_' + str(i) + '_' + str(j)),
                    nonlinearity=dict['nonlinearity'][j])
            # Apply batchnorm
            branch[i] = BN(branch[i], name=block_name + '_bnorm_' + str(i) + '_' + str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate sublayers
    return CL(incomings=branch, name=block_name)
# Convenience function to efficiently generate param dictionaries for use with InceptionLayer
def pd(num_layers=2, num_filters=32, filter_size=(3, 3), pad=1, stride=(1, 1), nonlinearity=elu, style='convolutional', bnorm=1, **kwargs):
    input_args = locals()
    input_args.pop('num_layers')
    return {key: entry if type(entry) is list else [entry]*num_layers for key, entry in input_args.iteritems()}
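For illustration, pd can generate the per-branch dictionaries consumed by InceptionUpscaleLayer above. The values and the `net` variable below are placeholders, and the shapes assume TC2D wraps lasagne.layers.TransposedConv2DLayer semantics with elu in scope:

# two convolutional branches; per-sublayer strides are given as lists
param_dict = [
    pd(num_layers=2, num_filters=64, filter_size=(3, 3), stride=[(2, 2), (1, 1)]),
    pd(num_layers=1, num_filters=64, filter_size=(1, 1), stride=(2, 2), pad=0),
]
net = InceptionUpscaleLayer(net, param_dict, block_name='inception_up1')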
# Possible Conv2DDNN convenience function. Remember to delete the C2D import at the top if you use this
# def C2D(incoming = None, num_filters = 32, filter_size= [3,3],pad = 'same',stride = [1,1], W = initmethod('relu'),nonlinearity = elu,name = None):
# return lasagne.layers.dnn.Conv2DDNNLayer(incoming,num_filters,filter_size,stride,pad,False,W,None,nonlinearity,False)
# Shape-Preserving Gaussian Sample layer for latent vectors with spatial dimensions.
# This is a holdover from an "old" (i.e. I abandoned it last month) idea.
def has_ReLU(layer):
    relus = [lasagne.nonlinearities.rectify, T.nnet.relu]
    return (hasattr(layer, 'nonlinearity') and
            layer.nonlinearity in relus)
def get_rectifier_layer(input_layer, rectifier_layer):
    if has_ReLU(rectifier_layer):
        return lasagne.layers.NonlinearityLayer(input_layer,
                                                nonlinearity=rectify)
    return input_layer
def conv_params(num_filters, filter_size=(3, 3), stride=(1, 1), border_mode='same',
                nonlinearity=rectify, W=init.Orthogonal(gain=1.0),
                b=init.Constant(0.05), untie_biases=False, **kwargs):
    args = {
        'num_filters': num_filters,
        'filter_size': filter_size,
        'stride': stride,
        'pad': border_mode,  # newer Lasagne versions use 'pad' instead of 'border_mode'
        'nonlinearity': nonlinearity,
        'W': W,
        'b': b,
        'untie_biases': untie_biases,
    }
    args.update(kwargs)
    return args
def dense_params(num_units, nonlinearity=rectify, **kwargs):
    args = {
        'num_units': num_units,
        'nonlinearity': nonlinearity,
        'W': init.Orthogonal(1.0),
        'b': init.Constant(0.05),
    }
    args.update(kwargs)
    return args
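These helpers are meant to be unpacked straight into layer constructors, for instance as below; this is a sketch that assumes Conv2DLayer and DenseLayer are imported from lasagne.layers and `l_in` is an existing layer:

net = Conv2DLayer(l_in, **conv_params(32))
net = Conv2DLayer(net, **conv_params(64, stride=(2, 2)))  # kwargs override the defaults
net = DenseLayer(net, **dense_params(256))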
def build_BiLSTM_HighCNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                         peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape
    # dropout before cnn
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time steps of cnn)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape to match the highway incoming layer: [batch * sent_length, num_filters, 1] --> [batch * sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, ([0], -1))
    # dropout after cnn?
    # if dropout:
    #     output_cnn_layer = lasagne.layers.DropoutLayer(output_cnn_layer, p=0.5)
    # construct highway layer
    highway_layer = HighwayDenseLayer(output_cnn_layer, nonlinearity=nonlinearities.rectify)
    # reshape to match the lstm incoming layer: [batch * sent_length, num_filters] --> [batch, sent_length, num_filters]
    output_highway_layer = lasagne.layers.reshape(highway_layer, (-1, sent_length, [1]))
    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_highway_layer, incoming2], axis=2)
    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, **kwargs):
    super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
    self.num_units = num_units
    if num_leading_axes >= len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "leaving no trailing axes for the dot product." %
            (num_leading_axes, len(self.input_shape)))
    elif num_leading_axes < -len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "requesting more trailing axes than there are input "
            "dimensions." % (num_leading_axes, len(self.input_shape)))
    self.num_leading_axes = num_leading_axes
    if any(s is None for s in self.input_shape[num_leading_axes:]):
        raise ValueError(
            "A DenseLayer requires a fixed input shape (except for "
            "the leading axes). Got %r for num_leading_axes=%d." %
            (self.input_shape, self.num_leading_axes))
    num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))
    self.W = self.add_param(W, (num_inputs, num_units), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_units,), name="b",
                                regularizable=False)
    if args.regL1 is True:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 (num_inputs, num_units), name="L1")
    if args.regL2 is True:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 (num_inputs, num_units), name="L2")
def __init__(
    self, incoming, num_units,
    W=init.Constant(0.1),
    b=init.Constant(0.),
    nonlinearity=nonlinearities.rectify,
    **kwargs
):
    super(Tensor4LinearLayer, self).__init__(incoming, **kwargs)
    num_inputs = self.input_shape[-1]
    self.num_units = num_units
    self.W = self.add_param(
        W, (num_inputs, num_units),
        name="W"
    )
    if b is not None:
        self.b = self.add_param(
            b,
            (
                self.input_shape[1],
                self.input_shape[2], self.num_units
            )
        )
    else:
        self.b = None
    if nonlinearity is not None:
        self.nonlinearity = nonlinearity
    else:
        self.nonlinearity = nonlinearities.identity
def __init__(
    self, incoming, num_units,
    W=init.Constant(0.1),
    b=init.Constant(0.),
    nonlinearity=nonlinearities.rectify,
    **kwargs
):
    super(Tensor3LinearLayer, self).__init__(incoming, **kwargs)
    num_inputs = self.input_shape[-1]
    self.num_units = num_units
    self.W = self.add_param(
        W, (num_inputs, num_units),
        name="W"
    )
    if b is not None:
        self.b = self.add_param(
            b,
            (
                self.input_shape[1], self.num_units
            )
        )
    else:
        self.b = None
    if nonlinearity is not None:
        self.nonlinearity = nonlinearity
    else:
        self.nonlinearity = nonlinearities.identity
Source: cnn_cascade_lasagne.py — project: Cascade-CNN-Face-Detection, author: gogolgrind
def __build_12_net__(self):
    network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
    network = layers.dropout(network, p=0.1)
    network = layers.Conv2DLayer(network, num_filters=16, filter_size=(3, 3), stride=1, nonlinearity=relu)
    network = layers.batch_norm(network)
    network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
    network = layers.DropoutLayer(network, p=0.3)
    network = layers.DenseLayer(network, num_units=16, nonlinearity=relu)
    network = layers.batch_norm(network)
    network = layers.DropoutLayer(network, p=0.3)
    network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
    return network