Python batch_norm() usage examples
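All of the snippets on this page are Lasagne-based. For orientation, here is a minimal sketch (not from any of the projects below; assumes Lasagne and Theano are installed) of what the batch_norm() convenience wrapper does: it removes the wrapped layer's bias and nonlinearity, inserts a BatchNormLayer behind it, and re-applies the nonlinearity on top.

# Minimal, illustrative sketch of batch_norm() usage.
import lasagne
from lasagne.layers import InputLayer, DenseLayer, batch_norm, get_all_layers

l_in = InputLayer(shape=(None, 784))
# The Dense layer's bias is removed and its nonlinearity is re-applied
# after an inserted BatchNormLayer.
l_hid = batch_norm(DenseLayer(l_in, num_units=256,
                              nonlinearity=lasagne.nonlinearities.rectify))
# batch_norm() returns the outermost layer (a NonlinearityLayer here):
print(get_all_layers(l_hid))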

enhance.py (project: supic, author: Hirico)
def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
        hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
        self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
        self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                    nonlinearity=lasagne.nonlinearities.linear))


    #------------------------------------------------------------------------------------------------------------------
    # Input / Output
    #------------------------------------------------------------------------------------------------------------------
lsgan.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    print ("critic output:", layer.output_shape)
    return layer
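A hedged usage sketch for the critic above (the variable names here are illustrative, not part of the project): compile a Theano function that scores a batch of images.

# Usage sketch (illustrative names; assumes Theano and Lasagne).
import theano
import theano.tensor as T
import lasagne

images = T.tensor4('images')                      # (batch, 1, 28, 28)
critic = build_critic(images)
scores = lasagne.layers.get_output(critic, deterministic=True)
score_fn = theano.function([images], scores)      # one linear score per image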
lsgan_cifar10.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_critic(input_var=None, verbose=False):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 32, 32)
    layer = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    # three strided convolutions
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 512, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    if verbose: print ("critic output:", layer.output_shape)
    return layer
cnn_cascade_lasagne.py (project: Cascade-CNN-Face-Detection, author: gogolgrind)
def __build_48_net__(self):
        network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=1, nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.batch_norm(network)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)

        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
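To train a softmax classifier like the 48-net above (or the related 12/24-nets later on this page), one would pair its output with a categorical cross-entropy loss. A minimal sketch under the usual Theano/Lasagne conventions; make_train_fn is a hypothetical helper, not part of the project.

# Hypothetical training-step helper for the softmax classifiers above.
import theano
import theano.tensor as T
import lasagne

def make_train_fn(network, input_var):
    targets = T.ivector('targets')
    preds = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(preds, targets).mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss, params)
    # Returns a function mapping (images, labels) -> loss, updating weights.
    return theano.function([input_var, targets], loss, updates=updates)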
layers.py (project: gogh-figure, author: joelmoniz)
def instance_norm(layer, **kwargs):
    """
    The equivalent of Lasagne's `batch_norm()` convenience method, but for instance normalization.
    Refer: http://lasagne.readthedocs.io/en/latest/modules/layers/normalization.html#lasagne.layers.batch_norm
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = identity
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    bn_name = (kwargs.pop('name', None) or
               (getattr(layer, 'name', None) and layer.name + '_bn'))
    layer = InstanceNormLayer(layer, name=bn_name, **kwargs)
    if nonlinearity is not None:
        nonlin_name = bn_name and bn_name + '_nonlin'
        layer = NonlinearityLayer(layer, nonlinearity, name=nonlin_name)
    return layer
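Usage mirrors batch_norm(); a sketch assuming InstanceNormLayer, identity, and NonlinearityLayer are in scope as in the module above:

# Sketch: instance_norm() as a drop-in replacement for batch_norm().
from lasagne.layers import InputLayer, Conv2DLayer
from lasagne.nonlinearities import rectify

l_in = InputLayer((None, 3, 256, 256))
l_conv = instance_norm(Conv2DLayer(l_in, num_filters=32, filter_size=3,
                                   pad='same', nonlinearity=rectify,
                                   name='conv1'))
# Result: conv1 (linear, no bias) -> 'conv1_bn' -> 'conv1_bn_nonlin'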

# TODO: Add normalization
motionDetector_lasagne.py (project: deepgestures_lasagne, author: nneverova)
def build_network(self, input_var=None):

        self.network= {}

        self.network['input'] = lasagne.layers.InputLayer(shape=(self.batch_size, self.input_size[0]),
                                         input_var=self.x)


        # Add a fully-connected layer of self.fc_layers[0] units with a tanh
        # nonlinearity, initializing weights with Glorot's scheme (the default):
        self.network['FC_1'] = batch_norm(lasagne.layers.DenseLayer(
                lasagne.layers.dropout(self.network['input'], p=self.dropout_rates[0]),  num_units=self.fc_layers[0],
                nonlinearity=lasagne.nonlinearities.tanh,
                W=lasagne.init.GlorotUniform()))


        # Finally, add the fully-connected softmax output layer of self.fc_layers[1] units:
        self.network['prob'] = lasagne.layers.DenseLayer(
                lasagne.layers.dropout(self.network['FC_1'], p=self.dropout_rates[1]), num_units=self.fc_layers[1],
                nonlinearity=lasagne.nonlinearities.softmax)


        # Each layer is linked to its incoming layer(s), so we only need to pass
        # the output layer to give access to a network in Lasagne:
        return self.network
CAE.py (project: ConvolutionalAutoEncoder, author: ToniCreswell)
def build_net(nz=10):
    # nz = size of latent code
    # N.B. using batch_norm applies bn before the non-linearity!
    F = 32
    enc = InputLayer(shape=(None, 1, 28, 28))
    enc = Conv2DLayer(incoming=enc, num_filters=F*2, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=1, nonlinearity=lrelu(0.2), pad=2)
    enc = reshape(incoming=enc, shape=(-1, F*4*7*7))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)
    # Decoder (generator) network
    dec = InputLayer(shape=(None, nz))
    dec = DenseLayer(incoming=dec, num_units=F*4*7*7)
    dec = reshape(incoming=dec, shape=(-1, F*4, 7, 7))
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=1, filter_size=3, stride=1, nonlinearity=sigmoid, crop=1)

    return enc, dec
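To chain the two halves into one reconstruction expression, the encoder's output can be substituted for the decoder's InputLayer. A sketch, assuming standard Theano/Lasagne imports:

# Sketch: wiring the encoder and decoder into a full autoencoder.
import theano.tensor as T
import lasagne

X = T.tensor4('X')
enc, dec = build_net(nz=10)
code = lasagne.layers.get_output(enc, X)
dec_in = lasagne.layers.get_all_layers(dec)[0]    # the decoder's InputLayer
recon = lasagne.layers.get_output(dec, inputs={dec_in: code})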
ncg.py (project: neural-coarse-graining, author: arayabrain)
def build_model():
    net = {}

    net["input1"] = lasagne.layers.InputLayer(shape=(None,data.shape[1],None), input_var = invar1 )

    # Transformer part of the network - in-place convolution to transform to the new coarse-grained classes
    net["transform1"] = batch_norm(lasagne.layers.Conv1DLayer(incoming=net["input1"], num_filters=args.tr_filt1, filter_size=args.tr_fs1, pad="same", nonlinearity=leakyReLU, W = lasagne.init.GlorotUniform(gain='relu')))
    net["transform2"] = batch_norm(lasagne.layers.Conv1DLayer(incoming=net["transform1"], num_filters=args.tr_filt2, filter_size=args.tr_fs2, pad="same", nonlinearity=leakyReLU, W = lasagne.init.GlorotUniform(gain='relu')))

    if args.continuous: # If we have continuous CG variables, use a tanh nonlinearity for output. Otherwise, use softmax to treat it as a probability distribution
        net["transform3"] = (lasagne.layers.Conv1DLayer(incoming=net["transform2"], num_filters=CLASSES, filter_size=1, pad="same", nonlinearity=lasagne.nonlinearities.tanh, W = lasagne.init.GlorotUniform(gain='relu')))
    else:
        net["transform3"] = (lasagne.layers.Conv1DLayer(incoming=net["transform2"], num_filters=CLASSES, filter_size=1, pad="same", nonlinearity=convSoftmax, W = lasagne.init.GlorotUniform(gain='relu')))

    # Predictor part. Take the coarse-grained classes and predict them at an offset of DISTANCE
    net["predictor1"] = batch_norm(lasagne.layers.Conv1DLayer(incoming = net["transform3"], num_filters = args.pr_filt1, filter_size=args.pr_fs1, pad="same", nonlinearity = leakyReLU, W = lasagne.init.GlorotUniform(gain='relu')))
    net["predictor2"] = batch_norm(lasagne.layers.Conv1DLayer(incoming = net["predictor1"], num_filters = args.pr_filt2, filter_size=args.pr_fs2, pad="same", nonlinearity = leakyReLU, W = lasagne.init.GlorotUniform(gain='relu')))

    if args.continuous: # If we have continuous CG variables, use a tanh nonlinearity for output. Otherwise, use softmax to treat it as a probability distribution
        net["predictor3"] = (lasagne.layers.Conv1DLayer(incoming = net["predictor2"], num_filters = CLASSES, filter_size=1, pad="same", nonlinearity = lasagne.nonlinearities.tanh, W = lasagne.init.GlorotUniform(gain='relu')))
    else:
        net["predictor3"] = (lasagne.layers.Conv1DLayer(incoming = net["predictor2"], num_filters = CLASSES, filter_size=1, pad="same", nonlinearity = convSoftmax, W = lasagne.init.GlorotUniform(gain='relu')))

    return net
layers.py (project: Neural-Photo-Editor, author: ajbrock)
def MDBLOCK(incoming,num_filters,scales,name,nonlinearity):
    return NL(BN(ESL([incoming,
         MDCL(NL(BN(MDCL(NL(BN(incoming,name=name+'bnorm0'),nonlinearity),num_filters,scales,name),name=name+'bnorm1'),nonlinearity),
              num_filters,
              scales,
              name+'2')]),name=name+'bnorm2'),nonlinearity)  

# Gaussian Sample Layer for VAE from Tencia Lee
layers.py (project: Neural-Photo-Editor, author: ajbrock)
def InceptionUpscaleLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = TC2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                crop = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(
                    incoming = lasagne.layers.dnn.Pool2DDNNLayer(
                        incoming = lasagne.layers.Upscale2DLayer(
                            incoming=incoming if j == 0 else branch[i],
                            scale_factor = dict['stride'][j]),
                        pool_size = dict['filter_size'][j],
                        stride = [1,1],
                        mode = dict['mode'][j],
                        pad = dict['pad'][j],
                        name = block_name+'_'+str(i)+'_'+str(j)),
                    nonlinearity = dict['nonlinearity'][j])
                # Apply Batchnorm    
            branch[i] = BN(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
        # Concatenate Sublayers        

    return CL(incomings=branch,name=block_name)

# Convenience function to efficiently generate param dictionaries for use with InceptionLayer
began_network.py (project: began, author: davidtellez)
def conv_layer(input, n_filters, stride, name, network_weights, nonlinearity=elu, bn=False):

    layer = Conv2DLayer(input, num_filters=n_filters, filter_size=3, stride=stride, pad='same',
                        nonlinearity=nonlinearity, name=name, W=get_W(network_weights, name), b=get_b(network_weights, name))
    if bn:
        layer = batch_norm(layer)
    return layer
began_network.py (project: began, author: davidtellez)
def transposed_conv_layer(input, n_filters, stride, name, network_weights, output_size, nonlinearity=elu, bn=False):

    layer = TransposedConv2DLayer(input, n_filters, filter_size=3, stride=stride, W=get_W(network_weights, name),
                                  b=get_b(network_weights, name), nonlinearity=nonlinearity, name=name, crop='same',
                                  output_size=output_size)
    if bn:
        layer = batch_norm(layer)
    return layer
began_network.py (project: began, author: davidtellez)
def dense_layer(input, n_units, name, network_weights, nonlinearity=None, bn=False):

    layer = DenseLayer(input, num_units=n_units, nonlinearity=nonlinearity, name=name,
                       W=get_W(network_weights, name), b=get_b(network_weights, name))
    if bn:
        layer = batch_norm(layer)
    return layer
lsgan.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same',
                                     output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print ("Generator output:", layer.output_shape)
    return layer
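Sampling from this generator is a matter of compiling its deterministic output; a sketch with illustrative names:

# Sampling sketch for the generator above (illustrative names).
import numpy as np
import theano
import theano.tensor as T
import lasagne

noise_var = T.matrix('noise')
gen = build_generator(noise_var)
samples = lasagne.layers.get_output(gen, deterministic=True)
sample_fn = theano.function([noise_var], samples)
imgs = sample_fn(np.random.rand(16, 100).astype(np.float32))  # (16, 1, 28, 28)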
lsgan_cifar10.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_generator(input_var=None, verbose=False):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 1024*4*4))
    layer = ReshapeLayer(layer, ([0], 1024, 4, 4))
    # three fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 512, 5, stride=2, crop='same',
                                     output_size=8))
    layer = batch_norm(Deconv2DLayer(layer, 256, 5, stride=2, crop='same',
                                     output_size=16))
    layer = Deconv2DLayer(layer, 3, 5, stride=2, crop='same', output_size=32,
                          nonlinearity=sigmoid)
    if verbose: print ("Generator output:", layer.output_shape)
    return layer
wgan.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same',
                                     output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print ("Generator output:", layer.output_shape)
    return layer
dcgan.py (project: deep-learning-models, author: kuleshov)
def create_model(self, X, Z, n_dim, n_out, n_chan=1):
    # params
    n_lat = 100 # latent variables
    n_g_hid1 = 1024 # size of hidden layer in generator layer 1
    n_g_hid2 = 128 # size of hidden layer in generator layer 2
    n_out = n_dim * n_dim * n_chan # total dimensionality of output

    if self.model == 'gaussian': 
      raise Exception('Gaussian variables currently not supported in GAN')

    # create the generator network
    l_g_in = lasagne.layers.InputLayer(shape=(None, n_lat), input_var=Z)
    l_g_hid1 = batch_norm(lasagne.layers.DenseLayer(l_g_in, n_g_hid1))
    l_g_hid2 = batch_norm(lasagne.layers.DenseLayer(l_g_hid1, n_g_hid2*7*7))
    l_g_hid2 = lasagne.layers.ReshapeLayer(l_g_hid2, ([0], n_g_hid2, 7, 7))
    l_g_dc1 = batch_norm(Deconv2DLayer(l_g_hid2, 64, 5, stride=2, pad=2))
    l_g = Deconv2DLayer(l_g_dc1, n_chan, 5, stride=2, pad=2, 
            nonlinearity=lasagne.nonlinearities.sigmoid)
    print ("Generator output:", l_g.output_shape)

    # create the discriminator network
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
    l_d_in = lasagne.layers.InputLayer(shape=(None, n_chan, n_dim, n_dim), 
                                       input_var=X)
    l_d_hid1 = batch_norm(lasagne.layers.Conv2DLayer(
        l_d_in, num_filters=64, filter_size=5, stride=2, pad=2,
        nonlinearity=lrelu, name='l_d_hid1'))
    l_d_hid2 = batch_norm(lasagne.layers.Conv2DLayer(
        l_d_hid1, num_filters=128, filter_size=5, stride=2, pad=2,
        nonlinearity=lrelu, name='l_d_hid2'))
    l_d_hid3 = batch_norm(lasagne.layers.DenseLayer(l_d_hid2, 1024, nonlinearity=lrelu))
    l_d = lasagne.layers.DenseLayer(l_d_hid3, 1, nonlinearity=lasagne.nonlinearities.sigmoid)
    print ("Discriminator output:", l_d.output_shape)

    return l_g, l_d
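The usual pairing for these sigmoid outputs is the binary cross-entropy GAN objective. A hedged sketch for the (l_g, l_d) pair above, mirroring the standard Lasagne DCGAN recipe rather than code from this project:

# Hedged GAN-loss sketch for the (l_g, l_d) pair above.
import lasagne
from lasagne.objectives import binary_crossentropy

real_out = lasagne.layers.get_output(l_d)                  # D(x)
fake_out = lasagne.layers.get_output(                      # D(G(z))
    l_d, lasagne.layers.get_output(l_g))
d_loss = (binary_crossentropy(real_out, 1) +
          binary_crossentropy(fake_out, 0)).mean()
g_loss = binary_crossentropy(fake_out, 1).mean()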
cnn_cascade_lasagne.py (project: Cascade-CNN-Face-Detection, author: gogolgrind)
def __build_12_net__(self):

        network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network, num_filters=16, filter_size=(3, 3), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.DropoutLayer(network, p=0.3)
        network = layers.DenseLayer(network, num_units=16, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.DropoutLayer(network, p=0.3)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
cnn_cascade_lasagne.py (project: Cascade-CNN-Face-Detection, author: gogolgrind)
def __build_24_net__(self):

        network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.batch_norm(network)
        network = layers.DenseLayer(network, num_units=64, nonlinearity=relu)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
cnn_cascade_lasagne.py (project: Cascade-CNN-Face-Detection, author: gogolgrind)
def __build_48_calib_net__(self):
        network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)
        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=1, nonlinearity=relu)
        network = layers.batch_norm(layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2))
        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5), stride=1, nonlinearity=relu)
        network = layers.batch_norm(layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2))
        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=45, nonlinearity=softmax)
        return network
models.py (project: diagnose-heart, author: woshialex)
def build_fcn_segmenter(input_var, shape, version=2):
    ret = {}

    if version == 2:
        ret['input'] = la = InputLayer(shape, input_var)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=7,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3,
            pad='full'))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['output'] = la = Conv2DLayer(la, num_filters=1, filter_size=7,
                pad='full', nonlinearity=nn.nonlinearities.sigmoid)

    return ret, nn.layers.get_output(ret['output']), \
            nn.layers.get_output(ret['output'], deterministic=True)
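A sketch of compiling a training step for this segmenter, with pixel-wise binary cross-entropy on the sigmoid output. Names are illustrative; note that the unpadded convolutions mean the output's spatial size differs slightly from the input's, so the target masks must match the output shape.

# Training-step sketch for the segmenter above (illustrative names).
import theano
import theano.tensor as T
import lasagne as nn

input_var = T.tensor4('input')
target_var = T.tensor4('target')   # masks, same shape as the network output
net, output, output_det = build_fcn_segmenter(input_var, (None, 1, 256, 256))
loss = nn.objectives.binary_crossentropy(output, target_var).mean()
params = nn.layers.get_all_params(net['output'], trainable=True)
updates = nn.updates.adam(loss, params)
train_fn = theano.function([input_var, target_var], loss, updates=updates)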
audioClassifier_lasagne.py (project: deepgestures_lasagne, author: nneverova)
def build_network(self, input_var=None):
        if input_var is not None: self.sinputs = input_var

        self.network['input'] = lasagne.layers.InputLayer(shape=(self.batch_size, 1, self.input_size['audio'][0],self.input_size['audio'][1]),
                                                          input_var=self.sinputs[0])

        self.network['Conv2D_1'] = batch_norm(lasagne.layers.Conv2DLayer(
            lasagne.layers.dropout(self.network['input'], p=self.dropout_rates[0]) , num_filters=25, filter_size=(5, 5),
            nonlinearity=lasagne.nonlinearities.tanh,
            W=lasagne.init.GlorotUniform()))

        self.network['MaxPool2D_1'] = lasagne.layers.MaxPool2DLayer(self.network['Conv2D_1'], pool_size=(1, 1))

        self.network['FC_1'] = batch_norm(lasagne.layers.DenseLayer(
            lasagne.layers.dropout(self.network['MaxPool2D_1'], p=self.dropout_rates[1]),
            num_units=self.fc_layers[0],
            nonlinearity=lasagne.nonlinearities.tanh))

        self.network['FC_N'] = batch_norm(lasagne.layers.DenseLayer(lasagne.layers.dropout(self.network['FC_1'], p=self.dropout_rates[2]),
            num_units=self.fc_layers[1],
            nonlinearity=lasagne.nonlinearities.tanh))


        self.network['prob'] =  batch_norm(lasagne.layers.DenseLayer(
            lasagne.layers.dropout(self.network['FC_N'], p=self.dropout_rates[3]),
            num_units=self.nclasses,
            nonlinearity=lasagne.nonlinearities.softmax))

        return self.network
skeletonClassifier_lasagne.py (project: deepgestures_lasagne, author: nneverova)
def build_network(self, input_var=None, batch_size = None):

        print "build_network() in SkeletonClassifier invoked"
        print self.sinputs

        if not input_var is None: self.sinputs = input_var
        if batch_size: self.batch_size = batch_size



        if not input_var is None: self.sinputs = input_var
        if not batch_size is None:
            self.batch_size = batch_size

        self.network['input'] = lasagne.layers.InputLayer(shape=(self.batch_size,self.nframes,1,self.dlength), input_var=self.sinputs[0])

        self.network['FC_1'] = batch_norm(lasagne.layers.DenseLayer( lasagne.layers.dropout(self.network['input'], p=self.dropout_rates[1]),
                    num_units=self.fc_layers[0],nonlinearity=lasagne.nonlinearities.tanh))

        self.network['FC_2'] = batch_norm(lasagne.layers.DenseLayer(
                    lasagne.layers.dropout(self.network['FC_1'], p=self.dropout_rates[2]),
                    num_units=self.fc_layers[1],
                    nonlinearity=lasagne.nonlinearities.tanh))

        self.network['FC_3'] = batch_norm(lasagne.layers.DenseLayer(
                    lasagne.layers.dropout(self.network['FC_2'], p=self.dropout_rates[3]),
                    num_units=self.fc_layers[2],
                    nonlinearity=lasagne.nonlinearities.tanh))

        self.network['prob'] = lasagne.layers.DenseLayer(
                    lasagne.layers.dropout(self.network['FC_3'], p=.2),
                    num_units=self.nclasses,
                    nonlinearity=lasagne.nonlinearities.softmax)

        return self.network
videoClassifier_lasagne.py (project: deepgestures_lasagne, author: nneverova)
def build_network(self, input_var = None, batch_size = None):


        print "build_network in VideoClassifier executed.."
        print "inputs are : " , self.sinputs

        if not input_var is None: self.sinputs = input_var
        if not batch_size is None:
            self.batch_size = batch_size


        # Merge or fuse or concatenate incoming layers
        self.network['ConcatLayer'] = lasagne.layers.ConcatLayer([self.right_network['FC_2'], self.left_network['FC_2']], axis=1, cropping=None)


        self.network['FC_3'] = batch_norm(lasagne.layers.DenseLayer(
                            lasagne.layers.dropout(self.network['ConcatLayer'], p=self.dropout_rates[0]),
                            num_units=84,
                            nonlinearity=lasagne.nonlinearities.tanh))


        self.network['prob'] = batch_norm(lasagne.layers.DenseLayer(
                            lasagne.layers.dropout(self.network['FC_3'], p=self.dropout_rates[2]),
                            num_units=self.fc_layers[2],
                            nonlinearity=lasagne.nonlinearities.softmax))



        return self.network
FaceAlignment.py (project: DeepAlignmentNetwork, author: MarekKowalski)
def createCNN(self):
        net = {}
        net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)       
        print("Input shape: {0}".format(net['input'].output_shape))

        #STAGE 1
        net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

        net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

        net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

        net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

        net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
        net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

        net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
        net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

        for i in range(1, self.nStages):
            self.addDANStage(i + 1, net)

        net['output'] = net['s' + str(self.nStages) + '_landmarks']

        return net
layers.py (project: Neural-Photo-Editor, author: ajbrock)
def InceptionLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = C2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                pad =  dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(lasagne.layers.dnn.Pool2DDNNLayer(
                incoming=incoming if j == 0 else branch[i],
                pool_size = dict['filter_size'][j],
                mode = dict['mode'][j],
                stride = dict['stride'][j],
                pad = dict['pad'][j],
                name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j]) if style=='pool'\
            else lasagne.layers.DilatedConv2DLayer(
                incoming = lasagne.layers.PadLayer(incoming = incoming if j==0 else branch[i],width = dict['pad'][j]) if 'pad' in dict else incoming if j==0 else branch[i],
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                dilation = dict['dilation'][j],
                # pad = dict['pad'][j] if 'pad' in dict else None,
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j))  if style== 'dilation'\
            else DL(
                    incoming = incoming if j==0 else branch[i],
                    num_units = dict['num_filters'][j],
                    W = initmethod('relu'),
                    b = None,
                    nonlinearity = dict['nonlinearity'][j],
                    name = block_name+'_'+str(i)+'_'+str(j))   
                # Apply Batchnorm    
            branch[i] = BN(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
        # Concatenate Sublayers        

    return CL(incomings=branch,name=block_name)
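For reference, here is an illustrative param_dict for a two-branch block; the keys are inferred from the loop above, and the concrete values are assumptions, not taken from the project.

# Illustrative param_dict: one 1x1 -> 3x3 convolutional branch plus one
# average-pooling branch (values are assumptions, not from the source).
import lasagne

example_param_dict = [
    {'style': ['convolutional', 'convolutional'],
     'num_filters': [64, 96],
     'filter_size': [1, 3],
     'pad': [0, 1],
     'stride': [1, 1],
     'nonlinearity': [lasagne.nonlinearities.rectify] * 2,
     'bnorm': [1, 1]},
    {'style': ['pool'],
     'filter_size': [3],
     'stride': [1],
     'pad': [1],
     'mode': ['average_exc_pad'],
     'nonlinearity': [None],
     'bnorm': [0]},
]
# layer = InceptionLayer(incoming, example_param_dict, block_name='incep1')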

# Convenience function to define an inception-style block with upscaling
network.py (project: third_person_im, author: bstadie)
def __init__(self, output_dim, hidden_sizes, hidden_nonlinearity,
                 output_nonlinearity, hidden_W_init=LI.GlorotUniform(), hidden_b_init=LI.Constant(0.),
                 output_W_init=LI.GlorotUniform(), output_b_init=LI.Constant(0.),
                 name=None, input_var=None, input_layer=None, input_shape=None, batch_norm=False):

        Serializable.quick_init(self, locals())

        if name is None:
            prefix = ""
        else:
            prefix = name + "_"

        if input_layer is None:
            l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var)
        else:
            l_in = input_layer
        self._layers = [l_in]
        l_hid = l_in
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="%shidden_%d" % (prefix, idx),
                W=hidden_W_init,
                b=hidden_b_init,
            )
            if batch_norm:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)

        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="%soutput" % (prefix,),
            W=output_W_init,
            b=output_b_init,
        )
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LasagnePowered.__init__(self, [l_out])

