Python Conv2DLayer() usage examples (source code)

pattern_based.py (project: nn-patterns, author: pikinder)
def _set_inverse_parameters(self, patterns=None):
        self.trainable_layers = [self.inverse_map[l]
                                 for l in L.get_all_layers(self.output_layer)
                                 if type(l) in [L.Conv2DLayer, L.DenseLayer]]
        if patterns is not None:
            if type(patterns) is list:
                patterns = patterns[0]
            for i,layer in enumerate(self.trainable_layers):
                pattern = patterns['A'][i]
                if pattern.ndim == 4:
                    pattern = pattern.transpose(1,0,2,3)
                elif pattern.ndim == 2:
                    pattern = pattern.T
                layer.W.set_value(pattern)
        else:
            print("Patterns not given, explanation is random.")
train_fcae.py (project: experiments, author: tencia)
def build_fcae(input_var, channels=1):
    ret = {}
    ret['input'] = layer = InputLayer(shape=(None, channels, None, None), input_var=input_var)
    ret['conv1'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=5, pad='full'))
    ret['pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['conv2'] = layer = bn(Conv2DLayer(layer, num_filters=256, filter_size=3, pad='full'))
    ret['pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['conv3'] = layer = bn(Conv2DLayer(layer, num_filters=32, filter_size=3, pad='full'))
    ret['enc'] = layer = GlobalPoolLayer(layer)
    ret['ph1'] = layer = NonlinearityLayer(layer, nonlinearity=None)
    ret['ph2'] = layer = NonlinearityLayer(layer, nonlinearity=None)
    ret['unenc'] = layer = bn(InverseLayer(layer, ret['enc']))
    ret['deconv3'] = layer = bn(Conv2DLayer(layer, num_filters=256, filter_size=3))
    ret['depool2'] = layer = InverseLayer(layer, ret['pool2'])
    ret['deconv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['depool1'] = layer = InverseLayer(layer, ret['pool1'])
    ret['output'] = layer = Conv2DLayer(layer, num_filters=1, filter_size=5,
                                        nonlinearity=nn.nonlinearities.sigmoid)
    return ret
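Given the dictionary returned by build_fcae, a reconstruction function can be compiled along these lines (a sketch, assuming the imports of the original file plus theano; the variable names here are illustrative):

import theano
import theano.tensor as T
from lasagne.layers import get_output

input_var = T.tensor4('input')
net = build_fcae(input_var, channels=1)

# Deterministic forward pass through the whole autoencoder.
reconstruction = get_output(net['output'], deterministic=True)
reconstruct_fn = theano.function([input_var], reconstruction)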
cnn_cascade_lasagne.py (project: Cascade-CNN-Face-Detection, author: gogolgrind)
def __build_48_net__(self):
        network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5,5), stride=1, nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)
        network = layers.batch_norm(network)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5,5), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(3,3), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)

        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
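A hedged sketch of a training step for this 48-net; `network` stands for the returned softmax layer and `input_var` for the tensor given to the InputLayer (self.__input_var__ in the class), and the optimizer settings are illustrative rather than taken from the project:

import theano
import theano.tensor as T
import lasagne

target_var = T.ivector('targets')
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params,
                                            learning_rate=0.01, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)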
enhance.py (project: neural-enhance, author: alexjc)
def setup_generator(self, input, config):
        for k, v in config.items(): setattr(args, k, v)
        args.zoom = 2**(args.generator_upscale - args.generator_downscale)

        units_iter = extend(args.generator_filters)
        units = next(units_iter)
        self.make_layer('iter.0', input, units, filter_size=(7,7), pad=(3,3))

        for i in range(0, args.generator_downscale):
            self.make_layer('downscale%i'%i, self.last_layer(), next(units_iter), filter_size=(4,4), stride=(2,2))

        units = next(units_iter)
        for i in range(0, args.generator_blocks):
            self.make_block('iter.%i'%(i+1), self.last_layer(), units)

        for i in range(0, args.generator_upscale):
            u = next(units_iter)
            self.make_layer('upscale%i.2'%i, self.last_layer(), u*4)
            self.network['upscale%i.1'%i] = SubpixelReshuffleLayer(self.last_layer(), u, 2)

        self.network['out'] = ConvLayer(self.last_layer(), 3, filter_size=(7,7), pad=(3,3), nonlinearity=None)
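SubpixelReshuffleLayer is defined elsewhere in enhance.py; it performs a depth-to-space rearrangement that turns u*4 channels into u channels at twice the spatial resolution, which is why each upscale step first quadruples the filter count. A numpy sketch of the idea (the original layer's exact channel ordering may differ):

import numpy as np

def depth_to_space(x, r):
    # Rearrange (N, C*r*r, H, W) into (N, C, H*r, W*r).
    n, crr, h, w = x.shape
    c = crr // (r * r)
    x = x.reshape(n, c, r, r, h, w)
    x = x.transpose(0, 1, 4, 2, 5, 3)  # -> (N, C, H, r, W, r)
    return x.reshape(n, c, h * r, w * r)

x = np.arange(2 * 8 * 3 * 3, dtype=np.float32).reshape(2, 8, 3, 3)
assert depth_to_space(x, 2).shape == (2, 2, 6, 6)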
enhance.py (project: neural-enhance, author: alexjc)
def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
        hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
        self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
        self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                    nonlinearity=lasagne.nonlinearities.linear))


    #------------------------------------------------------------------------------------------------------------------
    # Input / Output
    #------------------------------------------------------------------------------------------------------------------
adda_network.py (project: adda_mnist64, author: davidtellez)
def network_classifier(self, input_var):

        network = {}
        network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
        network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
        network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
        network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
        network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
        network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
        network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
        network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
        network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
        network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
        network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')

        return network
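A quick sanity check is to print layer output shapes and the trainable parameter count; this assumes `network` is the dict returned by network_classifier above:

import lasagne

for name in ['classifier/input', 'classifier/conv1', 'classifier/pool1',
             'classifier/dense1', 'classifier/output']:
    print(name, lasagne.layers.get_output_shape(network[name]))

print('trainable parameters:',
      lasagne.layers.count_params(network['classifier/output'], trainable=True))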
CAE.py (project: ConvolutionalAutoEncoder, author: ToniCreswell)
def build_net(nz=10):
    # nz = size of latent code
    # N.B. using batch_norm applies BN before the non-linearity!
    F = 32
    enc = InputLayer(shape=(None, 1, 28, 28))
    enc = Conv2DLayer(incoming=enc, num_filters=F*2, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=1, nonlinearity=lrelu(0.2), pad=2)
    enc = reshape(incoming=enc, shape=(-1, F*4*7*7))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)
    # Generator (decoder) network
    dec = InputLayer(shape=(None, nz))
    dec = DenseLayer(incoming=dec, num_units=F*4*7*7)
    dec = reshape(incoming=dec, shape=(-1, F*4, 7, 7))
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=1, filter_size=3, stride=1, nonlinearity=sigmoid, crop=1)

    return enc, dec
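The two stacks are trained end to end by feeding the encoder's code into the decoder's input; a minimal sketch, assuming enc, dec = build_net(nz=10) (the Adam settings are illustrative):

import theano
import theano.tensor as T
import lasagne

X = T.tensor4('X')  # (batch, 1, 28, 28) input images
code = lasagne.layers.get_output(enc, X)      # (batch, nz) latent code
recon = lasagne.layers.get_output(dec, code)  # (batch, 1, 28, 28) reconstruction
loss = lasagne.objectives.squared_error(recon, X).mean()

params = (lasagne.layers.get_all_params(enc, trainable=True)
          + lasagne.layers.get_all_params(dec, trainable=True))
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function([X], loss, updates=updates)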
__init__.py (project: aenet, author: znaoya)
def build_model(self):
        '''
        Build Acoustic Event Net model
        :return:
        '''

        # 'A' architecture, 41 classes
        nonlin = lasagne.nonlinearities.rectify
        net = {}
        net['input'] = InputLayer((None, feat_shape[0], feat_shape[1], feat_shape[2]))  # (channel, time, frequency)
        # ----------- 1st layer group ---------------
        net['conv1a'] = ConvLayer(net['input'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv1b'] = ConvLayer(net['conv1a'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool1'] = MaxPool2DLayer(net['conv1b'], pool_size=(1, 2))  # (time, freq)
        # ----------- 2nd layer group ---------------
        net['conv2a'] = ConvLayer(net['pool1'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv2b'] = ConvLayer(net['conv2a'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool2'] = MaxPool2DLayer(net['conv2b'], pool_size=(2, 2))  # (time, freq)
        # ----------- fully connected layer group ---------------
        net['fc5'] = DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlin)
        net['fc6'] = DenseLayer(net['fc5'], num_units=1024, nonlinearity=nonlin)
        net['prob'] = DenseLayer(net['fc6'], num_units=41, nonlinearity=lasagne.nonlinearities.softmax)

        return net
convolutional_neural_network.py (project: kaggle-breast-cancer-prediction, author: sirCamp)
def CNN(n_epochs):
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),  # Convolutional layer.  Params defined below
            ('pool1', layers.MaxPool2DLayer),  # Like downsampling, for execution speed
            ('conv2', layers.Conv2DLayer),
            ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],

        input_shape=(None, 1, 6, 5),
        conv1_num_filters=8,
        conv1_filter_size=(3, 3),
        conv1_nonlinearity=lasagne.nonlinearities.rectify,

        pool1_pool_size=(2, 2),

        conv2_num_filters=12,
        conv2_filter_size=(1, 1),
        conv2_nonlinearity=lasagne.nonlinearities.rectify,

        hidden3_num_units=1000,
        output_num_units=2,
        output_nonlinearity=lasagne.nonlinearities.softmax,

        update_learning_rate=0.0001,
        update_momentum=0.9,

        max_epochs=n_epochs,
        verbose=0,
    )
    return net1
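Usage follows the standard nolearn pattern; a sketch with random placeholder data shaped to match input_shape=(None, 1, 6, 5):

import numpy as np

X = np.random.rand(100, 1, 6, 5).astype(np.float32)
y = np.random.randint(0, 2, size=100).astype(np.int32)

net1 = CNN(n_epochs=20)
net1.fit(X, y)          # nolearn compiles and trains the lasagne network
predictions = net1.predict(X)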
gradient_based.py (project: nn-patterns, author: pikinder)
def _set_inverse_parameters(self, patterns=None):
        for l in L.get_all_layers(self.output_layer):
            if type(l) is L.Conv2DLayer:
                W = l.W.get_value()
                if l.flip_filters:
                    W = W[:,:,::-1,::-1]
                W = W.transpose(1,0,2,3)
                self.inverse_map[l].W.set_value(W)
            elif type(l) is L.DenseLayer:
                self.inverse_map[l].W.set_value(l.W.get_value().T)
relevance_based.py (project: nn-patterns, author: pikinder)
def _get_normalised_relevance_layer(self, layer, feeder):

        def add_epsilon(Zs):
            tmp = (T.cast(Zs >= 0, theano.config.floatX) * 2.0 - 1.0)
            return Zs + self.epsilon * tmp

        if isinstance(layer, L.DenseLayer):
            forward_layer = L.DenseLayer(layer.input_layer,
                                         layer.num_units,
                                         W=layer.W,
                                         b=layer.b,
                                         nonlinearity=None)
        elif isinstance(layer, L.Conv2DLayer):
            forward_layer = L.Conv2DLayer(layer.input_layer,
                                          num_filters=layer.num_filters,
                                          W=layer.W,
                                          b=layer.b,
                                          stride=layer.stride,
                                          filter_size=layer.filter_size,
                                          flip_filters=layer.flip_filters,
                                          untie_biases=layer.untie_biases,
                                          pad=layer.pad,
                                          nonlinearity=None)
        else:
            raise NotImplementedError()

        forward_layer = L.ExpressionLayer(forward_layer,
                                          lambda x: 1.0 / add_epsilon(x))
        feeder = L.ElemwiseMergeLayer([forward_layer, feeder],
                                      merge_function=T.mul)

        return feeder
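add_epsilon pushes every entry of Zs at least epsilon away from zero in the direction of its sign (with sign(0) treated as +1), so the 1/x ExpressionLayer that follows never divides by a near-zero activation; this is the stabilizer of the epsilon-LRP rule. A numpy rendering of the same expression:

import numpy as np

def add_epsilon(Zs, epsilon=1e-6):
    # sign(Zs) with sign(0) := +1, exactly as the Theano expression above.
    tmp = (Zs >= 0).astype(np.float32) * 2.0 - 1.0
    return Zs + epsilon * tmp

Zs = np.array([0.0, 1e-9, -1e-9, 2.0], dtype=np.float32)
print(add_epsilon(Zs))  # every entry is now at least epsilon away from zero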
base.py (project: nn-patterns, author: pikinder)
def _invert_layer(self, layer, feeder):
        layer_type = type(layer)

        if L.get_output_shape(feeder) != L.get_output_shape(layer):
            feeder = L.ReshapeLayer(feeder, (-1,)+L.get_output_shape(layer)[1:])
        if layer_type is L.InputLayer:
            return self._invert_InputLayer(layer, feeder)
        elif layer_type is L.FlattenLayer:
            return self._invert_FlattenLayer(layer, feeder)
        elif layer_type is L.DenseLayer:
            return self._invert_DenseLayer(layer, feeder)
        elif layer_type is L.Conv2DLayer:
            return self._invert_Conv2DLayer(layer, feeder)
        elif layer_type is L.DropoutLayer:
            return self._invert_DropoutLayer(layer, feeder)
        elif layer_type in [L.MaxPool2DLayer, L.MaxPool1DLayer]:
            return self._invert_MaxPoolingLayer(layer, feeder)
        elif layer_type is L.PadLayer:
            return self._invert_PadLayer(layer, feeder)
        elif layer_type is L.SliceLayer:
            return self._invert_SliceLayer(layer, feeder)
        elif layer_type is L.LocalResponseNormalization2DLayer:
            return self._invert_LocalResponseNormalisation2DLayer(layer, feeder)
        elif layer_type is L.GlobalPoolLayer:
            return self._invert_GlobalPoolLayer(layer, feeder)
        else:
            return self._invert_UnknownLayer(layer, feeder)
base.py (project: nn-patterns, author: pikinder)
def _collect_layers(self):
        self.all_layers = L.get_all_layers(self.output_layer)
        ret = [l for l in self.all_layers if
                type(l) in [L.DenseLayer, L.Conv2DLayer]]

        return ret
combined.py (project: nn-patterns, author: pikinder)
def _get_split(self, layer,
                   deterministic=True, conv_all_patches=True, **kwargs):

        # Get the patches and the outputs without the non-linearities.
        if type(layer) is L.DenseLayer:
            x, y = putils.get_dense_xy(layer, deterministic)
        elif type(layer) is L.Conv2DLayer:
            if conv_all_patches is True:
                x, y = putils.get_conv_xy_all(layer, deterministic)
            else:
                x, y = putils.get_conv_xy(layer, deterministic)
        else:
            raise ValueError("Unknown layer as input")

        # Create an output dictionary
        outputs = dict()

        for name, fun in subtypes:
            outputs[name] = dict()
            mrk_y = 1.0 * T.cast(fun(y), dtype=theano.config.floatX)  # (N,O)
            y_current = y * mrk_y  # apply the binary mask
            cnt_y = T.shape_padaxis(T.sum(mrk_y, axis=0), axis=0)  # (1,O)
            norm = T.maximum(cnt_y, 1.)

            # Count how many datapoints are considered
            outputs[name]['cnt'] = cnt_y

            # The mean of the current batch
            outputs[name]['m_y'] = T.shape_padaxis(y_current.sum(axis=0), axis=0) / norm  # (1,O) mean output for batch
            outputs[name]['m_x'] = T.dot(x.T, mrk_y) / norm  # (D,O) mean input for batch

            # Second-moment statistics of the current batch
            outputs[name]['yty'] = T.shape_padaxis(T.sum(y_current ** 2., axis=0), axis=0) / norm  # (1,O)
            outputs[name]['xty'] = T.dot(x.T, y_current) / norm  # (D,O)

        return dict_to_list(outputs)
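These are masked batch averages used later for pattern estimation: mrk_y marks which outputs belong to the current subtype, and every sum is normalized by the per-output count (clamped to 1 to avoid division by zero). A numpy sketch of the same bookkeeping for a hypothetical 'positive outputs' subtype, with illustrative shapes:

import numpy as np

N, D, O = 32, 10, 4
x = np.random.randn(N, D).astype(np.float32)  # input patches, (N,D)
y = np.random.randn(N, O).astype(np.float32)  # pre-nonlinearity outputs, (N,O)

mrk_y = (y > 0).astype(np.float32)            # subtype mask, (N,O)
y_current = y * mrk_y
cnt_y = mrk_y.sum(axis=0, keepdims=True)      # (1,O) datapoints per output
norm = np.maximum(cnt_y, 1.0)

m_y = y_current.sum(axis=0, keepdims=True) / norm  # (1,O) masked mean output
m_x = x.T.dot(mrk_y) / norm                        # (D,O) masked mean input
xty = x.T.dot(y_current) / norm                    # (D,O) input-output correlation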
mixture.py (project: nn-patterns, author: pikinder)
def get_split(self, layer,
                  deterministic=True, conv_all_patches=True, **kwargs):

        # Get the patches and the outputs without the non-linearities.
        if type(layer) is L.DenseLayer:
            x, y = get_dense_xy(layer, deterministic)
        elif type(layer) is L.Conv2DLayer:
            if conv_all_patches is True:
                x, y = get_conv_xy_all(layer, deterministic)
            else:
                x, y = get_conv_xy(layer, deterministic)
        else:
            raise ValueError("Unknown layer as input")

        # Create an output dictionary
        outputs = dict()

        for name, fun in subtypes:
            outputs[name] = dict()
            mrk_y = 1.0 * T.cast(fun(y), dtype=theano.config.floatX)  # (N,O)
            y_current = y * mrk_y  # apply the binary mask
            cnt_y = T.shape_padaxis(T.sum(mrk_y, axis=0), axis=0)  # (1,O)
            norm = T.maximum(cnt_y, 1.)

            # Count how many datapoints are considered
            outputs[name]['cnt'] = cnt_y

            # The mean of the current batch
            outputs[name]['m_y'] = T.shape_padaxis(y_current.sum(axis=0), axis=0) / norm  # (1,O) mean output for batch
            outputs[name]['m_x'] = T.dot(x.T, mrk_y) / norm  # (D,O) mean input for batch

            # Second-moment statistics of the current batch
            outputs[name]['yty'] = T.shape_padaxis(T.sum(y_current ** 2., axis=0), axis=0) / norm  # (1,O)
            outputs[name]['xty'] = T.dot(x.T, y_current) / norm  # (D,O)

        return dict_to_list(outputs)
began_network.py (project: began, author: davidtellez)
def conv_layer(input, n_filters, stride, name, network_weights, nonlinearity=elu, bn=False):

    layer = Conv2DLayer(input, num_filters=n_filters, filter_size=3, stride=stride, pad='same',
                        nonlinearity=nonlinearity, name=name, W=get_W(network_weights, name), b=get_b(network_weights, name))
    if bn:
        layer = batch_norm(layer)
    return layer
Deopen_classification.py (project: Deopen, author: kimmo1019)
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network


#random search to initialize the weights
Deopen_regression.py (project: Deopen, author: kimmo1019)
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network


#random search to initialize the weights
enhance.py (project: supic, author: Hirico)
def make_layer(self, name, input, units, filter_size=(3,3), stride=(1,1), pad=(1,1), alpha=0.25):
        conv = ConvLayer(input, units, filter_size, stride=stride, pad=pad, nonlinearity=None)
        prelu = lasagne.layers.ParametricRectifierLayer(conv, alpha=lasagne.init.Constant(alpha))
        self.network[name+'x'] = conv
        self.network[name+'>'] = prelu
        return prelu
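Note the naming convention: make_layer stores the raw convolution under name+'x' and its PReLU output under name+'>', which is why the setup_discriminator snippet above indexes keys such as 'disc1.2>'.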
enhance.py (project: supic, author: Hirico)
def setup_perceptual(self, input):
        """Use lasagne to create a network of convolution layers using pre-trained VGG19 weights.
        """
        offset = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1,3,1,1))
        self.network['percept'] = lasagne.layers.NonlinearityLayer(input, lambda x: ((x+0.5)*255.0) - offset)

        self.network['mse'] = self.network['percept']
        self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
        self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
        self.network['pool1']   = PoolLayer(self.network['conv1_2'], 2, mode='max')
        self.network['conv2_1'] = ConvLayer(self.network['pool1'],   128, 3, pad=1)
        self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
        self.network['pool2']   = PoolLayer(self.network['conv2_2'], 2, mode='max')
        self.network['conv3_1'] = ConvLayer(self.network['pool2'],   256, 3, pad=1)
        self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
        self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
        self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
        self.network['pool3']   = PoolLayer(self.network['conv3_4'], 2, mode='max')
        self.network['conv4_1'] = ConvLayer(self.network['pool3'],   512, 3, pad=1)
        self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
        self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
        self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
        self.network['pool4']   = PoolLayer(self.network['conv4_4'], 2, mode='max')
        self.network['conv5_1'] = ConvLayer(self.network['pool4'],   512, 3, pad=1)
        self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
        self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
        self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1)
inception_v3.py (project: lasagne_CNN_framework, author: woshialex)
def bn_conv(input_layer, **kwargs):
    l = Conv2DLayer(input_layer, **kwargs)
    l = batch_norm(l, epsilon=0.001)
    return l
7_eeg_mw_electrodes_downsample.py (project: python-machine-learning, author: sho-87)
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 4, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (2,2), stride = (2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
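The helper the comment describes is not part of the snippet; the version below follows the standard Lasagne MNIST tutorial implementation and behaves as documented above:

import numpy as np

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]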
6_eeg_mw_electrodes.py (project: python-machine-learning, author: sho-87)
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 30, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (3,3), stride = (3,3))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

8_eeg_mw_bands.py (project: python-machine-learning, author: sho-87)
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 5, NUM_ELECTRODES, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (2,2), stride = (2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

9_eeg_mw_xcorr.py (project: python-machine-learning, author: sho-87)
def build_cnn(k_height=3, k_width=3, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 5, 30, 30), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (2,2), stride = (2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out


