Example source code for Python layers() (lasagne.layers)

Source file: dcgan.py (project: deep-learning-models, author: kuleshov)
def create_objectives(self, deterministic=False):
    # load network
    l_g, l_d = self.network

    # load output
    g      = lasagne.layers.get_output(l_g, deterministic=deterministic)
    d_real = lasagne.layers.get_output(l_d, deterministic=deterministic)
    d_fake = lasagne.layers.get_output(l_d, g, deterministic=deterministic)

    # define loss
    loss_g = lasagne.objectives.binary_crossentropy(d_fake, 1).mean()
    loss_d = ( lasagne.objectives.binary_crossentropy(d_real, 1)
             + lasagne.objectives.binary_crossentropy(d_fake, 0) ).mean()

    # compute the fraction of real/fake samples the discriminator classifies correctly
    p_real = (d_real > 0.5).mean()
    p_fake = (d_fake < 0.5).mean()

    return loss_g, loss_d, p_real, p_fake
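For context, here is a minimal, self-contained sketch (not from this project; the tiny dense networks stand in for the real convolutional generator and discriminator) of how these objectives are typically compiled into Theano training functions:

import lasagne
import theano
import theano.tensor as T

X = T.matrix('X')   # real data batch
Z = T.matrix('Z')   # noise batch
# Stand-in networks; the real l_g / l_d are convolutional.
l_g = lasagne.layers.DenseLayer(lasagne.layers.InputLayer((None, 100), Z), 784)
l_d = lasagne.layers.DenseLayer(lasagne.layers.InputLayer((None, 784), X), 1,
                                nonlinearity=lasagne.nonlinearities.sigmoid)

g      = lasagne.layers.get_output(l_g)
d_real = lasagne.layers.get_output(l_d)
d_fake = lasagne.layers.get_output(l_d, g)
loss_g = lasagne.objectives.binary_crossentropy(d_fake, 1).mean()
loss_d = (lasagne.objectives.binary_crossentropy(d_real, 1)
          + lasagne.objectives.binary_crossentropy(d_fake, 0)).mean()

params_g = lasagne.layers.get_all_params(l_g, trainable=True)
params_d = lasagne.layers.get_all_params(l_d, trainable=True)
train_g = theano.function([Z], loss_g, updates=lasagne.updates.adam(loss_g, params_g))
train_d = theano.function([X, Z], loss_d, updates=lasagne.updates.adam(loss_d, params_d))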
Source file: adgm.py (project: deep-learning-models, author: kuleshov)
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
                opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
    # save the type of model that will be created
    self.model = model
    self.n_sample = 1 # adjustable parameter, though 1 works best in practice

    self.n_batch = n_batch
    self.n_lat = 200
    self.n_dim = n_dim
    self.n_chan = n_chan

    Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)

    # sample generation
    Z = T.matrix(dtype=theano.config.floatX) # noise matrix
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qz_mu, l_qz_logsigma, l_qa_mu, l_qa_logsigma, \
        l_qa, l_qz  = self.network
    sample = lasagne.layers.get_output(l_px_mu,  {l_qz : Z}, deterministic=True)
    self.sample = theano.function([Z], sample, on_unused_input='warn')
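A hedged usage sketch of the compiled sampler (the `model` instance is hypothetical; `n_lat` is 200 per the constructor above):

import numpy as np
import theano

# Hypothetical: 'model' is an instance of the class defined above.
z = np.random.randn(16, 200).astype(theano.config.floatX)  # 16 draws, n_lat = 200
samples = model.sample(z)  # decoder means p(x|z), one sample per noise row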
Source file: depthGAN.py (project: crossingNet, author: melonwan)
def __init__(self, z_dim, batch_size=100, lr=0.0005, b1=0.5):
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.lr = lr
        self.b1 = b1
        self.z_std = 0.6 # used when z is normal distribution
        print('depthGAN is initialized with z_dim=%d' % self.z_dim)

        # build network
        self.noise_input_layer = lasagne.layers.InputLayer((None, self.z_dim))
        self.gen_depth_layer = \
            self.build_generative(self.noise_input_layer)
        self.depth_shape =\
            lasagne.layers.get_output_shape(self.gen_depth_layer)
        print('generator built with depth output shape={}'.format(self.depth_shape))
        self.build_discriminative()
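A hedged sketch of drawing a matching noise batch (the `gan` instance is hypothetical; the normal-noise convention follows the `z_std` comment above):

import numpy as np

# Hypothetical: 'gan' is a fully constructed depthGAN instance.
z = np.random.normal(0.0, gan.z_std, size=(gan.batch_size, gan.z_dim)).astype('float32')
# z can then be fed through the generator stack rooted at gan.noise_input_layer.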
Source file: q_network.py (project: Q-Optimality-Tightening, author: ShibiHe)
def build_linear_network(self, input_width, input_height, output_dim,
                             num_frames, batch_size):
        """
        Build a simple linear learner.  Useful for creating
        tests that sanity-check the weight update code.
        """

        l_in = lasagne.layers.InputLayer(
            shape=(None, num_frames, input_width, input_height)
        )

        l_out = lasagne.layers.DenseLayer(
            l_in,
            num_units=output_dim,
            nonlinearity=None,
            W=lasagne.init.Constant(0.0),
            b=None
        )

        return l_out
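As a quick sanity check, a hedged usage sketch (Atari-style dimensions assumed; the `agent` instance exposing this method is hypothetical):

l_out = agent.build_linear_network(input_width=84, input_height=84,
                                   output_dim=18, num_frames=4, batch_size=32)
print(lasagne.layers.get_output_shape(l_out))  # (None, 18): one Q-value per action
print(lasagne.layers.count_params(l_out))      # 84 * 84 * 4 * 18 = 508032 weights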
Source file: enhance.py (project: DeepRes, author: Aneeshers)
def __init__(self):
        self.network = collections.OrderedDict()
        self.network['img'] = InputLayer((None, 3, None, None))
        self.network['seed'] = InputLayer((None, 3, None, None))

        config, params = self.load_model()
        self.setup_generator(self.last_layer(), config)

        if args.train:
            concatenated = lasagne.layers.ConcatLayer([self.network['img'], self.network['out']], axis=0)
            self.setup_perceptual(concatenated)
            self.load_perceptual()
            self.setup_discriminator()
        self.load_generator(params)
        self.compile()

    #------------------------------------------------------------------------------------------------------------------
    #------------------------------------------------------------------------------------------------------------------
Source file: nn.py (project: deligan, author: val-iisc)
def __init__(self, incoming, target_shape, filter_size, stride=(2, 2),
                 W=lasagne.init.Normal(0.05), b=lasagne.init.Constant(0.), nonlinearity=relu, **kwargs):
        super(Deconv2DLayer, self).__init__(incoming, **kwargs)
        self.target_shape = target_shape
        self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
        self.filter_size = lasagne.layers.dnn.as_tuple(filter_size, 2)
        self.stride = lasagne.layers.dnn.as_tuple(stride, 2)

        self.W_shape = (incoming.output_shape[1], target_shape[1], filter_size[0], filter_size[1])
        self.W = self.add_param(W, self.W_shape, name="W")
        if b is not None:
            self.b = self.add_param(b, (target_shape[1],), name="b")
        else:
            self.b = None
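The snippet ends before the layer's forward pass. For context, a hedged sketch (not verbatim from this project) of how such a deconvolution layer is typically completed, with the transposed convolution expressed as the gradient of a forward convolution; these methods would live inside the class:

import theano.tensor as T

def get_output_shape_for(self, input_shape):
    # The caller declares the output shape explicitly via target_shape.
    return self.target_shape

def get_output_for(self, input, **kwargs):
    op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(
        imshp=self.target_shape, kshp=self.W_shape,
        subsample=self.stride, border_mode='half')
    activation = op(self.W, input, self.target_shape[2:])
    if self.b is not None:
        activation += self.b.dimshuffle('x', 0, 'x', 'x')
    return self.nonlinearity(activation)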
Source file: model_helpers.py (project: iterative_inference_segm, author: adri-romsor)
def concatenate(net, in_layer, concat_h, concat_vars, pos, nb_concat_features):
    """
    Auxiliary function that checks whether we should concatenate the output of
    a layer `in_layer` of a network `net` to one of the tensors in `concat_vars`.

    Parameters
    ----------
    net: dictionary containing layers of a network
    in_layer: name of a layer in net
    concat_h: list of layers to concatenate
    concat_vars: list of variables (tensors) to concatenate
    pos: position in lists `concat_h` and `concat_vars` we want to check
    nb_concat_features: number of features in the layer we want to concatenate
    """
    if pos < len(concat_h) and concat_h[pos] == 'input':
        concat_h[pos] = in_layer

    # if this is the layer we want to concatenate, create an InputLayer with the
    # tensor we want to concatenate and a ConcatLayer that does the job afterwards
    if in_layer in concat_h:
        net[in_layer + '_h'] = InputLayer((None, nb_concat_features, None, None), concat_vars[pos])
        net[in_layer + '_concat'] = ConcatLayer((net[in_layer + '_h'],
                                            net[in_layer]), axis=1, cropping=None)
        pos += 1
        out = in_layer + '_concat'

        laySize = net[out].output_shape
        n_cl = laySize[1]
        print('Number of feature maps (concat):', n_cl)
    else:
        out = in_layer

    if concat_h and pos <= len(concat_h) and concat_h[pos-1] == 'noisy_input':
        concat_h[pos-1] = 'input'

    return pos, out
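A hedged example call (the layer name 'pool3' and tensor `h_var` are hypothetical):

# Concatenate an external 256-channel tensor onto the 'pool3' feature maps.
pos, out = concatenate(net, 'pool3', concat_h=['pool3'], concat_vars=[h_var],
                       pos=0, nb_concat_features=256)
# net[out] is now a ConcatLayer joining net['pool3_h'] and net['pool3'] on axis 1.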
Source file: model_helpers.py (project: iterative_inference_segm, author: adri-romsor)
def concatenate_end2end(net, in_layer, concat_h, layer_h, pos, nb_concat_features):
    """
    Auxiliary function that checks whether we should concatenate the output of
    a layer `in_layer` of a network `net` to one of the layers in `layer_h`.

    Parameters
    ----------
    net: dictionary containing layers of a network
    in_layer: name of a layer in net
    concat_h: list of layers to concatenate
    layer_h: list of layers (from another network) to concatenate
    pos: position in lists `concat_h` and `layer_h` we want to check
    nb_concat_features: number of features in the layer we want to concatenate
    """
    if pos < len(concat_h) and concat_h[pos] == 'input':
        concat_h[pos] = in_layer

    # if this is the layer we want to concatenate, reuse the corresponding
    # layer from `layer_h` and add a ConcatLayer that does the job afterwards
    if in_layer in concat_h:
        net[in_layer + '_h'] = layer_h[pos]
        net[in_layer + '_concat'] = ConcatLayer((net[in_layer + '_h'],
                                            net[in_layer]), axis=1, cropping=None)
        pos += 1
        out = in_layer + '_concat'

        laySize = net[out].output_shape
        n_cl = laySize[1]
        print('Number of feature maps (concat):', n_cl)
    else:
        out = in_layer

    if concat_h and pos <= len(concat_h) and concat_h[pos-1] == 'noisy_input':
        concat_h[pos-1] = 'input'

    return pos, out
Source file: layers.py (project: Neural-Photo-Editor, author: ajbrock)
def InceptionUpscaleLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = TC2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                crop = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(
                    incoming = lasagne.layers.dnn.Pool2DDNNLayer(
                        incoming = lasagne.layers.Upscale2DLayer(
                            incoming=incoming if j == 0 else branch[i],
                            scale_factor = dict['stride'][j]),
                        pool_size = dict['filter_size'][j],
                        stride = [1,1],
                        mode = dict['mode'][j],
                        pad = dict['pad'][j],
                        name = block_name+'_'+str(i)+'_'+str(j)),
                    nonlinearity = dict['nonlinearity'][j])
            # apply batchnorm to this sublayer if requested
            branch[i] = BN(branch[i], name=block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]

    # Concatenate sublayers across all branches
    return CL(incomings=branch, name=block_name)

# Convenience function to efficiently generate param dictionaries for use with InceptionLayer
Source file: layers.py (project: Neural-Photo-Editor, author: ajbrock)
def pd(num_layers=2, num_filters=32, filter_size=(3,3), pad=1, stride=(1,1), nonlinearity=elu, style='convolutional', bnorm=1, **kwargs):
    input_args = locals()
    input_args.pop('num_layers')
    return {key: entry if type(entry) is list else [entry]*num_layers for key, entry in input_args.items()}
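A quick demonstration of the broadcasting behaviour (values are illustrative):

cfg = pd(num_layers=3, num_filters=[32, 64, 96], stride=(2, 2))
print(cfg['num_filters'])  # [32, 64, 96]             (already a list: kept as-is)
print(cfg['stride'])       # [(2, 2), (2, 2), (2, 2)] (single tuple: repeated per layer)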

# Possible Conv2DDNN convenience function. Remember to delete the C2D import at the top if you use this    
# def C2D(incoming = None, num_filters = 32, filter_size= [3,3],pad = 'same',stride = [1,1], W = initmethod('relu'),nonlinearity = elu,name = None):
    # return lasagne.layers.dnn.Conv2DDNNLayer(incoming,num_filters,filter_size,stride,pad,False,W,None,nonlinearity,False)

# Shape-Preserving Gaussian Sample layer for latent vectors with spatial dimensions.
# This is a holdover from an "old" (i.e. I abandoned it last month) idea.
Source file: layers.py (project: Neural-Photo-Editor, author: ajbrock)
def get_params(self, unwrap_shared=True, **tags):
        params = []
        for l in self.layers:
            for p in l.get_params(**tags):
                params.append(p)
        return params
        # params = [p for p in l.get_params(trainable=True) for l in self.layers]
        # return params
        # return [p for p in lay.get_params(unwrap_shared,**tags) for lay in self.layers]
        # return lasagne.layers.get_all_params(self.final_layer,trainable=True)
Source file: doodle.py (project: neural-doodle, author: alexjc)
def load_data(self):
        """Open the serialized parameters from a pre-trained network, and load them into the model created.
        """
        vgg19_file = os.path.join(os.path.dirname(__file__), 'vgg19_conv.pkl.bz2')
        if not os.path.exists(vgg19_file):
            error("Model file with pre-trained convolution layers not found. Download here...",
                  "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2")

        data = pickle.load(bz2.open(vgg19_file, 'rb'))
        params = lasagne.layers.get_all_param_values(self.network['main'])
        lasagne.layers.set_all_param_values(self.network['main'], data[:len(params)])
Source file: doodle.py (project: neural-doodle, author: alexjc)
def setup(self, layers):
        """Setup the inputs and outputs, knowing the layers that are required by the optimization algorithm.
        """
        self.tensor_img = T.tensor4()
        self.tensor_map = T.tensor4()
        tensor_inputs = {self.network['img']: self.tensor_img, self.network['map']: self.tensor_map}
        outputs = lasagne.layers.get_output([self.network[l] for l in layers], tensor_inputs)
        self.tensor_outputs = {k: v for k, v in zip(layers, outputs)}
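A hedged usage sketch (the `model` instance and VGG-style layer names are hypothetical):

# Hypothetical: 'model' is the object whose setup() is shown above.
model.setup(['conv1_1', 'conv3_1'])
features = model.tensor_outputs['conv3_1']  # symbolic output of network['conv3_1']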
Source file: doodle.py (project: neural-doodle, author: alexjc)
def get_outputs(self, type, layers):
        """Fetch the output tensors for the network layers.
        """
        return [self.tensor_outputs[type+l] for l in layers]
Source file: doodle.py (project: neural-doodle, author: alexjc)
def do_extract_patches(self, layers, size=3, stride=1):
        """This function builds a Theano expression that will get compiled an run on the GPU. It extracts 3x3 patches
        from the intermediate outputs in the model.
        """
        results = []
        for l, f in layers:
            # Use a Theano helper function to extract "neighbors" of specific size, seems a bit slower than doing
            # it manually but much simpler!
            patches = theano.tensor.nnet.neighbours.images2neibs(f, (size, size), (stride, stride), mode='valid')
            # Make sure the patches are in the shape required to insert them into the model as another layer.
            patches = patches.reshape((-1, patches.shape[0] // f.shape[1], size, size)).dimshuffle((1, 0, 2, 3))
            # Calculate the magnitude that we'll use for normalization at runtime, then store...
            results.extend([patches] + self.compute_norms(T, l, patches))
        return results
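For reference, a standalone sketch of the `images2neibs` helper used above (shapes are illustrative):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import neighbours

images = T.tensor4('images')
patches = neighbours.images2neibs(images, neib_shape=(3, 3),
                                  neib_step=(1, 1), mode='valid')
f = theano.function([images], patches)

x = np.random.rand(1, 2, 8, 8).astype(theano.config.floatX)
print(f(x).shape)  # (72, 9): one row per 3x3 patch, per channel (2 * 6 * 6 patches)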
Source file: loc2lang_withpi.py (project: geomdn, author: afshinrahimi)
def set_params(self, params):
        lasagne.layers.set_all_param_values(self.l_out, params)
Source file: nn.py (project: melanoma-transfer, author: learningtitans)
def create_net(config, **kwargs):
    args = {
        'layers': config.layers,
        'batch_iterator_train': iterator.ResampleIterator(
            config, batch_size=config.get('batch_size_train')),
        'batch_iterator_test': iterator.SharedIterator(
            config, deterministic=True,
            batch_size=config.get('batch_size_test')),
        'on_epoch_finished': [
            Schedule('update_learning_rate', config.get('schedule'),
                     weights_file=config.final_weights_file),
            SaveBestWeights(weights_file=config.weights_file,
                            loss='kappa', greater_is_better=True,),
            SaveWeights(config.weights_epoch, every_n_epochs=5),
            SaveWeights(config.weights_best, every_n_epochs=1, only_best=True),
        ],
        'objective': get_objective(),
        'use_label_encoder': False,
        'eval_size': 0.1,
        'regression': False,
        'max_epochs': 1000,
        'verbose': 1,
        'update_learning_rate': theano.shared(
            util.float32(config.get('schedule')[0])),
        'update': nesterov_momentum,
        'update_momentum': 0.1,
        'custom_scores': [('kappa', util.kappa)],
    }
    args.update(kwargs)
    net = Net(**args)
    return net
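Hypothetical usage (the `config` module and training arrays follow this project's conventions and are not shown here):

net = create_net(config, max_epochs=200)  # kwargs override the defaults above
net.fit(X_train, y_train)                 # nolearn-style training loop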
Source file: lasagne_layers.py (project: snn4hrl, author: florensacc)
def __init__(self, incomings, name=None):  # incomings is a list (tuple) of 2 layers. The second is the "selector"
        super(BilinearIntegrationLayer, self).__init__(incomings, name)
Source file: AlexNet.py (project: iGAN, author: junyanz)
def load_model(net, layer='fc8'):
    model_values = utils.PickleLoad(os.path.join(model_dir, 'caffe_reference_%s.pkl' % layer))
    lasagne.layers.set_all_param_values(net[layer], model_values)

