Python Param() class: example source code
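The snippets below collect real-world usages of theano.Param, which wraps a symbolic input to theano.function and attaches a Python-side default value. A minimal sketch of the pattern, assuming an older Theano release where theano.Param is still available (newer releases deprecate it in favour of theano.In):

import theano
import theano.tensor as T

x = T.dscalar('x')
y = T.dscalar('y')

# Param gives `y` a default, so the compiled function may be called
# without it; the keyword name is the variable's own name ('y').
f = theano.function([x, theano.Param(y, default=1.0)], x + y)

print(f(2.0))          # 3.0  (y falls back to its default)
print(f(2.0, 4.0))     # 6.0  (default overridden positionally)
print(f(2.0, y=10.0))  # 12.0 (overridden by keyword)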

learning_rate.py (source file from project Buffe, author bentzinir)
def create_learning_rate_func(solver_params):
    # assumes the module's aliases: import theano as t, import theano.tensor as tt
    base = tt.fscalar('base')
    gamma = tt.fscalar('gamma')
    power = tt.fscalar('power')
    itrvl = tt.fscalar('itrvl')
    iter = tt.scalar('iter')

    if solver_params['lr_type']=='inv':
        lr_ = base * tt.pow(1 + gamma * iter, -power)

        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base']), t.Param(gamma, default=solver_params['gamma']), t.Param(power, default=solver_params['power'])],
            outputs=lr_)

    elif solver_params['lr_type']=='fixed':
        lr_ = base

        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base'])],
            outputs=lr_,
            on_unused_input='ignore')

    elif solver_params['lr_type']=='episodic':
        lr_ = base / (tt.floor(iter/itrvl) + 1)

        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base']), t.Param(itrvl, default=solver_params['interval'])],
            outputs=lr_,
            on_unused_input='ignore')
    else:
        raise ValueError("unknown lr_type: %s" % solver_params['lr_type'])
    return lr
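
A hypothetical usage sketch of create_learning_rate_func; the solver_params values below are illustrative, not taken from the Buffe project:

solver_params = {'lr_type': 'inv', 'base': 0.01, 'gamma': 1e-4, 'power': 0.75}
lr_fn = create_learning_rate_func(solver_params)

print(lr_fn(0))                # equals base at iteration 0
print(lr_fn(10000))            # decayed rate, using the stored defaults
print(lr_fn(10000, base=0.1))  # any Param default can be overridden by keyword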
nolearn_net.py (source file from project kaggle-right-whale, author felixlaumon)
def transform(self, X, target_layer_name, y=None):
        target_layer = self.layers_[target_layer_name]

        layers = self.layers_
        input_layers = [
            layer for layer in layers.values()
            if isinstance(layer, nn.layers.InputLayer)
        ]
        X_inputs = [
            theano.Param(input_layer.input_var, name=input_layer.name)
            for input_layer in input_layers
        ]

        target_layer_output = nn.layers.get_output(
            target_layer, None, deterministic=True
        )

        transform_iter = theano.function(
            inputs=X_inputs,
            outputs=target_layer_output,
            allow_input_downcast=True,
        )

        outputs = []
        for Xb, yb in self.batch_iterator_test(X):
            outputs.append(self.apply_batch_func(transform_iter, Xb))
        return np.vstack(outputs)
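
Here theano.Param is used mainly for its naming side effect rather than a default: wrapping each InputLayer's input_var with name=input_layer.name makes the compiled inputs addressable by layer name. A hypothetical call, where `net` (a fitted nolearn NeuralNet) and the layer name 'fc7' are placeholders, not from the source:

features = net.transform(X_test, target_layer_name='fc7')
print(features.shape)  # (n_samples, n_units_in_fc7)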
stacked_autoencoder.py (source file from project deep-learning-theano, author aidiary)
def pretraining_functions(self, train_set_x, batch_size):
        """???????????pre-training??????????
        ?????????x?????"""
        # ?????????????????
        index = T.lscalar('index')

        # corruption level and learning rate are symbolic inputs,
        # so they can be changed on each call to the compiled function
        corruption_level = T.scalar('corruption')
        learning_rate = T.scalar('lr')

        batch_begin = index * batch_size
        batch_end = batch_begin + batch_size

        # build one pre-training function per autoencoder layer;
        # each performs a single gradient step on that layer
        pretrain_functions = []
        for autoencoder in self.autoencoder_layers:
            # get the cost and the updates list for this layer
            cost, updates = autoencoder.get_cost_updates(corruption_level, learning_rate)
            fn = theano.function(
                inputs=[
                    index,
                    # theano.Param attaches a Python-side default value to the
                    # tensor inputs corruption and lr, overridable at call time
                    theano.Param(corruption_level, default=0.2),
                    theano.Param(learning_rate, default=0.1)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin:batch_end]
                }
            )
            pretrain_functions.append(fn)

        return pretrain_functions
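
A usage sketch for the returned functions: because a Param keyword follows the underlying variable's name, the defaults above can be overridden as corruption= and lr=. The objects sda, train_set_x and n_train_batches are assumptions for illustration:

import numpy

pretrain_fns = sda.pretraining_functions(train_set_x, batch_size=20)
for layer_idx, fn in enumerate(pretrain_fns):
    for epoch in range(15):
        costs = [fn(i, corruption=0.3, lr=0.05)  # override the Param defaults
                 for i in range(n_train_batches)]
        print('layer %d epoch %d cost %f' % (layer_idx, epoch, numpy.mean(costs)))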
SdA.py (source file from project DeepLearningTutorialForChinese, author zhaoyu611)
def pretraining_function(self,train_set_x,batch_size):
        '''
        Generates a list of functions, each implementing one step in
        training the dA at the same layer index. Each function takes the
        minibatch index as input; to train a dA, just iterate over all
        minibatch indexes, calling the corresponding function.

        train_set_x: theano.tensor.TensorType  shared variable holding the
                     training data for the dA layers
        batch_size: int  size of a [mini]batch
        '''
        # index to a [mini]batch
        index=T.lscalar('index')
        corruption_level=T.scalar('corruption')  # % of corruption to use
        learning_rate=T.scalar('lr')  # learning rate to use
        # number of batches (computed but unused below)
        n_batches=train_set_x.get_value(borrow=True).shape[0]//batch_size
        # beginning of a batch, given `index`
        batch_begin=index*batch_size
        # ending of a batch, given `index`
        batch_end=batch_begin+batch_size

        pretrain_fns=[]
        for dA in self.dA_layers:  # for each dA layer
            # get the cost and the updates list
            cost,updates=dA.get_cost_updates(corruption_level,
                                            learning_rate)
            # compile the theano function
            fn=theano.function(inputs=[index,
                            theano.Param(corruption_level,default=0.2),
                            theano.Param(learning_rate,default=0.1)],
                                outputs=cost,
                                updates=updates,
                                givens={self.x:train_set_x[batch_begin:
                                                           batch_end]})
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
DBN.py (source file from project DeepLearningTutorialForChinese, author zhaoyu611)
def pretraining_functions(self,train_set_x,batch_size,k):
        """
        ??????????????????????????minibatch???
        ????RBM,?????minibatch???????

        train_set_x: theano.tensor.TensorType ??????
        batch_size: int minibatch???
        k:  int CD-k/PCD-k?Gibbs????
        """
        index=T.lscalar('index')  # index to a minibatch
        learning_rate=T.scalar('lr')  # learning rate to use
        # number of batches (computed but unused below)
        n_batches=train_set_x.get_value(borrow=True).shape[0]//batch_size
        # beginning of a batch, given `index`
        batch_begin=index*batch_size
        # ending of a batch, given `index`
        batch_end=batch_begin+batch_size

        pretrain_fns=[]
        for rbm in self.rbm_layers:  # for each RBM layer
            # get the cost and the updates list,
            # using CD-k (persistent=None) to train each RBM
            cost,updates=rbm.get_cost_updates(learning_rate,persistent=None,k=k)

            # compile the theano function; learning_rate stays a symbolic tensor input
            fn=theano.function(inputs=[index,theano.Param(learning_rate,default=0.1)],
                               outputs=cost,updates=updates,
                               givens={self.x:train_set_x[batch_begin:batch_end]})
            # append 'fn' to the list of functions
            pretrain_fns.append(fn)
        return pretrain_fns
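
A sketch of driving the returned RBM pre-training functions; dbn, train_set_x and n_batches are assumptions for illustration. With no keyword given, the learning rate falls back to the Param default of 0.1:

pretrain_fns = dbn.pretraining_functions(train_set_x, batch_size=10, k=1)
for fn in pretrain_fns:
    for epoch in range(10):
        for batch_index in range(n_batches):
            fn(batch_index)  # lr falls back to 0.1; fn(batch_index, lr=0.01) overrides it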
optimize_gan.py (source file from project GRAN, author jiwoongim)
def optimize_gan_hkl(self, model, lam1=0.00001):
        """
        optimizer for hkl packaged dataset. 
        Returns the updates for discirminator & generator and computed costs for the model.
        """

        i = T.iscalar('i')
        lr = T.fscalar('lr')
        Xu = T.fmatrix('X')

        cost_disc   = model.cost_dis(Xu, self.batch_sz) \
                                + lam1 * model.dis_network.weight_decay_l2()
        gparams_dis = T.grad(cost_disc, model.dis_network.params)

        cost_gen    = model.cost_gen(self.batch_sz) 
        gparams_gen = T.grad(cost_gen, model.gen_network.params)


        updates_dis = self.ADAM(model.dis_network.params, gparams_dis, lr)
        updates_gen = self.ADAM(model.gen_network.params, gparams_gen, lr)


        discriminator_update = theano.function([Xu, theano.Param(lr,default=self.epsilon_dis)],\
                                    outputs=cost_disc, updates=updates_dis)

        generator_update = theano.function([theano.Param(lr,default=self.epsilon_gen)],\
                outputs=cost_gen, updates=updates_gen)

        get_valid_cost   = theano.function([Xu], outputs=[cost_disc, cost_gen])

        get_test_cost   = theano.function([Xu], outputs=[cost_disc, cost_gen])

        return discriminator_update, generator_update, get_valid_cost, get_test_cost
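
A sketch of a training loop over the returned update functions; trainer, model and the minibatch source are assumptions for illustration. Because lr is wrapped in theano.Param, each update can be called without a learning rate and falls back to epsilon_dis / epsilon_gen:

disc_fn, gen_fn, valid_fn, test_fn = trainer.optimize_gan_hkl(model)
for Xb in minibatches:    # Xb: float32 matrices of shape (batch_sz, dim)
    d_cost = disc_fn(Xb)  # lr defaults to self.epsilon_dis
    g_cost = gen_fn()     # lr defaults to self.epsilon_gen
    # disc_fn(Xb, lr=1e-4)  # defaults can also be overridden per call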
optimize_gan.py (source file from project GRAN, author jiwoongim)
def optimize_gan(self, model, train_set, valid_set, test_set, lam1=0.00001):
        """
        optimizer for non packaged dataset, 
        returning updates for discriminator & generator, as well as the computed costs.
        """

        i = T.iscalar('i')
        lr = T.fscalar('lr')
        Xu = T.matrix('X')
        cost_disc   = model.cost_dis(Xu, self.batch_sz) \
                     + lam1 * model.dis_network.weight_decay_l2() 

        gparams_dis = T.grad(cost_disc, model.dis_network.params)

        cost_gen    = model.cost_gen(self.batch_sz)
        gparams_gen = T.grad(cost_gen, model.gen_network.params)


        updates_dis = self.ADAM(model.dis_network.params, gparams_dis, lr)
        updates_gen = self.ADAM(model.gen_network.params, gparams_gen, lr)

        discriminator_update = theano.function([i, theano.Param(lr,default=self.epsilon_dis)],\
                outputs=cost_disc, updates=updates_dis,\
                givens={Xu:train_set[0][i*self.batch_sz:(i+1)*self.batch_sz]})

        generator_update = theano.function([theano.Param(lr,default=self.epsilon_gen)],\
                outputs=cost_gen, updates=updates_gen)

        get_valid_cost   = theano.function([i], outputs=[cost_disc, cost_gen],\
                givens={Xu:valid_set[0][i*self.batch_sz:(i+1)*self.batch_sz]})

        get_test_cost   = theano.function([i], outputs=[cost_disc, cost_gen],\
                givens={Xu:test_set[0][i*self.batch_sz:(i+1)*self.batch_sz]})

        return discriminator_update, generator_update, get_valid_cost, get_test_cost
DBN_for_DTIs.py (source file from project DeepDTIs_DBN, author Bjoux2)
def pretraining_functions(self, train_set_x, batch_size, k):
        '''Generates a list of functions, for performing one step of
        gradient descent at a given layer. The function will require
        as input the minibatch index, and to train an RBM you just
        need to iterate, calling the corresponding function on all
        minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared var. that contains all datapoints used
                            for training the RBM
        :type batch_size: int
        :param batch_size: size of a [mini]batch
        :param k: number of Gibbs steps to do in CD-k / PCD-k

        '''

        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        learning_rate = T.scalar('lr')  # learning rate to use

        # number of batches
        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        # beginning of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for rbm in self.rbm_layers:

            # get the cost and the updates list
            # using CD-k here (persistent=None) for training each RBM.
            # TODO: change cost function to reconstruction error
            cost, updates = rbm.get_cost_updates(learning_rate,
                                                 persistent=None, k=k)

            # compile the theano function
            fn = theano.function(
                inputs=[index, theano.Param(learning_rate, default=0.1)],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin:batch_end]
                }
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
SdA.py (source file from project deep_learning_chemical, author samocooper)
def pretraining_functions(self, train_set_x, batch_size):
        ''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with the same index.
        The function will require as input the minibatch index, and to train
        a dA you just need to iterate, calling the corresponding function on
        all minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA

        :type batch_size: int
        :param batch_size: size of a [mini]batch

        :type learning_rate: float
        :param learning_rate: learning rate used during training for any of
                              the dA layers
        '''

        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # number of batches
        # n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        # beginning of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate)
            # compile the theano function
            fn = theano.function(inputs=[index,
                              theano.Param(corruption_level, default=0.2),
                              theano.Param(learning_rate, default=0.1)],
                                 outputs=cost,
                                 updates=updates,
                                 givens={self.x: train_set_x[batch_begin:
                                                             batch_end]})
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
SdA.py (source file from project HumanActivityRecognition, author humachine)
def pretraining_functions(self, train_set_x, batch_size):
        ''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with the same index.
        The function will require as input the minibatch index, and to train
        a dA you just need to iterate, calling the corresponding function on
        all minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA

        :type batch_size: int
        :param batch_size: size of a [mini]batch

        :type learning_rate: float
        :param learning_rate: learning rate used during training for any of
                              the dA layers
        '''

        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # beginning of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate)
            # compile the theano function
            fn = theano.function(
                inputs=[
                    index,
                    theano.Param(corruption_level, default=0.2),
                    theano.Param(learning_rate, default=0.1)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                }
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
multi_layer.py (source file from project DCN, author boyangumn)
def pretraining_functions(self, train_set_x, batch_size):
        ''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with the same index.
        The function will require as input the minibatch index, and to train
        a dA you just need to iterate, calling the corresponding function on
        all minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA

        :type batch_size: int
        :param batch_size: size of a [mini]batch

        :type learning_rate: float
        :param learning_rate: learning rate used during training for any of
                              the dA layers
        '''

        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # beginning of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate)
            # compile the theano function
            fn = theano.function(
                inputs=[
                    index,
                    theano.Param(corruption_level, default=0.2),
                    theano.Param(learning_rate, default=0.1)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                }
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
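
All of the snippets above share one pattern: wrap a symbolic scalar in theano.Param(var, default=...) so the compiled function exposes an optional, keyword-addressable argument. Newer Theano releases deprecate Param in favour of theano.In, which takes value= instead of default= (worth verifying against the installed version); a minimal equivalence sketch:

import theano
import theano.tensor as T

x = T.dscalar('x')
lr = T.dscalar('lr')

f_old = theano.function([x, theano.Param(lr, default=0.1)], x * lr)  # older spelling
f_new = theano.function([x, theano.In(lr, value=0.1)], x * lr)       # newer spelling

assert float(f_old(2.0)) == float(f_new(2.0)) == 0.2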

