sda.py source listing (python)

Project: ladder · Author: abhiskk

The pretrain method below greedily pre-trains a stack of denoising autoencoders one layer at a time: each autoencoder learns to reconstruct the noise-free encoding produced by the layer below it, using a binary cross-entropy reconstruction loss and plain SGD.
import torch
from torch.autograd import Variable  # pre-0.4 PyTorch API, matching the original code
from torch.optim import SGD


def pretrain(self, x, pt_epochs, verbose=True):
    n = x.data.size()[0]
    # Integer division: any trailing partial batch is dropped.
    num_batches = n // self.batch_size
    t = x

    # Pre-train one autoencoder at a time, greedily from the bottom up.
    for i, ae_re in enumerate(self.autoencoders_ref):
        # Get the current autoencoder.
        ae = getattr(self.sequential, ae_re)

        # For every layer after the first, replace the training data with
        # the (noise-free) encoded output of the previous autoencoder.
        if i > 0:
            prev_ae = getattr(self.sequential, self.autoencoders_ref[i - 1])
            # requires_grad=False so that backprop does not travel all the
            # way back into the already pre-trained previous autoencoder.
            temp = Variable(torch.FloatTensor(n, ae.d_in), requires_grad=False)
            for k in range(num_batches):
                start, end = k * self.batch_size, (k + 1) * self.batch_size
                temp.data[start:end] = prev_ae.encode(t[start:end], add_noise=False).data
            t = temp
        optimizer = SGD(ae.parameters(), lr=self.pre_lr)

        # Pre-training loop for this layer.
        print("Pre-training Autoencoder:", i)
        for ep in range(pt_epochs):
            agg_cost = 0.
            for k in range(num_batches):
                start, end = k * self.batch_size, (k + 1) * self.batch_size
                bt = t[start:end]
                optimizer.zero_grad()
                z = ae.encode(bt, add_noise=True)  # corrupt the input, then encode
                z = ae.decode(z)                   # reconstruct from the code
                # Binary cross-entropy reconstruction loss: sum over
                # features, then mean over the batch.
                loss = -torch.sum(bt * torch.log(z) + (1.0 - bt) * torch.log(1.0 - z), 1)
                cost = torch.mean(loss)
                cost.backward()
                optimizer.step()
                # Accumulate the scalar value rather than the Variable, so
                # the autograd graph is not kept alive across the epoch.
                agg_cost += cost.data[0]
            agg_cost /= num_batches
            if verbose:
                print("Pre-training Autoencoder:", i, "Epoch:", ep, "Cost:", agg_cost)