Python report() example source code
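All of the snippets below follow the same pattern: a model's __call__ (or an updater's loss helper) computes a loss and passes scalar metrics to chainer.report(), which hands them to the currently active Reporter so that trainer extensions such as LogReport and PrintReport can collect them. Here is a minimal, self-contained sketch of that pattern, assuming Chainer v2 or later; the Classifier link, its layer sizes, and the 'main' observer name are illustrative and not taken from any of the projects listed below.

import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np


class Classifier(chainer.Chain):
    def __init__(self, n_in=10, n_out=3):
        super(Classifier, self).__init__()
        with self.init_scope():
            self.fc = L.Linear(n_in, n_out)

    def __call__(self, x, t):
        y = self.fc(x)
        loss = F.softmax_cross_entropy(y, t)
        # attach scalar metrics to the active Reporter, keyed by this link's name
        chainer.report({'loss': loss, 'accuracy': F.accuracy(y, t)}, self)
        return loss


# Reported values can be inspected directly through a reporter scope.
model = Classifier()
x = np.random.rand(4, 10).astype(np.float32)
t = np.random.randint(0, 3, size=4).astype(np.int32)

reporter = chainer.Reporter()
reporter.add_observer('main', model)
observation = {}
with reporter, chainer.report_scope(observation):
    model(x, t)
print(observation)  # {'main/loss': ..., 'main/accuracy': ...}

Inside a standard Trainer loop the same reported values appear in the log as 'main/loss' and 'main/accuracy'; the snippets that follow differ only in which model or updater does the reporting and which loss it computes.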

nin.py (project: chainermn, author: chainer)
def __call__(self, x, t):
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        h = self.mlpconv4(F.dropout(h))
        h = F.reshape(F.average_pooling_2d(h, 6), (len(x), 1000))

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
alex.py (project: chainermn, author: chainer)
def __call__(self, x, t):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
nin.py (project: chainermn, author: chainer)
def __call__(self, x, t):
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        # `train=` is the Chainer v1 dropout API; v2+ reads chainer.config.train instead
        h = self.mlpconv4(F.dropout(h, train=self.train))
        h = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000))

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
alex.py (project: chainermn, author: chainer)
def __call__(self, x, t):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        # `train=` is the Chainer v1 dropout API; v2+ reads chainer.config.train instead
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
updater.py (project: chainermn, author: chainer)
def loss_dis(self, dis, y_fake, y_real):
        batchsize = len(y_fake)
        # standard GAN discriminator loss: softplus(-y) = -log(sigmoid(y)), so L1
        # penalizes real samples scored low and L2 fake samples scored high
        L1 = F.sum(F.softplus(-y_real)) / batchsize
        L2 = F.sum(F.softplus(y_fake)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
updater.py (project: chainer-pix2pix, author: wuhuikai)
def loss_D(self, real_D, fake_D):
        batch_size, _, h, w = real_D.shape

        # real_D / fake_D are patch-wise discriminator probabilities, so the
        # binary cross-entropy is averaged over batch and spatial dimensions
        loss = - F.sum(F.log(real_D + self.eps) + F.log(1 - fake_D + self.eps)) / (batch_size*h*w)
        chainer.report({'loss': loss}, self.D)

        return loss
updater.py (project: chainer-pix2pix, author: wuhuikai)
def loss_G(self, real_B, fake_B, fake_D):
        loss_l1 = F.mean_absolute_error(real_B, fake_B)
        chainer.report({'loss_l1': loss_l1}, self.G)

        batch_size, _, h, w = fake_D.shape
        loss_D = - F.sum(F.log(fake_D + self.eps)) / (batch_size*h*w)
        chainer.report({'loss_D': loss_D}, self.G)

        # total generator objective: adversarial term plus lambda-weighted L1 reconstruction
        loss = loss_D + self.lambd*loss_l1
        chainer.report({'loss': loss}, self.G)

        return loss
updater.py (project: chainer-pix2pix, author: pfnet-research)
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize, _, w, h = y_out.data.shape
        # lam1-weighted L1 reconstruction plus lam2-weighted adversarial term,
        # averaged over the spatial extent of the discriminator output y_out
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, enc)
        return loss
updater.py (project: chainer-pix2pix, author: pfnet-research)
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize, _, w, h = y_out.data.shape
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, dec)
        return loss
updater.py (project: chainer-pix2pix, author: pfnet-research)
def loss_dis(self, dis, y_in, y_out):
        batchsize, _, w, h = y_in.data.shape

        # softplus GAN loss over patch-wise logits: y_in scores real pairs, y_out fake pairs
        L1 = F.sum(F.softplus(-y_in)) / batchsize / w / h
        L2 = F.sum(F.softplus(y_out)) / batchsize / w / h
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
updater.py (project: Semantic-Segmentation-using-Adversarial-Networks, author: oyam)
def _get_loss_dis(self):
        batchsize = self.y_fake.data.shape[0]
        # cross-entropy against all-ones labels for real and all-zeros for fake;
        # Variable(..., volatile=...) is the Chainer v1 API (removed in v2)
        loss = F.softmax_cross_entropy(self.y_real, Variable(
            self.xp.ones(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
        loss += F.softmax_cross_entropy(self.y_fake, Variable(
            self.xp.zeros(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
        chainer.report({'loss': loss}, self.dis)
        return loss
loss.py (project: paint_transfer_c92, author: Hiroshiba)
def make_loss(self, target, raw_line, test):
        xp = self.models.mismatch_discriminator.xp
        batchsize = target.shape[0]
        l_true = xp.ones(batchsize, dtype=numpy.float32)
        l_false = xp.zeros(batchsize, dtype=numpy.float32)

        # pair each raw line image with the target one position over in the batch,
        # producing deliberately mismatched (raw_line, target) pairs
        raw_line_mismatch = chainer.functions.permutate(
            raw_line, indices=numpy.roll(numpy.arange(batchsize, dtype=numpy.int32), shift=1), axis=0)

        output = self.forwarder.forward(
            input=target,
            raw_line=raw_line,
            raw_line_mismatch=raw_line_mismatch,
            test=test,
        )
        generated = output['generated']
        match = output['match']
        mismatch = output['mismatch']
        z = output['z']

        mse = chainer.functions.mean_squared_error(generated, target)
        loss_gen = {'mse': mse}
        chainer.report(loss_gen, self.models.generator)

        match_lsm = utility.chainer.least_square_mean(match, l_false)
        mismatch_lsm = utility.chainer.least_square_mean(mismatch, l_true)
        loss_mismatch_discriminator = {'match_lsm': match_lsm, 'mismatch_lsm': mismatch_lsm}
        chainer.report(loss_mismatch_discriminator, self.models.mismatch_discriminator)

        fake_mismatch_lsm = utility.chainer.least_square_mean(match, l_true)
        z_l2 = chainer.functions.sum(z ** 2) / z.size
        loss_enc = {'mse': mse, 'fake_mismatch_lsm': fake_mismatch_lsm, 'activity_regularization': z_l2}
        chainer.report(loss_enc, self.models.encoder)

        return {
            'encoder': loss_enc,
            'generator': loss_gen,
            'mismatch_discriminator': loss_mismatch_discriminator,
        }
dpn.py (project: chainer-DPNs, author: oyam)
def __call__(self, x, t):
        features = F.concat(self.features(x), axis=1)
        out = F.average_pooling_2d(features, ksize=7)
        out = self.classifier(out)

        loss = F.softmax_cross_entropy(out, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(out, t)}, self)
        return loss
resnet50.py (project: chainer-DPNs, author: oyam)
def __call__(self, x, t):
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.fc(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
nin.py (project: chainer-DPNs, author: oyam)
def __call__(self, x, t):
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        h = self.mlpconv4(F.dropout(h))
        h = F.reshape(F.average_pooling_2d(h, 6), (len(x), 1000))

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
models.py (project: wavenet, author: rampage644)
def __call__(self, x, t, label):
        y = self.predictor(x, label)
        dims = self.xp.prod(np.array(y.shape[2:]))  # for CIFAR should be 3072

        nll = F.softmax_cross_entropy(y, t, normalize=True)
        chainer.report({'nll': nll, 'bits/dim': nll / dims}, self)
        return nll
updater_LSGAN.py (project: GAN, author: lyakaap)
def loss_dis(self, dis, y_fake, y_real):
        batchsize = len(y_fake)
        # least-squares GAN: push D(real) toward target label b and D(fake) toward label a
        L1 = 0.5 * (F.sum((y_real - self.b) ** 2)) / batchsize
        L2 = 0.5 * (F.sum((y_fake - self.a) ** 2)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
updater_LSGAN.py (project: GAN, author: lyakaap)
def loss_gen(self, gen, y_fake):
        batchsize = len(y_fake)
        # least-squares GAN generator loss: push D(fake) toward target label c
        loss = 0.5 * (F.sum((y_fake - self.c) ** 2)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss
updater.py (project: GAN, author: lyakaap)
def loss_dis(self, dis, y_fake, y_real):
        batchsize = len(y_fake)
        L1 = 0.5 * (F.sum((y_real - self.b) ** 2)) / batchsize
        L2 = 0.5 * (F.sum((y_fake - self.a) ** 2)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
updater.py (project: GAN, author: lyakaap)
def loss_gen(self, gen, y_fake):
        batchsize = len(y_fake)
        loss = 0.5 * (F.sum((y_fake - self.c) ** 2)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss

