def __call__(self, x, t):
    # Network-in-Network: three mlpconv blocks with max pooling, then a
    # 1000-way mlpconv head averaged over the remaining 6x6 spatial map.
    h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
    h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
    h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
    h = self.mlpconv4(F.dropout(h))
    h = F.reshape(F.average_pooling_2d(h, 6), (len(x), 1000))
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
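Every snippet in this collection calls `chainer.report`, which hands the values to whatever reporter is current; inside a `Trainer` that wiring is automatic and `LogReport` aggregates the results. A minimal, self-contained sketch of the mechanism (the `Observed` chain is illustrative, not from any project here):

import chainer

class Observed(chainer.Chain):
    """Empty chain that only serves as a report observer."""

obs = Observed()
reporter = chainer.Reporter()
reporter.add_observer('main', obs)
with reporter:
    chainer.report({'loss': 0.5}, obs)  # keyed by the observer's name
print(reporter.observation)  # {'main/loss': 0.5}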
def __call__(self, x, t):
    # AlexNet: conv1-conv5 with local response normalization after the first
    # two blocks, then three fully connected layers with dropout.
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    h = self.fc8(h)
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
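The conv1-conv5 and fc6-fc8 links this forward pass dereferences are not shown on the page; a constructor sketch with the standard AlexNet channel sizes, following Chainer's official imagenet example (the exact sizes are an assumption about the scraped project):

import chainer
import chainer.links as L

class Alex(chainer.Chain):
    def __init__(self):
        super(Alex, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(None, 96, 11, stride=4)
            self.conv2 = L.Convolution2D(None, 256, 5, pad=2)
            self.conv3 = L.Convolution2D(None, 384, 3, pad=1)
            self.conv4 = L.Convolution2D(None, 384, 3, pad=1)
            self.conv5 = L.Convolution2D(None, 256, 3, pad=1)
            self.fc6 = L.Linear(None, 4096)
            self.fc7 = L.Linear(None, 4096)
            self.fc8 = L.Linear(None, 1000)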
def __call__(self, x, t):
    # Same NIN forward pass written against the Chainer v1 API: F.dropout
    # takes an explicit train flag instead of reading chainer.config.train.
    h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
    h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
    h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
    h = self.mlpconv4(F.dropout(h, train=self.train))
    h = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000))
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
def __call__(self, x, t):
    # Chainer v1 variant of the AlexNet forward pass above: dropout takes
    # an explicit train flag.
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
def loss_dis(self, dis, y_fake, y_real):
    batchsize = len(y_fake)
    L1 = F.sum(F.softplus(-y_real)) / batchsize
    L2 = F.sum(F.softplus(y_fake)) / batchsize
    loss = L1 + L2
    chainer.report({'loss': loss}, dis)
    return loss
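Since softplus(-y) = -log(sigmoid(y)), the two terms above are binary cross-entropy on raw logits: real samples are pushed toward high scores and fakes toward low ones. The scraped file omits the generator side; the matching non-saturating loss, as in Chainer's DCGAN example (a sketch):

def loss_gen(self, gen, y_fake):
    # maximize log D(G(z)), i.e. minimize softplus(-y_fake)
    batchsize = len(y_fake)
    loss = F.sum(F.softplus(-y_fake)) / batchsize
    chainer.report({'loss': loss}, gen)
    return loss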
def loss_D(self, real_D, fake_D):
    # Here the discriminator outputs probabilities, hence the explicit log
    # loss with an eps guard against log(0).
    batch_size, _, h, w = real_D.shape
    loss = -F.sum(F.log(real_D + self.eps) + F.log(1 - fake_D + self.eps)) / (batch_size * h * w)
    chainer.report({'loss': loss}, self.D)
    return loss
def loss_G(self, real_B, fake_B, fake_D):
    # Generator loss: adversarial log term plus an L1 reconstruction term
    # weighted by self.lambd (the pix2pix objective).
    loss_l1 = F.mean_absolute_error(real_B, fake_B)
    chainer.report({'loss_l1': loss_l1}, self.G)
    batch_size, _, h, w = fake_D.shape
    loss_D = -F.sum(F.log(fake_D + self.eps)) / (batch_size * h * w)
    chainer.report({'loss_D': loss_D}, self.G)
    loss = loss_D + self.lambd * loss_l1
    chainer.report({'loss': loss}, self.G)
    return loss
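How `loss_D` and `loss_G` are driven is not shown on the page; a hedged sketch of the usual `StandardUpdater` wiring (the class, attribute, and optimizer names are assumptions, not the project's code):

import chainer
from chainer import training

class Pix2PixUpdater(training.StandardUpdater):
    def __init__(self, iterator, opt_G, opt_D, G, D, **kwargs):
        super(Pix2PixUpdater, self).__init__(
            iterator, {'G': opt_G, 'D': opt_D}, **kwargs)
        self.G, self.D = G, D

    def update_core(self):
        batch = self.get_iterator('main').next()
        real_A, real_B = self.converter(batch, self.device)
        fake_B = self.G(real_A)          # generator forward
        real_D = self.D(real_A, real_B)  # score real pairs
        fake_D = self.D(real_A, fake_B)  # score generated pairs
        # In practice fake_B is detached (unchain_backward) before the D step
        # so the discriminator update does not backpropagate into G.
        self.get_optimizer('D').update(self.loss_D, real_D, fake_D)
        self.get_optimizer('G').update(self.loss_G, real_B, fake_B, fake_D)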
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
    # Encoder objective: L1 reconstruction plus an adversarial term averaged
    # over the patch discriminator's output map.
    batchsize, _, w, h = y_out.data.shape
    loss_rec = lam1 * F.mean_absolute_error(x_out, t_out)
    loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h
    loss = loss_rec + loss_adv
    chainer.report({'loss': loss}, enc)
    return loss
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
    # Decoder objective: identical to the encoder's.
    batchsize, _, w, h = y_out.data.shape
    loss_rec = lam1 * F.mean_absolute_error(x_out, t_out)
    loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h
    loss = loss_rec + loss_adv
    chainer.report({'loss': loss}, dec)
    return loss
def loss_dis(self, dis, y_in, y_out):
    # Patch discriminator: y_in scores real pairs, y_out generated ones;
    # the softplus loss is averaged over every spatial position.
    batchsize, _, w, h = y_in.data.shape
    L1 = F.sum(F.softplus(-y_in)) / batchsize / w / h
    L2 = F.sum(F.softplus(y_out)) / batchsize / w / h
    loss = L1 + L2
    chainer.report({'loss': loss}, dis)
    return loss
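The repeated `/ batchsize / w / h` normalization is just an average over the discriminator's output map; for a single-channel map it coincides with `F.mean` (available from Chainer v3). A quick self-contained check with an illustrative shape:

import numpy as np
import chainer.functions as F

y_out = np.random.randn(4, 1, 16, 16).astype(np.float32)
batchsize, _, w, h = y_out.shape
normalized_sum = F.sum(F.softplus(-y_out)) / batchsize / w / h
plain_mean = F.mean(F.softplus(-y_out))
assert np.allclose(normalized_sum.data, plain_mean.data)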
# From updater.py in the Semantic-Segmentation-using-Adversarial-Networks project, by oyam.
def _get_loss_dis(self):
    # Treats real/fake discrimination as 2-class softmax classification;
    # volatile is the Chainer v1 flag that skips graph construction.
    batchsize = self.y_fake.data.shape[0]
    loss = F.softmax_cross_entropy(self.y_real, Variable(
        self.xp.ones(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
    loss += F.softmax_cross_entropy(self.y_fake, Variable(
        self.xp.zeros(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
    chainer.report({'loss': loss}, self.dis)
    return loss
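`volatile` was removed in Chainer v2, and raw arrays can be passed as labels directly, so an equivalent modern version reads (a sketch):

def _get_loss_dis(self):
    batchsize = self.y_fake.shape[0]
    ones = self.xp.ones(batchsize, dtype=self.xp.int32)
    zeros = self.xp.zeros(batchsize, dtype=self.xp.int32)
    loss = F.softmax_cross_entropy(self.y_real, ones)    # real -> class 1
    loss += F.softmax_cross_entropy(self.y_fake, zeros)  # fake -> class 0
    chainer.report({'loss': loss}, self.dis)
    return loss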
def make_loss(self, target, raw_line, test):
    xp = self.models.mismatch_discriminator.xp
    batchsize = target.shape[0]
    l_true = xp.ones(batchsize, dtype=numpy.float32)
    l_false = xp.zeros(batchsize, dtype=numpy.float32)
    # Roll the line batch by one so each target is paired with a
    # non-matching line.
    raw_line_mismatch = chainer.functions.permutate(
        raw_line, indices=numpy.roll(numpy.arange(batchsize, dtype=numpy.int32), shift=1), axis=0)
    output = self.forwarder.forward(
        input=target,
        raw_line=raw_line,
        raw_line_mismatch=raw_line_mismatch,
        test=test,
    )
    generated = output['generated']
    match = output['match']
    mismatch = output['mismatch']
    z = output['z']
    # Generator: plain reconstruction error.
    mse = chainer.functions.mean_squared_error(generated, target)
    loss_gen = {'mse': mse}
    chainer.report(loss_gen, self.models.generator)
    # Mismatch discriminator: least-squares targets, 0 for matched pairs
    # and 1 for mismatched ones.
    match_lsm = utility.chainer.least_square_mean(match, l_false)
    mismatch_lsm = utility.chainer.least_square_mean(mismatch, l_true)
    loss_mismatch_discriminator = {'match_lsm': match_lsm, 'mismatch_lsm': mismatch_lsm}
    chainer.report(loss_mismatch_discriminator, self.models.mismatch_discriminator)
    # Encoder: fool the mismatch discriminator and keep the latent code small.
    fake_mismatch_lsm = utility.chainer.least_square_mean(match, l_true)
    z_l2 = chainer.functions.sum(z ** 2) / z.size
    loss_enc = {'mse': mse, 'fake_mismatch_lsm': fake_mismatch_lsm, 'activity_regularization': z_l2}
    chainer.report(loss_enc, self.models.encoder)
    return {
        'encoder': loss_enc,
        'generator': loss_gen,
        'mismatch_discriminator': loss_mismatch_discriminator,
    }
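`utility.chainer.least_square_mean` is a project-local helper. Judging from how it pairs outputs with 0/1 labels above, it plausibly computes the least-squares (LSGAN-style) objective; a hypothetical reconstruction, not the project's actual code:

import chainer.functions as F

def least_square_mean(x, target):
    # assumed behavior: mean squared distance to the target labels
    return F.mean((F.flatten(x) - target) ** 2)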
def __call__(self, x, t):
    # Concatenate the feature maps returned by self.features along the
    # channel axis, average-pool with a 7x7 window, then classify.
    features = F.concat(self.features(x), axis=1)
    out = F.average_pooling_2d(features, ksize=7)
    out = self.classifier(out)
    loss = F.softmax_cross_entropy(out, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(out, t)}, self)
    return loss
def __call__(self, x, t):
    # ResNet-style forward pass: stem conv + BN, four residual stages,
    # global average pooling, and a final fully connected layer.
    h = self.bn1(self.conv1(x))
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.res2(h)
    h = self.res3(h)
    h = self.res4(h)
    h = self.res5(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.fc(h)
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
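Each classifier chain above reports 'loss' and 'accuracy' to itself; under a `Trainer` these surface as `main/loss` and `main/accuracy`. A runnable toy sketch of that wiring (the tiny `Model` and the random data are placeholders, not any project's code):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions

class Model(chainer.Chain):
    # stand-in for any of the chains above
    def __init__(self):
        super(Model, self).__init__()
        with self.init_scope():
            self.fc = L.Linear(None, 10)

    def __call__(self, x, t):
        h = self.fc(x)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss

x = np.random.randn(64, 32).astype(np.float32)
t = np.random.randint(0, 10, size=64).astype(np.int32)
train_iter = chainer.iterators.SerialIterator(
    chainer.datasets.TupleDataset(x, t), batch_size=8)
optimizer = chainer.optimizers.MomentumSGD(lr=0.01)
optimizer.setup(Model())
updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (2, 'epoch'), out='result')
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))
trainer.run()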
def __call__(self, x, t, label):
    y = self.predictor(x, label)
    dims = self.xp.prod(np.array(y.shape[2:]))  # for CIFAR should be 3072
    nll = F.softmax_cross_entropy(y, t, normalize=True)
    chainer.report({'nll': nll, 'bits/dim': nll / dims}, self)
    return nll
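Note that `F.softmax_cross_entropy` returns the mean negative log-likelihood in nats, so the value reported as `bits/dim` above is in nats per dimension unless divided by ln 2. A small conversion helper (a sketch):

import math

def nats_to_bits(x):
    # 1 nat = 1 / ln(2) bits
    return x / math.log(2)

# the conventional figure would then be reported as:
# chainer.report({'bits/dim': nats_to_bits(nll / dims)}, self)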
def loss_dis(self, dis, y_fake, y_real):
    batchsize = len(y_fake)
    L1 = 0.5 * F.sum((y_real - self.b) ** 2) / batchsize
    L2 = 0.5 * F.sum((y_fake - self.a) ** 2) / batchsize
    loss = L1 + L2
    chainer.report({'loss': loss}, dis)
    return loss
def loss_gen(self, gen, y_fake):
    batchsize = len(y_fake)
    loss = 0.5 * F.sum((y_fake - self.c) ** 2) / batchsize
    chainer.report({'loss': loss}, gen)
    return loss
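These are the LSGAN objectives (Mao et al., 2017): `a` and `b` are the discriminator's target labels for fake and real samples, and `c` is the label the generator wants its fakes to receive; a common choice is a=0, b=1, c=1. A quick numeric sanity check under that choice (values are illustrative):

import numpy as np
import chainer.functions as F

y_real = np.ones((4, 1), dtype=np.float32)   # perfectly scored real batch
y_fake = np.zeros((4, 1), dtype=np.float32)  # perfectly scored fake batch
L1 = 0.5 * F.sum((y_real - 1.0) ** 2) / len(y_fake)  # b = 1
L2 = 0.5 * F.sum((y_fake - 0.0) ** 2) / len(y_fake)  # a = 0
print((L1 + L2).data)  # 0.0 -- a perfect discriminator has zero loss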