def __call__(self, xs):
"""
xs [(w,s,p,y), ..., ]
w: word, s: suffix, p: prefix, y: label
"""
batchsize = len(xs)
ws, ss, ps, ts = zip(*xs)
ys = self.forward(ws, ss, ps)
loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])
acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])
acc /= batchsize
chainer.report({
"loss": loss,
"accuracy": acc
}, self)
return loss
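None of the snippets on this page show where the reported values end up; the sketch below is independent of the projects quoted here and uses only the public chainer.Reporter API to illustrate how chainer.report() attaches values to an observer under the active reporter scope.
import chainer

model = chainer.Link()                 # stand-in for any reporting Chain
reporter = chainer.Reporter()
reporter.add_observer('main', model)   # keys get prefixed with 'main/'

observation = {}
with reporter.scope(observation):
    chainer.report({'loss': 0.5, 'accuracy': 0.9}, model)

print(observation)                     # {'main/loss': 0.5, 'main/accuracy': 0.9}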
def __call__(self, xs):
batchsize = len(xs)
ws, cs, ls, cat_ts, dep_ts = zip(*xs)
cat_ys, dep_ys = self.forward(ws, cs, ls)
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= batchsize
dep_acc /= batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
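Several of the tagger/parser snippets on this page sum per-sentence losses with reduce and divide the accumulated accuracy by the batch size. The standalone sketch below reproduces that pattern on toy data; IGNORE = -1 is an assumption for illustration, the real constant is defined elsewhere in the source project.
from functools import reduce
import numpy as np
import chainer.functions as F

IGNORE = -1  # assumed padding label; the source project defines its own constant

ys = [np.random.randn(5, 10).astype('f') for _ in range(3)]           # per-sentence logits
ts = [np.random.randint(0, 10, size=5).astype('i') for _ in range(3)] # per-sentence labels

loss = reduce(lambda a, b: a + b,
              [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])
acc = reduce(lambda a, b: a + b,
             [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)]) / len(ys)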
def __call__(self, xs, ts):
"""
Inputs:
xs (tuple(Variable, Variable, Variable)):
each of Variables is of dim (batchsize,)
ts Variable:
(batchsize)
"""
words, suffixes, caps = xs[:,:7], xs[:, 7:14], xs[:, 14:]
h_w = self.emb_word(words)
h_c = self.emb_caps(caps)
h_s = self.emb_suffix(suffixes)
h = F.concat([h_w, h_c, h_s], 2)
batchsize, ntokens, hidden = h.data.shape
h = F.reshape(h, (batchsize, ntokens * hidden))
ys = self.linear(h)
loss = F.softmax_cross_entropy(ys, ts)
acc = F.accuracy(ys, ts)
chainer.report({
"loss": loss,
"accuracy": acc
}, self)
return loss
def __call__(self, ws, ss, ps, ts):
"""
ws, ss, ps: word / suffix / prefix id arrays
ts: gold labels of shape (batchsize, length)
"""
batchsize, length = ts.shape
ys = self.forward(ws, ss, ps)[1:-1]
ts = [F.squeeze(x, 0) for x in F.split_axis(F.transpose(ts), length, 0)]
loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])
acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])
acc /= length
chainer.report({
"loss": loss,
"accuracy": acc
}, self)
return loss
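The snippet above converts a (batchsize, length) label matrix into per-timestep (batchsize,) arrays via transpose, split_axis and squeeze; a toy-shaped sketch of just that conversion:
import numpy as np
import chainer.functions as F

ts = np.arange(6, dtype='i').reshape(2, 3)           # batchsize=2, length=3
cols = F.split_axis(F.transpose(ts), 3, 0)           # three (1, batchsize) slices
per_step = [F.squeeze(x, 0) for x in cols]           # three (batchsize,) arrays
print([t.shape for t in per_step])                   # [(2,), (2,), (2,)]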
# updater.py, from the project Semantic-Segmentation-using-Adversarial-Networks (author: oyam)
def _get_loss_gen(self):
batchsize = self.y_fake.data.shape[0]
L_mce = F.softmax_cross_entropy(self.pred_label_map, self.ground_truth, normalize=False)
L_bce = F.softmax_cross_entropy(self.y_fake, Variable(self.xp.ones(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
loss = L_mce + self.L_bce_weight * L_bce
# log report
label_true = chainer.cuda.to_cpu(self.ground_truth.data)
label_pred = chainer.cuda.to_cpu(self.pred_label_map.data).argmax(axis=1)
logs = []
for i in six.moves.range(batchsize):
acc, acc_cls, iu, fwavacc = utils.label_accuracy_score(
label_true[i], label_pred[i], self.n_class)
logs.append((acc, acc_cls, iu, fwavacc))
log = np.array(logs).mean(axis=0)
values = {
'loss': loss,
'accuracy': log[0],
'accuracy_cls': log[1],
'iu': log[2],
'fwavacc': log[3],
}
chainer.report(values, self.gen)
return loss
# updater.py, from the project Semantic-Segmentation-using-Adversarial-Networks (author: oyam)
def calc_loss(self):
batchsize = self.ground_truth.shape[0]
self.loss = F.softmax_cross_entropy(self.pred_label_map, self.ground_truth, normalize=False)
# log report
label_true = chainer.cuda.to_cpu(self.ground_truth.data)
label_pred = chainer.cuda.to_cpu(self.pred_label_map.data).argmax(axis=1)
logs = []
for i in six.moves.range(batchsize):
acc, acc_cls, iu, fwavacc = utils.label_accuracy_score(
label_true[i], label_pred[i], self.n_class)
logs.append((acc, acc_cls, iu, fwavacc))
log = np.array(logs).mean(axis=0)
values = {
'loss': self.loss,
'accuracy': log[0],
'accuracy_cls': log[1],
'iu': log[2],
'fwavacc': log[3],
}
chainer.report(values, self.model)
def sum_loss(self, loss):
sum_loss_enc = BaseLoss.blend_loss(loss['encoder'], self.config.blend['encoder'])
chainer.report({'sum_loss': sum_loss_enc}, self.models.encoder)
sum_loss_gen = BaseLoss.blend_loss(loss['generator'], self.config.blend['generator'])
chainer.report({'sum_loss': sum_loss_gen}, self.models.generator)
sum_loss_mismatch_discriminator = BaseLoss.blend_loss(
loss['mismatch_discriminator'], self.config.blend['mismatch_discriminator'])
chainer.report({'sum_loss': sum_loss_mismatch_discriminator}, self.models.mismatch_discriminator)
return {
'encoder': sum_loss_enc,
'generator': sum_loss_gen,
'mismatch_discriminator': sum_loss_mismatch_discriminator,
}
def __call__(self, x, t):
# For softmax classification, use softmax_cross_entropy (it fuses the softmax and the cross-entropy loss).
h = self.fwd(x)
loss = F.softmax_cross_entropy(h, t)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
def update_core(self):
xp = self.gen.xp
self._iter += 1
opt_g = self.get_optimizer('gen')
opt_d = self.get_optimizer('dis')
data_z = self.get_latent_code_batch()
data_x = self.get_real_image_batch()
x_fake = self.gen(Variable(data_z))
dis_fake = self.dis(x_fake)
loss_gen = loss_func_lsgan_dis_real(dis_fake)
chainer.report({'loss': loss_gen}, self.gen)
opt_g.zero_grads()
loss_gen.backward()
opt_g.update()
x_fake.unchain_backward()
x_real = Variable(data_x)
dis_real = self.dis(x_real)
loss_dis = loss_func_lsgan_dis_real(dis_real) + loss_func_lsgan_dis_fake(dis_fake)
opt_d.zero_grads()
loss_dis.backward()
opt_d.update()
chainer.report({'loss': loss_dis}, self.dis)
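The loss_func_lsgan_dis_real / loss_func_lsgan_dis_fake helpers used above are not included in this listing; the sketch below is one plausible least-squares GAN formulation, an assumption about their bodies rather than the project's actual code.
import chainer.functions as F

def loss_func_lsgan_dis_real(h):
    # push discriminator outputs toward the "real" target 1
    return F.sum((h - 1.0) ** 2) / h.data.size

def loss_func_lsgan_dis_fake(h):
    # push discriminator outputs toward the "fake" target 0
    return F.sum(h ** 2) / h.data.size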
def update_core(self):
xp = self.gen.xp
self._iter += 1
opt_g = self.get_optimizer('gen')
opt_d = self.get_optimizer('dis')
data_z = self.get_latent_code_batch()
data_x = self.get_real_image_batch()
x_fake = self.gen(Variable(data_z))
dis_fake = self.dis(x_fake)
loss_gen = loss_func_dcgan_dis_real(dis_fake)
chainer.report({'loss': loss_gen}, self.gen)
opt_g.zero_grads()
loss_gen.backward()
opt_g.update()
x_fake.unchain_backward()
x_real = Variable(data_x)
dis_real = self.dis(x_real)
loss_dis = loss_func_dcgan_dis_real(dis_real) + loss_func_dcgan_dis_fake(dis_fake)
opt_d.zero_grads()
loss_dis.backward()
opt_d.update()
chainer.report({'loss': loss_dis}, self.dis)
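Likewise, loss_func_dcgan_dis_real / loss_func_dcgan_dis_fake are not shown on this page; the following sketch is consistent with the softplus-based loss_dis / loss_gen snippets further down, but is an assumption about the helpers, not the project's code.
import chainer.functions as F

def loss_func_dcgan_dis_real(y_real):
    # real inputs should get large discriminator scores
    return F.sum(F.softplus(-y_real)) / len(y_real)

def loss_func_dcgan_dis_fake(y_fake):
    # generated inputs should get small discriminator scores
    return F.sum(F.softplus(y_fake)) / len(y_fake)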
def make_loss(self, input, concat, target, test):
output = self.forwarder(input, concat, test)['image']
mae_loss = chainer.functions.mean_absolute_error(output, target)
loss = {
'mae': mae_loss,
}
chainer.report(loss, self.model)
return {
'main': loss,
}
def sum_loss(self, loss):
sum_loss = self.blend_loss(loss, self.config.blend['main'])
chainer.report({'sum_loss': sum_loss}, self.model)
return sum_loss
def loss_dis(self, dis, y_fake, y_real):
batchsize = len(y_fake)
L1 = F.sum(F.softplus(-y_real)) / batchsize
L2 = F.sum(F.softplus(y_fake)) / batchsize
loss = L1 + L2
chainer.report({'loss': loss}, dis)
return loss
def loss_gen(self, gen, y_fake):
batchsize = len(y_fake)
loss = F.sum(F.softplus(-y_fake)) / batchsize
chainer.report({'loss': loss}, gen)
return loss
def __call__(self, xs):
"""
xs [(w, s, p, cat, dep), ...]
w: word, s: suffix, p: prefix, cat: category (tagging) label, dep: dependency (parsing) label
"""
batchsize = len(xs)
ws, ss, ps, cat_ts, dep_ts = zip(*xs)
cat_ys, dep_ys = self.forward(ws, ss, ps)
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= batchsize
dep_acc /= batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
def __call__(self, xs):
"""
xs [(w, c, l, cat, dep), ...]
w: word, c: char, l: length, cat: category (tagging) label, dep: dependency (parsing) label
"""
batchsize = len(xs)
ws, cs, ls, cat_ts, dep_ts = zip(*xs)
cat_ys, dep_ys = self.forward(ws, cs, ls, dep_ts if self.train else None)
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= batchsize
dep_acc /= batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
def __call__(self, xs):
"""
xs [(w, s, p, l, cat, dep), ...] or [(w, s, p, l, cat, dep, weight), ...]
w: word, s: suffix, p: prefix, l: length, cat: category (tagging) label, dep: dependency (parsing) label
"""
batchsize = len(xs)
if len(xs[0]) == 6:
ws, ss, ps, ls, cat_ts, dep_ts = zip(*xs)
xp = chainer.cuda.get_array_module(ws[0])
weights = [xp.array(1, 'f') for _ in xs]
else:
ws, ss, ps, ls, cat_ts, dep_ts, weights = zip(*xs)
cat_ys, dep_ys = self.forward(ws, ss, ps, ls, dep_ts if self.train else None)
cat_loss = reduce(lambda x, y: x + y,
[we * F.softmax_cross_entropy(y, t) \
for y, t, we in zip(cat_ys, cat_ts, weights)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)]) / batchsize
dep_loss = reduce(lambda x, y: x + y,
[we * F.softmax_cross_entropy(y, t) \
for y, t, we in zip(dep_ys, dep_ts, weights)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)]) / batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
def __call__(self, ws, cs, cat_ts, dep_ts):
batchsize, length = cat_ts.shape
cat_ys, dep_ys = self.forward(ws, cs)
cat_ys = cat_ys[1:-1]
cat_ts = [F.reshape(x, (batchsize,)) for x \
in F.split_axis(F.transpose(cat_ts), length, 0)]
assert len(cat_ys) == len(cat_ts)
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
# dep_ys: one (length, ...) score matrix per sentence; drop the start / end sentinel tokens
dep_ys = [x[1:-1] for x in dep_ys]
dep_ts = [F.reshape(x, (length,)) for x in F.split_axis(dep_ts, batchsize, 0)]
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= length
dep_acc /= batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
def __call__(self, xs):
"""
xs [(w, s, p, cat, dep), ...] or [(w, s, p, cat, dep, weight), ...]
w: word, s: suffix, p: prefix, cat: category (tagging) label, dep: dependency (parsing) label
"""
batchsize = len(xs)
if len(xs[0]) == 5:
ws, ss, ps, cat_ts, dep_ts = zip(*xs)
xp = chainer.cuda.get_array_module(ws[0])
weights = [xp.array(1, 'f') for _ in xs]
else:
ws, ss, ps, cat_ts, dep_ts, weights = zip(*xs)
cat_ys, dep_ys = self.forward(ws, ss, ps, dep_ts if self.train else None)
cat_loss = reduce(lambda x, y: x + y,
[we * F.softmax_cross_entropy(y, t) \
for y, t, we in zip(cat_ys, cat_ts, weights)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) \
for y, t in zip(cat_ys, cat_ts)]) / batchsize
dep_loss = reduce(lambda x, y: x + y,
[we * F.softmax_cross_entropy(y, t) \
for y, t, we in zip(dep_ys, dep_ts, weights)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) \
for y, t in zip(dep_ys, dep_ts)]) / batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
def __call__(self, ws, ss, ps, cat_ts, dep_ts):
"""
ws, ss, ps: word / suffix / prefix id arrays of shape (batchsize, length)
cat_ts, dep_ts: gold category / dependency labels of shape (batchsize, length)
"""
batchsize, length = ws.shape
cat_ys, dep_ys = self.forward(ws, ss, ps)
# drop the outputs for the start / end sentinel tokens (as in the similar tagger above)
cat_ys, dep_ys = cat_ys[1:-1], dep_ys[1:-1]
cat_ts = [F.reshape(x, (batchsize,)) for x \
in F.split_axis(F.transpose(cat_ts), length, 0)]
dep_ts = [F.reshape(x, (batchsize,)) for x \
in F.split_axis(F.transpose(dep_ts), length, 0)]
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= length
dep_acc /= length
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
def __call__(self, image, x, v):
y = self.predict(image)
loss = mean_squared_error(y, x, v, use_visibility=self.use_visibility)
chainer.report({'loss': loss}, self)
return loss
def loss_gen(self, gen, G_p_rough, D_p_rough, p_line, D_u_rough, batchsize, alpha=0.1, beta=0.1):
xp = self.gen.xp
loss_L = F.mean_squared_error(G_p_rough, p_line) * G_p_rough.data.shape[0]
loss_adv = F.softmax_cross_entropy(D_p_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
loss_adv_unpaired = F.softmax_cross_entropy(D_u_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
#loss_line = self.line_loss(G_p_rough, p_line)
loss = loss_L + alpha * loss_adv + beta * loss_adv_unpaired #+ loss_line
chainer.report({'loss': loss, "loss_L": loss_L, 'loss_adv': loss_adv, 'loss_adv_u': loss_adv_unpaired}, gen)
return loss
def loss_dis(self, dis, D_p_rough, p_line, D_u_rough, u_line, batchsize, alpha=0.1, beta=0.1):
xp = self.gen.xp
loss_fake_p = F.softmax_cross_entropy(D_p_rough, Variable(xp.ones(batchsize, dtype=np.int32)))
loss_real_p = F.softmax_cross_entropy(self.dis(p_line), Variable(xp.zeros(batchsize, dtype=np.int32)))
loss_fake_u = F.softmax_cross_entropy(D_u_rough, Variable(xp.ones(batchsize, dtype=np.int32)))
loss_real_u = F.softmax_cross_entropy(self.dis(u_line), Variable(xp.zeros(batchsize, dtype=np.int32)))
loss = alpha * (loss_fake_p + loss_real_p) + beta * (loss_fake_u + loss_real_u)
chainer.report({'loss': loss, 'fake_p': loss_fake_p, 'real_p':loss_real_p, 'fake_u': loss_fake_u, 'real_u':loss_real_u}, dis)
return loss
def loss_gen(self, gen, G_out, gt, batchsize, alpha=1):
xp = self.gen.xp
loss_L = F.mean_squared_error(G_out, gt) * G_out.data.size
loss = loss_L
chainer.report({'loss': loss, "loss_L": loss_L}, gen)
return loss
def loss_gen(self, gen, G_p_rough, D_p_rough, p_line, batchsize, alpha=0.1):
xp = self.gen.xp
loss_L = F.mean_squared_error(G_p_rough, p_line) * G_p_rough.data.shape[0]
loss_adv = F.softmax_cross_entropy(D_p_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
#loss_line = self.line_loss(G_p_rough, p_line)
loss = loss_L + alpha * loss_adv #+ loss_line
chainer.report({'loss': loss, "loss_L": loss_L, 'loss_adv': loss_adv}, gen)
return loss
def loss_dis(self, dis, D_p_rough, p_line, batchsize, alpha=0.1):
xp = self.gen.xp
loss_fake_p = F.softmax_cross_entropy(D_p_rough, Variable(xp.ones(batchsize, dtype=np.int32)))
loss_real_p = F.softmax_cross_entropy(self.dis(p_line), Variable(xp.zeros(batchsize, dtype=np.int32)))
loss = alpha * (loss_fake_p + loss_real_p)
chainer.report({'loss': loss, 'fake_p': loss_fake_p, 'real_p':loss_real_p}, dis)
return loss
def __call__(self, x, sigmoid=True):
"""AutoEncoder"""
mu, ln_var = self.encode(x)
batchsize = len(mu.data)
# reconstruction loss
rec_loss = 0
for l in six.moves.range(self.k):
z = F.gaussian(mu, ln_var)
rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
/ (self.k * batchsize)
loss = rec_loss + \
self.C * gaussian_kl_divergence(mu, ln_var) / batchsize
chainer.report({'loss': loss}, self)
return loss
def __call__(self, x, t):
h = self.bn1(self.conv1(x))
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
h = F.average_pooling_2d(h, 7, stride=1)
h = self.fc(h)
loss = F.softmax_cross_entropy(h, t)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
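As a closing usage note (not taken from any of the projects above): values passed to chainer.report() inside a Chain are collected by the Trainer's reporter and logged under '<observer>/<key>'. The self-contained toy example below shows 'main/loss' and 'main/accuracy' flowing into LogReport / PrintReport.
import numpy as np
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer.datasets import TupleDataset

# toy data and model, for illustration only
x = np.random.randn(100, 4).astype('f')
t = np.random.randint(0, 3, size=100).astype('i')
model = L.Classifier(L.Linear(4, 3))   # Classifier itself calls chainer.report()

optimizer = chainer.optimizers.Adam()
optimizer.setup(model)

train_iter = chainer.iterators.SerialIterator(TupleDataset(x, t), batch_size=20)
updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (2, 'epoch'), out='result')
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))
trainer.run()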