def __call__(self, xs):
"""
xs [(w,s,p,y), ..., ]
w: word, c: char, l: length, y: label
"""
batchsize = len(xs)
ws, cs, ls, cat_ts, dep_ts = zip(*xs)
cat_ys, dep_ys = self.forward(ws, cs, ls, dep_ts if self.train else None)
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= batchsize
dep_acc /= batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
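A quick aside on the pattern above: on Python 3, reduce must be imported from functools, and F.accuracy only skips padded positions when ignore_label is passed explicitly (F.softmax_cross_entropy already defaults to ignore_label=-1). A minimal self-contained sketch, assuming the IGNORE constant used throughout this page is -1:

import numpy as np
import chainer.functions as F
from functools import reduce  # required on Python 3

IGNORE = -1
ys = [np.array([[2.0, 0.1], [0.2, 1.5]], dtype=np.float32)]  # scores for one position
ts = [np.array([0, IGNORE], dtype=np.int32)]                 # second item is padding

loss = reduce(lambda a, b: a + b,
              [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])
acc = reduce(lambda a, b: a + b,
             [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])
print(float(loss.data), float(acc.data))  # acc is 1.0: the padded row is skipped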
def __call__(self, ws, cs, cat_ts, dep_ts):
batchsize, length = cat_ts.shape
cat_ys, dep_ys = self.forward(ws, cs)
cat_ys = cat_ys[1:-1]
cat_ts = [F.reshape(x, (batchsize,)) for x \
in F.split_axis(F.transpose(cat_ts), length, 0)]
assert len(cat_ys) == len(cat_ts)
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
    # dep_ys: one score matrix per sentence; drop the start/end symbols
    dep_ys = [x[1:-1] for x in dep_ys]
dep_ts = [F.reshape(x, (length,)) for x in F.split_axis(dep_ts, batchsize, 0)]
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= length
dep_acc /= batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
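The transpose-and-split above turns a (batchsize, length) label matrix into length vectors of shape (batchsize,), one per token position, so they line up with the per-position score list from the forward pass. A standalone shape check on dummy data:

import numpy as np
import chainer.functions as F

batchsize, length = 2, 3
ts = np.arange(batchsize * length, dtype=np.int32).reshape(batchsize, length)
columns = [F.reshape(x, (batchsize,))
           for x in F.split_axis(F.transpose(ts), length, 0)]
print([c.shape for c in columns])  # [(2,), (2,), (2,)]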
def __call__(self, xs):
"""
xs [(w,s,p,y), ..., ]
w: word, c: char, l: length, y: label
"""
batchsize = len(xs)
if len(xs[0]) == 5:
ws, ss, ps, cat_ts, dep_ts = zip(*xs)
xp = chainer.cuda.get_array_module(ws[0])
weights = [xp.array(1, 'f') for _ in xs]
else:
ws, ss, ps, cat_ts, dep_ts, weights = zip(*xs)
cat_ys, dep_ys = self.forward(ws, ss, ps, dep_ts if self.train else None)
cat_loss = reduce(lambda x, y: x + y,
[we * F.softmax_cross_entropy(y, t) \
for y, t, we in zip(cat_ys, cat_ts, weights)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) \
for y, t in zip(cat_ys, cat_ts)]) / batchsize
dep_loss = reduce(lambda x, y: x + y,
[we * F.softmax_cross_entropy(y, t) \
for y, t, we in zip(dep_ys, dep_ts, weights)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) \
for y, t in zip(dep_ys, dep_ts)]) / batchsize
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
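When the dataset carries no per-sentence weights, the first branch above synthesizes unit weights on whatever device the inputs live on: chainer.cuda.get_array_module returns numpy for CPU arrays and cupy for GPU arrays. A tiny illustration:

import numpy as np
import chainer

w = np.zeros(4, dtype=np.float32)      # a CPU-resident input
xp = chainer.cuda.get_array_module(w)  # numpy here, cupy for GPU data
one = xp.array(1, 'f')                 # scalar weight on the same device
print(xp.__name__, one)                # numpy 1.0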
def train(args):
model = EmbeddingTagger(args.model, 50, 20, 30)
model.setup_training(args.embed)
if args.initmodel:
print('Load model from', args.initmodel)
chainer.serializers.load_npz(args.initmodel, model)
train = CCGBankDataset(args.model, args.train)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
val = CCGBankDataset(args.model, args.val)
val_iter = chainer.iterators.SerialIterator(
val, args.batchsize, repeat=False, shuffle=False)
optimizer = chainer.optimizers.SGD(lr=0.01)
optimizer.setup(model)
updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)
    val_interval = (5000, 'iteration')
    log_interval = (200, 'iteration')
val_model = model.copy()
trainer.extend(extensions.Evaluator(val_iter, val_model), trigger=val_interval)
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy',
]), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.run()
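The (5000, 'iteration') and (200, 'iteration') tuples are shorthand for interval triggers; the explicit form, useful when you want other units, is:

from chainer.training import triggers

val_trigger = triggers.IntervalTrigger(5000, 'iteration')  # same as (5000, 'iteration')
epoch_trigger = triggers.IntervalTrigger(1, 'epoch')       # fires once per epoch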
def __call__(self, ws, ss, ps, cat_ts, dep_ts):
    """
    ws, ss, ps: word, suffix and prefix ids, each of shape (batchsize, length)
    cat_ts, dep_ts: category and dependency labels of the same shape
    """
    batchsize, length = ws.shape
    cat_ys, dep_ys = self.forward(ws, ss, ps)
cat_ts = [F.reshape(x, (batchsize,)) for x \
in F.split_axis(F.transpose(cat_ts), length, 0)]
dep_ts = [F.reshape(x, (batchsize,)) for x \
in F.split_axis(F.transpose(dep_ts), length, 0)]
cat_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
cat_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
dep_loss = reduce(lambda x, y: x + y,
[F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
dep_acc = reduce(lambda x, y: x + y,
[F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
cat_acc /= length
dep_acc /= length
chainer.report({
"tagging_loss": cat_loss,
"tagging_accuracy": cat_acc,
"parsing_loss": dep_loss,
"parsing_accuracy": dep_acc
}, self)
return cat_loss + dep_loss
def __call__(self, x, t, train=True):
y = self.predictor(x, train)
self.loss = F.softmax_cross_entropy(y, t)
self.acc = F.accuracy(y, t)
return self.loss
def clear(self):
self.loss = None
self.accuracy = None
def __call__(self, x, y, t):
self.clear()
hR = F.max_pooling_2d(F.relu(
F.local_response_normalization(self.convR1(x))), 3, stride=2)
hR = F.max_pooling_2d(F.relu(
F.local_response_normalization(self.convR2(hR))), 3, stride=2)
hR = F.relu(self.convR3(hR))
hR = F.relu(self.convR4(hR))
hR = F.max_pooling_2d(F.relu(self.convR5(hR)), 3, stride=2)
hR = F.dropout(F.relu(self.fcR6(hR)), train=self.train)
hR = F.dropout(F.relu(self.fcR7(hR)), train=self.train)
hD = F.max_pooling_2d(F.relu(
F.local_response_normalization(self.convD1(y))), 3, stride=2)
hD = F.max_pooling_2d(F.relu(
F.local_response_normalization(self.convD2(hD))), 3, stride=2)
hD = F.relu(self.convD3(hD))
hD = F.relu(self.convD4(hD))
hD = F.max_pooling_2d(F.relu(self.convD5(hD)), 3, stride=2)
hD = F.dropout(F.relu(self.fcD6(hD)), train=self.train)
hD = F.dropout(F.relu(self.fcD7(hD)), train=self.train)
h = F.dropout(F.relu(self.fc8(hR, hD)), train=self.train)
h = self.fc9(h)
self.loss = F.softmax_cross_entropy(h, t)
self.accuracy = F.accuracy(h, t)
return self.loss
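One portability note on the two-stream network above: the train= keyword accepted by F.dropout is Chainer v1 API. From v2 onward, dropout reads the global configuration instead, so inference looks like this sketch:

import numpy as np
import chainer
import chainer.functions as F

x = np.random.randn(4, 10).astype(np.float32)
with chainer.using_config('train', False):  # inference: dropout becomes a no-op
    y = F.dropout(x, ratio=0.5)
print(np.allclose(x, y.data))  # True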
def compute_accuracy_batch(model, batch):
source, target = make_source_target_pair(batch)
if model.xp is cuda.cupy:
source = cuda.to_gpu(source)
target = cuda.to_gpu(target)
model.reset_state()
Y = model(source)
return float(F.accuracy(Y, target, ignore_label=ID_PAD).data)
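make_source_target_pair is not shown on this page; under the usual next-token language-model setup it presumably shifts the sequence by one step, along these lines (a guess, not the repository's actual code):

def make_source_target_pair(batch):
    # source: all tokens but the last; target: all tokens but the first,
    # so the model's output at position i is scored against token i + 1
    return batch[:-1], batch[1:]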
def clear(self):
self.loss = None
self.loss1 = None
self.loss2 = None
self.loss3 = None
self.accuracy = None
def __call__(self, x, t):
self.clear()
test = not self.train
h = F.max_pooling_2d(
F.relu(self.norm1(self.conv1(x), test=test)), 3, stride=2, pad=1)
h = F.max_pooling_2d(
F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1)
h = self.inc3a(h)
h = self.inc3b(h)
h = self.inc3c(h)
h = self.inc4a(h)
a = F.average_pooling_2d(h, 5, stride=3)
a = F.relu(self.norma(self.conva(a), test=test))
a = F.relu(self.norma2(self.lina(a), test=test))
a = self.outa(a)
self.loss1 = F.softmax_cross_entropy(a, t)
h = self.inc4b(h)
h = self.inc4c(h)
h = self.inc4d(h)
b = F.average_pooling_2d(h, 5, stride=3)
b = F.relu(self.normb(self.convb(b), test=test))
b = F.relu(self.normb2(self.linb(b), test=test))
b = self.outb(b)
self.loss2 = F.softmax_cross_entropy(b, t)
h = self.inc4e(h)
h = self.inc5a(h)
h = F.average_pooling_2d(self.inc5b(h), 7)
h = self.out(h)
self.loss3 = F.softmax_cross_entropy(h, t)
self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
self.accuracy = F.accuracy(h, t)
return self.loss
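The 0.3 factor mirrors the GoogLeNet recipe: the two auxiliary heads (outa, outb) feed gradient into earlier layers but are discounted so the final classifier dominates, and only the final head's predictions count toward accuracy. With per-head losses of, say, 2.0, 1.8 and 1.5:

loss = 0.3 * (2.0 + 1.8) + 1.5  # = 2.64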
def forward(x, t):
y, = func(inputs={'data': x}, outputs=['fc8'], train=False)
return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
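func here is presumably a chainer.links.caffe.CaffeFunction wrapping a pretrained Caffe network; loading one looks like this (the model path is hypothetical):

from chainer.links.caffe import CaffeFunction

func = CaffeFunction('bvlc_alexnet.caffemodel')  # path to a pretrained .caffemodel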
test_optimizers_by_linear_model.py (project: chainer-deconv, author: germanRos)
def _train_linear_classifier(self, model, optimizer, gpu):
def _make_label(x):
a = (np.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
t = np.empty_like(a).astype(np.int32)
t[a >= 0] = 0
t[a < 0] = 1
return t
def _make_dataset(batch_size, unit_num, gpu):
x_data = np.random.uniform(
-1, 1, (batch_size, unit_num)).astype(np.float32)
t_data = _make_label(x_data)
if gpu:
x_data = cuda.to_gpu(x_data)
t_data = cuda.to_gpu(t_data)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
return x, t
for _ in six.moves.range(self.EPOCH):
x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
model.zerograds()
y = model(x)
loss = F.softmax_cross_entropy(y, t)
loss.backward()
optimizer.update()
x_test, t_test = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
y_test = model(x_test)
return F.accuracy(y_test, t_test)
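A self-contained variant of the same test idea, with w and b fixed up front (names and hyperparameters here are illustrative, not the test suite's):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

np.random.seed(0)
w = np.random.uniform(-1, 1, (5, 1)).astype(np.float32)
b = 0.1
model = L.Linear(5, 2)
optimizer = chainer.optimizers.SGD(lr=0.5)
optimizer.setup(model)

def make_batch(n=32):
    x = np.random.uniform(-1, 1, (n, 5)).astype(np.float32)
    t = (np.dot(x, w).ravel() + b < 0).astype(np.int32)  # 0 if wx + b >= 0, else 1
    return x, t

for _ in range(200):
    x, t = make_batch()
    model.cleargrads()
    loss = F.softmax_cross_entropy(model(x), t)
    loss.backward()
    optimizer.update()

x, t = make_batch()
print(float(F.accuracy(model(x), t).data))  # should approach 1.0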
def clear(self):
self.loss = None
self.accuracy = None
def __call__(self, variables):
self.clear()
y = self.encode(variables[0])
    self.loss = F.softmax_cross_entropy(y, variables[1])
    self.accuracy = F.accuracy(y, variables[1])
return self.loss