def __call__(self, x_image, t_image, x_action, t_action):
    self.y_image, self.y_action = self.predictor(x_image, x_action)
    predicted_action = self.action_meaning(
        F.argmax(self.y_action, axis=1).data[0])
    real_action = self.action_meaning(t_action)
    if predicted_action != real_action:
        print("Predicted action:", predicted_action,
              "it was actually", real_action)
    # Reconstruction loss on the predicted frame; the normalized per-pixel
    # squared error is also stored on the chain.
    image_loss = F.mean_squared_error(self.y_image, t_image)
    self.error_mask = normalize_2d(F.squared_error(self.y_image, t_image))
    # Classification loss on the predicted action (t_action is a scalar,
    # so it is wrapped into a batch of one).
    action_loss = F.softmax_cross_entropy(
        self.y_action,
        F.expand_dims(np.array(t_action, dtype=np.int32), axis=0),
    )
    print('Image loss:', image_loss.data, ', Action loss:', action_loss.data)
    return self.weight * image_loss + (1.0 - self.weight) * action_loss
Python examples of softmax_cross_entropy()
Source file: predictive_autoencoder.py (project: Multitask-and-Transfer-Learning, author: AI-ON)
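All of the snippets on this page call the same loss. As a self-contained warm-up, here is what chainer.functions.softmax_cross_entropy computes: the mean negative log-likelihood of integer labels of shape (N,) under a softmax over logits of shape (N, K). The toy numbers below are illustrative.

import numpy as np
import chainer.functions as F

# Toy logits for a batch of 3 examples over 4 classes, plus integer labels.
logits = np.array([[2.0, 0.5, 0.1, -1.0],
                   [0.0, 3.0, 0.2,  0.1],
                   [1.0, 1.0, 1.0,  1.0]], dtype=np.float32)
labels = np.array([0, 1, 2], dtype=np.int32)

# Mean negative log-likelihood after a softmax over the class axis.
loss = F.softmax_cross_entropy(logits, labels)
print(loss.data)  # scalar float32

# reduce='no' keeps the per-example losses; ignore_label (default -1)
# masks padded positions out of the average.
per_example = F.softmax_cross_entropy(logits, labels, reduce='no')
print(per_example.data.shape)  # (3,)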
def encode(self, x_input, x_query, answer):
    m = self.encode_input(x_input)   # input memory vectors
    u = self.encode_query(x_query)   # query vector
    # Attention over the memories: p_i = softmax(m_i . u).
    mu = functions.matmul(m, u, transb=True)
    p = functions.softmax(mu)
    c = self.encode_output(x_input)  # output memory vectors
    # Attention-weighted read: o = sum_i p_i * c_i.
    o = functions.matmul(functions.swapaxes(c, 1, 0), p)
    o = functions.swapaxes(o, 1, 0)
    predict = self.W(u + o)
    loss = functions.softmax_cross_entropy(predict, answer)
    return loss
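The matmul/softmax/matmul sequence in encode is dot-product attention over a memory. A self-contained numeric sketch of the same three steps, with arbitrary 2D shapes (the snippet's swapaxes calls handle its batched layout):

import numpy as np
import chainer.functions as F

n_mem, dim = 5, 50
m = np.random.randn(n_mem, dim).astype(np.float32)  # input memories
u = np.random.randn(1, dim).astype(np.float32)      # query

# Attention weights: p_i = softmax_i(m_i . u), shape (n_mem, 1).
p = F.softmax(F.matmul(m, u, transb=True), axis=0)

# Weighted read of the output memories: o = sum_i p_i * c_i, shape (1, dim).
c = np.random.randn(n_mem, dim).astype(np.float32)
o = F.matmul(p, c, transa=True)
print(o.shape)  # (1, 50)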
def __call__(self, x, t, predict=False):
    # Pre-v2 Chainer API: batch norm takes test=, dropout takes train=.
    h = self.bn1(self.conv1(x), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 2, stride=2)
    h = self.bn2(self.conv2(h), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 2, stride=2)
    h = F.dropout(F.relu(self.conv3(h)), ratio=0.6, train=self.train)
    h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
    h = F.average_pooling_2d(F.relu(self.conv5(h)), 3, stride=1)
    h = F.dropout(F.relu(self.fc6(h)), ratio=0.6, train=self.train)
    h = self.fc7(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    if predict:
        return h
    else:
        return self.loss
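This snippet threads test=/train= flags through BatchNormalization and dropout, the convention Chainer removed in v2 in favour of a global configuration. A minimal self-contained sketch of the v2+ equivalent (TinyNet is illustrative, not from the snippet):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

class TinyNet(chainer.Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.fc = L.Linear(None, 10)

    def __call__(self, x):
        # Dropout reads chainer.config.train instead of a train= keyword.
        return self.fc(F.dropout(x, ratio=0.5))

model = TinyNet()
x = np.zeros((2, 32), dtype=np.float32)

y_train = model(x)  # config.train is True by default: dropout active
with chainer.using_config('train', False), chainer.no_backprop_mode():
    y_eval = model(x)  # dropout disabled, no graph is built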
def __call__(self, x, t):
    # For classification with a softmax output, use softmax_cross_entropy,
    # which fuses the softmax and the cross-entropy into one stable call.
    h = self.fwd(x)
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
def __call__(self, x, t):
    h = F.relu(self.l1(x))
    h = self.l2(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def __call__(self, x, t=None):
    h = x
    h = F.relu(self.conv1_1(h))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv5_1(h))
    h = F.relu(self.conv5_2(h))
    h = F.relu(self.conv5_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), ratio=0.5)
    h = F.dropout(F.relu(self.fc7(h)), ratio=0.5)
    h = self.fc8(h)
    self.score = h
    if t is None:
        assert not chainer.config.train
        return
    self.loss = F.softmax_cross_entropy(self.score, t)
    self.accuracy = F.accuracy(self.score, t)
    return self.loss
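Because the forward pass stashes its logits in self.score, the chain can be used for inference by calling it without t. A minimal usage sketch, where model stands for an instance of the chain above:

import numpy as np
import chainer
import chainer.functions as F

x = np.zeros((1, 3, 224, 224), dtype=np.float32)  # one RGB image

# t=None requires eval mode (see the assert) and skips the loss.
with chainer.using_config('train', False), chainer.no_backprop_mode():
    model(x)                        # fills model.score, returns None
    probs = F.softmax(model.score)  # (1, n_classes) class probabilities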
def decode(self, t_vec, t_pred, wei_arr=None):
    ys_d = self.dec(t_vec)
    ys_w = self.h2w(F.concat(ys_d, axis=0))
    # Flatten the per-sequence targets into a single label vector.
    t_all = []
    for t_each in t_pred:
        t_all += t_each.tolist()
    t_all = xp.array(t_all, dtype=xp.int32)
    if wei_arr is None:
        loss = F.softmax_cross_entropy(ys_w, t_all)
    else:
        # Cumulative sequence boundaries, for per-sequence weighting.
        sec_arr = np.array([ys_d_e.data.shape[0] for ys_d_e in ys_d[:-1]])
        sec_arr = np.cumsum(sec_arr)
        loss = weighted_cross_entropy(ys_w, t_all, wei_arr, sec_arr)
    return loss
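weighted_cross_entropy is a helper defined elsewhere in the project; from the call site, it appears to weight each sequence's token losses by wei_arr, with sec_arr holding the cumulative boundaries between sequences. A minimal sketch of one plausible implementation (an assumption, not the project's actual helper), using reduce='no' to obtain per-token losses:

import numpy as np
import chainer.functions as F

def weighted_cross_entropy(ys_w, t_all, wei_arr, sec_arr):
    # Per-token negative log-likelihoods, shape (n_tokens,).
    tok_loss = F.softmax_cross_entropy(ys_w, t_all, reduce='no')
    # Expand per-sequence weights to per-token weights. For example,
    # three sequences of lengths 2, 3, 1 give sec_arr = [2, 5] and
    # bounds = [0, 2, 5, 6].
    bounds = np.concatenate([[0], sec_arr, [len(t_all)]])
    lengths = np.diff(bounds).astype(np.int32)
    tok_weights = np.repeat(np.asarray(wei_arr, dtype=np.float32), lengths)
    return F.sum(tok_loss * tok_weights) / len(t_all)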
def lossClassifier(self, lossFun="softmaxcrossentropy", *args):
    if lossFun == "softmaxcrossentropy":
        return F.softmax_cross_entropy(*args)
def __call__(self, x, t, train=True, finetune=False):
    h = self.l1(x, train, finetune)
    # h = F.dropout(h, self.dr, train)
    h = F.max(h, axis=-3, keepdims=False)  # max over the third-from-last (e.g. group) axis
    h = self.l2(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = self.l3(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    # h = F.dropout(h, self.dr, train)
    h = self.l4(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    # h = F.dropout(h, self.dr, train)
    h = self.l5(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    # h = F.dropout(h, self.dr, train)
    h = self.l6(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    h = self.top(h)
    h = F.max(h, axis=-3, keepdims=False)
    # Global max over the remaining spatial axes before the loss.
    h = F.max(h, axis=-1, keepdims=False)
    h = F.max(h, axis=-1, keepdims=False)
    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
def calc_loss(self, y, t):
    loss = F.softmax_cross_entropy(y, t)
    return loss
def calc_loss(y, t):
    loss = F.softmax_cross_entropy(y, t)
    return loss
def calc_loss(self, y, t):
    loss = F.softmax_cross_entropy(y, t)
    return loss
def __call__(self, x, t):
    self.y = self.predictor(x, self.train_depth)
    if hasattr(self, 'class_weight'):
        # Move the class weights to the input's device once, if needed.
        if isinstance(x.data, cuda.cupy.ndarray) \
                and not isinstance(self.class_weight, cuda.cupy.ndarray):
            self.class_weight = cuda.to_gpu(
                self.class_weight, device=x.data.device)
        self.loss = F.softmax_cross_entropy(
            self.y, t, class_weight=self.class_weight)
    else:
        self.loss = F.softmax_cross_entropy(self.y, t)
    reporter.report({'loss': self.loss}, self)
    return self.loss
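class_weight itself is built outside the chain; one common recipe for imbalanced label sets is inverse-frequency weighting. A self-contained sketch (the weighting scheme is an assumption, not necessarily this project's choice):

import numpy as np

def inverse_frequency_weights(labels, n_classes, ignore_label=-1):
    # labels: integer array of training targets; rarer classes get
    # proportionally larger weights so the loss doesn't ignore them.
    valid = labels[labels != ignore_label]
    counts = np.bincount(valid, minlength=n_classes).astype(np.float64)
    weights = counts.sum() / (n_classes * np.maximum(counts, 1))
    return weights.astype(np.float32)

class_weight = inverse_frequency_weights(
    np.array([0, 0, 0, 1, 2, 2], dtype=np.int32), n_classes=3)
print(class_weight)  # the rarest class (1) gets the largest weight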
def __call__(self, xs):
    """
    xs: [(w, s, p, cat_t, dep_t), ...]
    w: words, s/p: extra token features,
    cat_t: category (tagging) targets, dep_t: dependency (parsing) targets
    """
    batchsize = len(xs)
    ws, ss, ps, cat_ts, dep_ts = zip(*xs)
    cat_ys, dep_ys = self.forward(ws, ss, ps)
    cat_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
    cat_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
    dep_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
    dep_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
    cat_acc /= batchsize
    dep_acc /= batchsize
    chainer.report({
        "tagging_loss": cat_loss,
        "tagging_accuracy": cat_acc,
        "parsing_loss": dep_loss,
        "parsing_accuracy": dep_acc
    }, self)
    return cat_loss + dep_loss
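A side note on the summation idiom used throughout these parser snippets: reduce over + sums a list of Chainer variables, which requires from functools import reduce on Python 3; the built-in sum does the same when seeded with the first variable. A small self-contained equivalence check (toy shapes, illustrative names):

from functools import reduce  # required on Python 3
import numpy as np
import chainer.functions as F

losses = [F.softmax_cross_entropy(
              np.random.randn(4, 10).astype(np.float32),
              np.random.randint(0, 10, size=4).astype(np.int32))
          for _ in range(3)]

total_a = reduce(lambda x, y: x + y, losses)
total_b = sum(losses[1:], losses[0])  # equivalent, no import needed
assert np.allclose(total_a.data, total_b.data)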
def __call__(self, xs):
    """
    xs: [(w, c, l, cat_t, dep_t), ...]
    w: words, c: chars, l: lengths,
    cat_t: category (tagging) targets, dep_t: dependency (parsing) targets
    """
    batchsize = len(xs)
    ws, cs, ls, cat_ts, dep_ts = zip(*xs)
    # During training, gold dependencies are fed to the forward pass.
    cat_ys, dep_ys = self.forward(ws, cs, ls, dep_ts if self.train else None)
    cat_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
    cat_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
    dep_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
    dep_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
    cat_acc /= batchsize
    dep_acc /= batchsize
    chainer.report({
        "tagging_loss": cat_loss,
        "tagging_accuracy": cat_acc,
        "parsing_loss": dep_loss,
        "parsing_accuracy": dep_acc
    }, self)
    return cat_loss + dep_loss
def __call__(self, xs):
    """
    xs: [(w, s, p, l, cat_t, dep_t), ...] or, with per-example weights,
    [(w, s, p, l, cat_t, dep_t, weight), ...]
    """
    batchsize = len(xs)
    if len(xs[0]) == 6:
        ws, ss, ps, ls, cat_ts, dep_ts = zip(*xs)
        xp = chainer.cuda.get_array_module(ws[0])
        # No weights supplied: fall back to a uniform weight of 1.
        weights = [xp.array(1, 'f') for _ in xs]
    else:
        ws, ss, ps, ls, cat_ts, dep_ts, weights = zip(*xs)
    cat_ys, dep_ys = self.forward(ws, ss, ps, ls, dep_ts if self.train else None)
    cat_loss = reduce(lambda x, y: x + y,
        [we * F.softmax_cross_entropy(y, t)
         for y, t, we in zip(cat_ys, cat_ts, weights)])
    cat_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE)
         for y, t in zip(cat_ys, cat_ts)]) / batchsize
    dep_loss = reduce(lambda x, y: x + y,
        [we * F.softmax_cross_entropy(y, t)
         for y, t, we in zip(dep_ys, dep_ts, weights)])
    dep_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE)
         for y, t in zip(dep_ys, dep_ts)]) / batchsize
    chainer.report({
        "tagging_loss": cat_loss,
        "tagging_accuracy": cat_acc,
        "parsing_loss": dep_loss,
        "parsing_accuracy": dep_acc
    }, self)
    return cat_loss + dep_loss
def __call__(self, ws, cs, cat_ts, dep_ts):
    batchsize, length = cat_ts.shape
    cat_ys, dep_ys = self.forward(ws, cs)
    cat_ys = cat_ys[1:-1]
    cat_ts = [F.reshape(x, (batchsize,)) for x
              in F.split_axis(F.transpose(cat_ts), length, 0)]
    assert len(cat_ys) == len(cat_ts)
    cat_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
    cat_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
    # hs [(length, hidden_dim), ...]
    dep_ys = [x[1:-1] for x in dep_ys]
    dep_ts = [F.reshape(x, (length,)) for x in F.split_axis(dep_ts, batchsize, 0)]
    dep_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
    dep_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
    cat_acc /= length
    dep_acc /= batchsize
    chainer.report({
        "tagging_loss": cat_loss,
        "tagging_accuracy": cat_acc,
        "parsing_loss": dep_loss,
        "parsing_accuracy": dep_acc
    }, self)
    return cat_loss + dep_loss
def __call__(self, xs):
    """
    xs: [(w, s, p, cat_t, dep_t), ...]
    w: words, s/p: extra token features,
    cat_t: category (tagging) targets, dep_t: dependency (parsing) targets
    """
    batchsize = len(xs)
    ws, ss, ps, cat_ts, dep_ts = zip(*xs)
    cat_ys, dep_ys = self.forward(ws, ss, ps)
    cat_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
    cat_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])
    dep_loss = reduce(lambda x, y: x + y,
        [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
    dep_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])
    cat_acc /= batchsize
    dep_acc /= batchsize
    chainer.report({
        "tagging_loss": cat_loss,
        "tagging_accuracy": cat_acc,
        "parsing_loss": dep_loss,
        "parsing_accuracy": dep_acc
    }, self)
    return cat_loss + dep_loss
def __call__(self, xs):
    """
    xs: [(w, s, p, cat_t, dep_t), ...] or, with per-example weights,
    [(w, s, p, cat_t, dep_t, weight), ...]
    """
    batchsize = len(xs)
    if len(xs[0]) == 5:
        ws, ss, ps, cat_ts, dep_ts = zip(*xs)
        xp = chainer.cuda.get_array_module(ws[0])
        # No weights supplied: fall back to a uniform weight of 1.
        weights = [xp.array(1, 'f') for _ in xs]
    else:
        ws, ss, ps, cat_ts, dep_ts, weights = zip(*xs)
    cat_ys, dep_ys = self.forward(ws, ss, ps, dep_ts if self.train else None)
    cat_loss = reduce(lambda x, y: x + y,
        [we * F.softmax_cross_entropy(y, t)
         for y, t, we in zip(cat_ys, cat_ts, weights)])
    cat_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE)
         for y, t in zip(cat_ys, cat_ts)]) / batchsize
    dep_loss = reduce(lambda x, y: x + y,
        [we * F.softmax_cross_entropy(y, t)
         for y, t, we in zip(dep_ys, dep_ts, weights)])
    dep_acc = reduce(lambda x, y: x + y,
        [F.accuracy(y, t, ignore_label=IGNORE)
         for y, t in zip(dep_ys, dep_ts)]) / batchsize
    chainer.report({
        "tagging_loss": cat_loss,
        "tagging_accuracy": cat_acc,
        "parsing_loss": dep_loss,
        "parsing_accuracy": dep_acc
    }, self)
    return cat_loss + dep_loss