Python accuracy() example source code
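The snippets below are collected from real Chainer projects and show chainer.functions.accuracy (imported as F.accuracy) in context. As a quick orientation, here is a minimal sketch, not taken from any of the projects below: F.accuracy takes raw logits of shape (batchsize, n_classes) and integer labels of shape (batchsize,), and returns the fraction of correct argmax predictions as a scalar Variable.

    import numpy as np
    import chainer.functions as F

    # Row 0 predicts class 0 (correct); row 1 predicts class 1, but the label is 2.
    logits = np.array([[2.0, 0.1, 0.3],
                       [0.2, 1.5, 0.1]], dtype=np.float32)
    labels = np.array([0, 2], dtype=np.int32)
    acc = F.accuracy(logits, labels)
    print(acc.data)  # 0.5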

P4MAllCNNC.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8 * 8

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
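The three F.sum calls plus the division implement global average pooling over the group and spatial axes. Assuming the output of l9 has shape (batchsize, n_classes, 8, 8, 8), i.e. the 8 transformations of the p4m group and an 8x8 feature map (a shape assumption, not visible in the snippet), the pattern reduces to a plain mean:

    import numpy as np
    import chainer.functions as F

    h = np.random.rand(2, 10, 8, 8, 8).astype(np.float32)  # hypothetical shape
    pooled = F.sum(F.sum(F.sum(h, axis=-1), axis=-1), axis=-1) / (8 * 8 * 8)
    assert np.allclose(pooled.data, h.mean(axis=(2, 3, 4)), atol=1e-5)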
P4AllCNNC.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8 * 4

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
P4MResNet.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        # First conv layer
        h = self[0](x)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        n, nc, ns, nx, ny = h.data.shape
        h = F.reshape(h, (n, nc * ns, nx, ny))
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
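Note the reshape before pooling: the group-convolutional feature map carries an extra stabilizer axis, so the (n, nc, ns, nx, ny) tensor is folded into (n, nc * ns, nx, ny) to make it acceptable to the ordinary 2D average pooling and the final layer.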
AllCNNC.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
ResNet.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = x

        # First conv layer
        h = self[0](h)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Z2CNN.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = self.l1(x, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l2(h, train, finetune)

        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0, cover_all=True, use_cudnn=True)

        h = self.l3(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l4(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l5(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, self.dr, train)

        h = self.top(h)

        h = F.max(h, axis=-1, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
chainer_model.py (project: biaffineparser, author: chantera)
def compute_accuracy(self, y, t):
        arc_logits, label_logits = y
        true_arcs, true_labels = t.T

        b, l1, l2 = arc_logits.shape
        true_arcs = F.pad_sequence(true_arcs, padding=-1)
        if not self.model._cpu:
            true_arcs.to_gpu()
        arc_accuracy = F.accuracy(
            F.reshape(arc_logits, (b * l1, l2)),
            F.reshape(true_arcs, (b * l1,)),
            ignore_label=-1)

        b, l1, d = label_logits.shape
        true_labels = F.pad_sequence(true_labels, padding=-1)
        if not self.model._cpu:
            true_labels.to_gpu()
        label_accuracy = F.accuracy(
            F.reshape(label_logits, (b * l1, d)),
            F.reshape(true_labels, (b * l1,)),
            ignore_label=-1)

        accuracy = (arc_accuracy + label_accuracy) / 2
        return accuracy
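Both accuracy calls depend on ignore_label=-1, which is why the gold sequences are padded with -1: padded positions are excluded from both the numerator and the denominator. A minimal sketch of that behaviour, with made-up values:

    import numpy as np
    import chainer.functions as F

    logits = np.array([[3.0, 0.0],
                       [0.0, 3.0],
                       [9.9, 0.0]], dtype=np.float32)
    labels = np.array([0, 1, -1], dtype=np.int32)  # third position is padding
    acc = F.accuracy(logits, labels, ignore_label=-1)
    print(acc.data)  # 1.0 -- only the two real positions are scored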
lstm_tagger.py (project: depccg, author: masashi-y)
def __call__(self, xs):
        """
        xs [(w,s,p,y), ..., ]
        w: word, s: suffix, p: prefix, y: label
        """
        batchsize = len(xs)
        ws, ss, ps, ts = zip(*xs)
        ys = self.forward(ws, ss, ps)
        loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= batchsize
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
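The depccg snippets sum per-sentence losses and accuracies with reduce. reduce is a builtin only on Python 2; on Python 3 these files need the import below (their import sections are not shown here):

    from functools import reduce  # reduce() lives in functools on Python 3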
ja_lstm_parser.py (project: depccg, author: masashi-y)
def __call__(self, xs):
        batchsize = len(xs)
        ws, cs, ls, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, cs, ls)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
lstm_tagger_old.py (project: depccg, author: masashi-y)
def __call__(self, xs):
        """
        xs [(w,s,p,y), ..., ]
        w: word, s: suffix, p: prefix, y: label
        """
        batchsize = len(xs)
        ws, ss, ps, ts = zip(*xs)
        ys = self.forward(ws, ss, ps)
        loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= batchsize
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
tagger.py (project: depccg, author: masashi-y)
def __call__(self, xs, ts):
        """
        Inputs:
            xs (tuple(Variable, Variable, Variable)):
                each of Variables is of dim (batchsize,)
            ts Variable:
                (batchsize)
        """
        words, suffixes, caps = xs[:, :7], xs[:, 7:14], xs[:, 14:]
        h_w = self.emb_word(words)
        h_c = self.emb_caps(caps)
        h_s = self.emb_suffix(suffixes)
        h = F.concat([h_w, h_c, h_s], 2)
        batchsize, ntokens, hidden = h.data.shape
        h = F.reshape(h, (batchsize, ntokens * hidden))
        ys = self.linear(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)

        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
japanese_tagger.py (project: depccg, author: masashi-y)
def __call__(self, ws, cs, ls, ts):
        h_w = self.emb_word(ws)  # (batchsize, windowsize, word_dim)
        h_c = self.emb_char(cs) # (batchsize, windowsize, max_char_len, char_dim)
        batchsize, windowsize, _, _ = h_c.data.shape
        # (batchsize, windowsize, char_dim)
        h_c = F.sum(h_c, 2)
        h_c, ls = F.broadcast(h_c, F.reshape(ls, (batchsize, windowsize, 1)))
        h_c = h_c / ls
        h = F.concat([h_w, h_c], 2)
        h = F.reshape(h, (batchsize, -1))
        # ys = self.linear1(h)
        h = F.relu(self.linear1(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        ys = self.linear2(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
mdl_rgb_d.py (project: MultimodalDL, author: masataka46)
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
error.py (project: chainer-qrnn, author: musyoku)
def compute_accuracy(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        acc = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in range(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute accuracy
        for batch_index, batch in enumerate(sections):
            printr("computing accuracy ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            acc.append(compute_accuracy_batch(model, batch))

        result.append(sum(acc) / len(acc))
        printr("")

    return result
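The splitting arithmetic is easy to misread: np.split cuts before each listed index, so indices must name every batch boundary except the end of the data. A standalone check with hypothetical sizes:

    import numpy as np

    dataset = np.arange(250).reshape(250, 1)  # hypothetical 250-sample bucket
    batchsize = 100
    num_sections = len(dataset) // batchsize - 1
    if len(dataset) % batchsize > 0:
        num_sections += 1
    indices = [(i + 1) * batchsize for i in range(num_sections)]
    sections = np.split(dataset, indices, axis=0)
    print([len(s) for s in sections])  # [100, 100, 50]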
error.py (project: chainer-qrnn, author: musyoku)
def compute_perplexity(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        ppl = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in range(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute perplexity
        for batch_index, batch in enumerate(sections):
            sys.stdout.write("\rcomputing perplexity ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            sys.stdout.flush()
            ppl.append(compute_perplexity_batch(model, batch))

        result.append(sum(ppl) / len(ppl))

        sys.stdout.write("\r" + stdout.CLEAR)
        sys.stdout.flush()
    return result
alexbn.py (project: chainer-deconv, author: germanRos)
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
alex.py (project: chainer-deconv, author: germanRos)
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
base_network.py (project: ImageCaptioning, author: rkuga)
def step(self, perm, batch_index, mode, epoch):
        if mode == 'train':
            data, label = self.read_batch(perm, batch_index, self.train_data)
        else:
            data, label = self.read_batch(perm, batch_index, self.test_data)

        data = Variable(cuda.to_gpu(data))
        yl = self.network(data)

        label = Variable(cuda.to_gpu(label))

        L_network = F.softmax_cross_entropy(yl, label)
        A_network = F.accuracy(yl, label)

        if mode == 'train':
            self.o_network.zero_grads()
            L_network.backward()
            self.o_network.update()

        return {"prediction": yl.data.get(),
                "current_loss": L_network.data.get(),
                "current_accuracy": A_network.data.get(),
                }
error.py (project: chainer-glu, author: musyoku)
def compute_accuracy(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        acc = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in xrange(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute accuracy
        for batch_index, batch in enumerate(sections):
            sys.stdout.write("\rcomputing accuracy ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            sys.stdout.flush()
            acc.append(compute_accuracy_batch(model, batch))

        result.append(sum(acc) / len(acc))
        sys.stdout.write("\r" + stdout.CLEAR)
        sys.stdout.flush()

    return result
error.py (project: chainer-glu, author: musyoku)
def compute_perplexity(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        ppl = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in xrange(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute perplexity
        for batch_index, batch in enumerate(sections):
            sys.stdout.write("\rcomputing perplexity ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            sys.stdout.flush()
            ppl.append(compute_perplexity_batch(model, batch))

        result.append(sum(ppl) / len(ppl))
        sys.stdout.write("\r" + stdout.CLEAR)
        sys.stdout.flush()
    return result
ewc_mnist.py (project: chainer-EWC, author: okdshin)
def __call__(self, *args):
        x = args[:-1]
        t = args[-1]
        self.y = None
        self.loss = None
        self.accuracy = None
        self.y = self.predictor(*x)
        self.loss = F.softmax_cross_entropy(self.y, t)

        if self.stored_variable_list is not None and \
                self.fisher_list is not None:  # i.e. Stored
            for i in range(len(self.variable_list)):
                self.loss += self.lam/2. * F.sum(
                        self.fisher_list[i] *
                        F.square(self.variable_list[i][1] -
                                 self.stored_variable_list[i]))
        reporter.report({'loss': self.loss}, self)
        if self.compute_accuracy:
            self.accuracy = F.accuracy(self.y, t)
            reporter.report({'accuracy': self.accuracy}, self)
        return self.loss
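The loop adds the elastic weight consolidation penalty lam/2 * sum_i(F_i * (theta_i - theta_i_star)^2), where F_i is the stored Fisher information for parameter i and theta_i_star its value saved after the previous task; parameters that were important for the old task are thus pulled back toward their stored values while the new task is learned.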
ResNet50.py (project: chainer-caption, author: apple2373)
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h, self.train)
        h = self.res3(h, self.train)
        h = self.res4(h, self.train)
        h = self.res5(h, self.train)
        h = F.average_pooling_2d(h, 7, stride=1)
        if t=="feature":
            return h
        h = self.fc(h)

        if self.train:
            self.loss = F.softmax_cross_entropy(h, t)
            self.accuracy = F.accuracy(h, t)
            return self.loss
        else:
            return h
model_cnn.py (project: cifar-10, author: shiba24)
def __call__(self, x, t, predict=False):
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 2, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 2, stride=2)
        h = F.dropout(F.relu(self.conv3(h)), ratio=0.6, train=self.train)
        h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
        h = F.average_pooling_2d(F.relu(self.conv5(h)), 3, stride=1)
        h = F.dropout(F.relu(self.fc6(h)), ratio=0.6, train=self.train)
        h = self.fc7(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        if predict:
            return h
        else:
            return self.loss
voxelchain.py (project: voxcelchain, author: hiroaki-kaneda)
def __call__(self, x, t):
        # To solve the classification problem with "softmax", use "softmax_cross_entropy".
        h = self.fwd(x)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
chainer_usage.py (project: nelder_mead, author: owruby)
def __call__(self, x, t):
        h = F.relu(self.l1(x))
        h = self.l2(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)

        return self.loss
yolov2_train_class_caltech.py (project: chainer-object-detection, author: dsanno)
def evaluate(model, dataset, crop_margin, test_size):
    xp = model.xp
    iterator = chainer.iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)
    acc_sum = 0
    iteration = 0
    for batch in iterator:
        image_batch = []
        label_batch = []
        for image_path, category_id, _ in batch:
            image = load_image(image_path)
            image_width, image_height = image.size
            crop_size = min(image_width, image_height) - crop_margin
            crop_rect = ((image_width - crop_size) // 2, (image_height - crop_size) // 2, crop_size, crop_size)
#            input_size = test_size
            input_size = int(round(crop_size / 32.0) * 32)
            if input_size < 64:
                input_size = 64
            elif input_size > test_size:
                input_size = test_size
            image_batch.append(transform_image(image, crop_rect, input_size))
            label_batch.append(category_id)

        x = xp.asarray(image_batch)
        t = xp.asarray(label_batch)

        with chainer.using_config('enable_backprop', False):
            with chainer.using_config('train', False):
                y = model(x)
        acc = F.accuracy(y, t)
        acc_sum += float(acc.data)
    return acc_sum / len(dataset)
yolov2_train_class.py (project: chainer-object-detection, author: dsanno)
def evaluate(model, dataset, crop_margin, test_size, batch_size):
    xp = model.xp
    iterator = chainer.iterators.SerialIterator(dataset, batch_size, repeat=False, shuffle=False)
    acc_sum = 0
    iteration = 0
    for batch in iterator:
        image_batch = []
        label_batch = []
        for image_path, category_id, _ in batch:
            image = load_image(image_path)
            image_width, image_height = image.size
            crop_size = min(image_width, image_height) - crop_margin
            crop_rect = ((image_width - crop_size) // 2, (image_height - crop_size) // 2, crop_size, crop_size)
            input_size = test_size
            image_batch.append(transform_image(image, crop_rect, input_size))
            label_batch.append(category_id)

        x = xp.asarray(image_batch)
        t = xp.asarray(label_batch)

        with chainer.using_config('enable_backprop', False):
            with chainer.using_config('train', False):
                y = model(x)
        acc = F.accuracy(y, t)
        acc_sum += float(acc.data) * batch_size
    return acc_sum / len(dataset)
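One caveat: acc_sum += float(acc.data) * batch_size assumes every batch, including the last one, holds exactly batch_size samples. SerialIterator with repeat=False can yield a smaller final batch, in which case dividing by len(dataset) misweights it slightly; multiplying by len(batch) instead would be exact.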
vgg16.py (project: fcn, author: wkentaro)
def __call__(self, x, t=None):
        h = x
        h = F.relu(self.conv1_1(h))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.dropout(F.relu(self.fc6(h)), ratio=.5)
        h = F.dropout(F.relu(self.fc7(h)), ratio=.5)
        h = self.fc8(h)
        fc8 = h

        self.score = fc8

        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(fc8, t)
        self.accuracy = F.accuracy(self.score, t)
        return self.loss
P4CNN_RP.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = self.l1(x, train, finetune)
        # h = F.dropout(h, self.dr, train)
        h = F.max(h, axis=-3, keepdims=False)

        h = self.l2(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)

        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)

        h = self.l3(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)

        # h = F.dropout(h, self.dr, train)
        h = self.l4(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)
        # h = F.dropout(h, self.dr, train)
        h = self.l5(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)
        # h = F.dropout(h, self.dr, train)
        h = self.l6(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)

        h = self.top(h)

        h = F.max(h, axis=-3, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
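The recurring F.max(h, axis=-3) pools over the rotation axis produced by each P4 convolution (the RP suffix presumably stands for rotation pooling): only the strongest response across the four rotated filter copies is kept before the next layer.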
lstm_parser_old.py (project: depccg, author: masashi-y)
def __call__(self, xs):
        """
        xs [(w,s,p,y), ..., ]
        w: word, c: char, l: length, y: label
        """
        batchsize = len(xs)
        ws, ss, ps, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, ss, ps)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])


        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss

