Collected Python source examples of the `functions()` method (scraped snippets)

middle_model.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def functions(self):
        """Ordered mapping from stage name to the callables applied at that stage.

        Each value is a list of callables executed in sequence; the entry order
        defines the forward pass of this (middle-sized) network.
        """
        layer_specs = [
            ('conv1', [self.conv1, self.bnorm1, F.relu]),
            ('conv2', [self.conv2, self.bnorm2, F.relu]),
            ('pool1', [_max_pooling_2d]),
            ('conv3', [self.conv3, self.bnorm3, F.relu]),
            ('conv4', [self.conv4, self.bnorm4, F.relu]),
            ('pool2', [_spatial_pyramid_pooling_2d]),
            ('fc', [self.fc]),
        ]
        return collections.OrderedDict(layer_specs)
middle_model.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def available_layers(self):
        """Return the layer names this model exposes, in forward order."""
        return [name for name in self.functions]
middle_model.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __call__(self, x):
        """Forward pass: apply every layer's callables to *x* in declared order."""
        out = x
        for funcs in self.functions.values():
            for fn in funcs:
                out = fn(out)
        return out
middle_model.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def extract(self, x, layers=None):
        """Run the forward pass and collect intermediate activations.

        Args:
            x: Input to the network.
            layers: Iterable of layer names whose outputs should be captured.
                Defaults to ``('conv4',)``.  (The original used a mutable list
                default, a classic Python pitfall; ``None`` sentinel is safer.)

        Returns:
            tuple: ``(activations, h)`` — captured outputs in forward order,
            and the final network output.
        """
        if layers is None:
            layers = ('conv4',)
        h = x
        activations = []
        remaining = set(layers)
        for key, funcs in self.functions.items():
            for func in funcs:
                h = func(h)
            if key in remaining:
                activations.append(h)
                remaining.remove(key)  # capture each requested layer at most once
        return activations, h
simple_model.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def functions(self):
        """Ordered mapping from stage name to the callables applied at that stage.

        Each value is a list of callables executed in sequence; the entry order
        defines the forward pass of this (simple) network.
        """
        layer_specs = [
            ('conv1', [self.conv1, self.bnorm1, F.relu]),
            ('pool1', [_max_pooling_2d]),
            ('conv2', [self.conv2, self.bnorm2, F.relu]),
            ('pool2', [_spatial_pyramid_pooling_2d]),
            ('fc', [self.fc]),
        ]
        return collections.OrderedDict(layer_specs)
simple_model.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __call__(self, x):
        """Forward pass: apply every layer's callables to *x* in declared order."""
        out = x
        for funcs in self.functions.values():
            for fn in funcs:
                out = fn(out)
        return out
simple_model.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def extract(self, x, layers=None):
        """Run the forward pass and collect intermediate activations.

        Args:
            x: Input to the network.
            layers: Iterable of layer names whose outputs should be captured.
                Defaults to ``('conv2',)``.  (The original used a mutable list
                default, a classic Python pitfall; ``None`` sentinel is safer.)

        Returns:
            tuple: ``(activations, h)`` — captured outputs in forward order,
            and the final network output.
        """
        if layers is None:
            layers = ('conv2',)
        h = x
        activations = []
        remaining = set(layers)
        for key, funcs in self.functions.items():
            for func in funcs:
                h = func(h)
            if key in remaining:
                activations.append(h)
                remaining.remove(key)  # capture each requested layer at most once
        return activations, h
guidedbackprop.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 16 收藏 0 点赞 0 评论 0
def __init__(self, model):
        """Wrap *model* and swap pattern-matched layers' final ReLU for a guided one.

        NOTE(review): ``self.pattern`` is presumably set by the base class'
        initializer — confirm against the superclass definition.
        """
        super(GuidedBackpropGrad, self).__init__(model)
        for name, funcs in model.predictor.functions.items():
            if re.match(self.pattern, name):
                # Replace the trailing activation with the guided variant.
                funcs[-1] = GuidedBackpropReLU()
guidedbackprop.py 文件源码 项目:fontkaruta_classifier 作者: suga93 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def __init__(self, model, stdev_spread=.15, n_samples=25, magnitude=True):
        """Configure SmoothGrad sampling and swap matched layers' final ReLU.

        NOTE(review): ``self.pattern`` is presumably set by the base class'
        initializer — confirm against the superclass definition.
        """
        super(GuidedBackpropSmoothGrad, self).__init__(
            model, stdev_spread, n_samples, magnitude)
        for name, funcs in model.predictor.functions.items():
            if re.match(self.pattern, name):
                # Replace the trailing activation with the guided variant.
                funcs[-1] = GuidedBackpropReLU()
Parallel_BiGRU.py 文件源码 项目:NANHM-for-GEC 作者: shinochin 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def translate(self, xs, max_length=100):
        """Greedily decode translations for a batch of source sentences.

        Args:
            xs: batch of source sentences; each element is iterable over words,
                and ``"".join(x)`` is used as its character sequence.
            max_length: maximum number of target tokens emitted per sentence.

        Returns:
            list of per-sentence arrays of target token ids, truncated at EOS.

        NOTE(review): relies on module-level globals ``source_word_ids``,
        ``source_char_ids``, ``UNK``, ``EOS`` and helper ``sequence_embed`` —
        assumed to be defined elsewhere in this file; confirm.
        """
        print("Now translating")
        batch = len(xs)
        print("batch",batch)
        # Inference mode: no gradient bookkeeping, test-time config for BN/dropout.
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            # Word-level ids per sentence; unknown words map to UNK.
            wxs = [np.array([source_word_ids.get(w, UNK) for w in x], dtype=np.int32) for x in xs]
            wx_len = [len(wx) for wx in wxs]
            wx_section = np.cumsum(wx_len[:-1])
            valid_wx_section = np.insert(wx_section, 0, 0)
            # Character-level ids over each sentence's concatenated string.
            cxs = [np.array([source_char_ids.get(c, UNK) for c in list("".join(x))], dtype=np.int32) for x in xs]

            wexs = sequence_embed(self.embed_xw, wxs)
            cexs = sequence_embed(self.embed_xc, cxs)

            # Forward and reversed copies for the bidirectional encoders.
            wexs_f = wexs
            wexs_b = [wex[::-1] for wex in wexs]
            cexs_f = cexs
            cexs_b = [cex[::-1] for cex in cexs]

            _, hfw = self.encoder_fw(None, wexs_f)
            _, hbw = self.encoder_bw(None, wexs_b)
            _, hfc = self.encoder_fc(None, cexs_f)
            _, hbc = self.encoder_bc(None, cexs_b)

            # Re-reverse backward outputs so positions align with the forward pass.
            hbw = [F.get_item(h, range(len(h))[::-1]) for h in hbw]
            hbc = [F.get_item(h, range(len(h))[::-1]) for h in hbc]
            # Concat forward/backward per level, then word- and char-level states.
            htw = list(map(lambda x,y: F.concat([x, y], axis=1), hfw, hbw))
            htc = list(map(lambda x,y: F.concat([x, y], axis=1), hfc, hbc))
            ht = list(map(lambda x,y: F.concat([x, y], axis=0), htw, htc))

            # Greedy decoding loop, seeded with an all-EOS target input.
            ys = self.xp.full(batch, EOS, 'i')
            result = []
            h=None
            for i in range(max_length):
                eys = self.embed_y(ys)
                eys = chainer.functions.split_axis(eys, batch, 0)
                h_list, h_bar_list, c_s_list, z_s_list = self.decoder(h, ht, eys)
                cys = chainer.functions.concat(h_list, axis=0)
                wy = self.W(cys)
                # Greedy argmax over the vocabulary for every batch element.
                ys = self.xp.argmax(wy.data, axis=1).astype('i')
                result.append(ys)
                # Carry the last hidden state forward as the next decoder state.
                h = F.transpose_sequence(h_list)[-1]
                h = F.reshape(h, (self.n_layers, h.shape[0], h.shape[1]))

        result = cuda.to_cpu(self.xp.stack(result).T)

        # Remove EOS tags: truncate each output at its first EOS token.
        outs = []
        for y in result:
            inds = np.argwhere(y == EOS)
            if len(inds) > 0:
                y = y[:inds[0, 0]]
            outs.append(y)
        return outs
JA_Parallel_BiGRU.py 文件源码 项目:NANHM-for-GEC 作者: shinochin 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def translate(self, xs, max_length=100):
        """Greedily decode translations for a batch of source sentences.

        Unlike the sibling implementation, the decoder's initial hidden state
        is built from the backward word/char encoders' final states (h1, h2)
        rather than starting from None.

        Args:
            xs: batch of source sentences; each element is iterable over words,
                and ``"".join(x)`` is used as its character sequence.
            max_length: maximum number of target tokens emitted per sentence.

        Returns:
            list of per-sentence arrays of target token ids, truncated at EOS.

        NOTE(review): relies on module-level globals ``source_word_ids``,
        ``source_char_ids``, ``UNK``, ``EOS`` and helper ``sequence_embed`` —
        assumed to be defined elsewhere in this file; confirm.
        """
        print("Now translating")
        batch = len(xs)
        print("batch",batch)
        # Inference mode: no gradient bookkeeping, test-time config for BN/dropout.
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            # Word-level ids per sentence; unknown words map to UNK.
            wxs = [np.array([source_word_ids.get(w, UNK) for w in x], dtype=np.int32) for x in xs]
            wx_len = [len(wx) for wx in wxs]
            wx_section = np.cumsum(wx_len[:-1])
            valid_wx_section = np.insert(wx_section, 0, 0)
            # Character-level ids over each sentence's concatenated string.
            cxs = [np.array([source_char_ids.get(c, UNK) for c in list("".join(x))], dtype=np.int32) for x in xs]

            wexs = sequence_embed(self.embed_xw, wxs)
            cexs = sequence_embed(self.embed_xc, cxs)

            # Forward and reversed copies for the bidirectional encoders.
            wexs_f = wexs
            wexs_b = [wex[::-1] for wex in wexs]
            cexs_f = cexs
            cexs_b = [cex[::-1] for cex in cexs]

            # h1/h2 (backward encoders' final states) seed the decoder below.
            _, hfw = self.encoder_fw(None, wexs_f)
            h1, hbw = self.encoder_bw(None, wexs_b)
            _, hfc = self.encoder_fc(None, cexs_f)
            h2, hbc = self.encoder_bc(None, cexs_b)

            # Re-reverse backward outputs so positions align with the forward pass.
            hbw = [F.get_item(h, range(len(h))[::-1]) for h in hbw]
            hbc = [F.get_item(h, range(len(h))[::-1]) for h in hbc]
            # Concat forward/backward per level, then word- and char-level states.
            htw = list(map(lambda x,y: F.concat([x, y], axis=1), hfw, hbw))
            htc = list(map(lambda x,y: F.concat([x, y], axis=1), hfc, hbc))
            ht = list(map(lambda x,y: F.concat([x, y], axis=0), htw, htc))

            # Greedy decoding loop, seeded with an all-EOS target input and the
            # concatenated backward-encoder states as the initial hidden state.
            ys = self.xp.full(batch, EOS, 'i')
            result = []
            h = F.concat([h1, h2], axis=2)
            for i in range(max_length):
                eys = self.embed_y(ys)
                eys = chainer.functions.split_axis(eys, batch, 0)
                h_list, h_bar_list, c_s_list, z_s_list = self.decoder(h, ht, eys)
                cys = chainer.functions.concat(h_list, axis=0)
                wy = self.W(cys)
                # Greedy argmax over the vocabulary for every batch element.
                ys = self.xp.argmax(wy.data, axis=1).astype('i')
                result.append(ys)
                # Carry the last hidden state forward as the next decoder state.
                h = F.transpose_sequence(h_list)[-1]
                h = F.reshape(h, (self.n_layers, h.shape[0], h.shape[1]))

        result = cuda.to_cpu(self.xp.stack(result).T)

        # Remove EOS tags: truncate each output at its first EOS token.
        outs = []
        for y in result:
            inds = np.argwhere(y == EOS)
            if len(inds) > 0:
                y = y[:inds[0, 0]]
            outs.append(y)
        return outs


问题


面经


文章

微信
公众号

扫码关注公众号