Python load_npz() example source code
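
The snippets below, collected from a number of Chainer projects, all call chainer.serializers.load_npz (imported as serializers, or aliased as S) to restore model or optimizer parameters from a NumPy .npz file previously written with save_npz. As a minimal, self-contained sketch of that round trip (the Linear link and the file name are illustrative assumptions, not taken from any project below):

import chainer.links as L
from chainer import serializers

# Any chainer.Link or chainer.Chain can be serialized the same way.
model = L.Linear(4, 2)

# Persist the parameters to NumPy's .npz format ...
serializers.save_npz('example_model.npz', model)

# ... and restore them into an object with the same structure.
restored = L.Linear(4, 2)
serializers.load_npz('example_model.npz', restored)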

multi_input.py (project: ddnn, author: kunglab)
def load_chain_model(self, **kwargs):
        name = self.get_name(**kwargs)
        path = '{}/{}'.format(self.folder,name)
        epoch = int(kwargs.get("nepochs",2))
        fn = "{}/chain_snapshot_epoch_{:06}".format(path,epoch)

        chain, model = self.setup_chain_model(**kwargs)
        S.load_npz(fn, chain)
        return chain, model
simple.py, float.py, float_float.py, and binary_base_fixed.py in the same project (ddnn, author: kunglab) contain an identical load_chain_model method; S is presumably an alias for chainer.serializers in all of these files.
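The ddnn loaders above read per-epoch snapshot files named chain_snapshot_epoch_000002 and similar. How those files are produced is not shown on this page; a plausible sketch using chainer.training.extensions.snapshot_object (an assumption, with a toy chain and dataset, not code from the ddnn repository):

import numpy as np
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions

# Toy chain and dataset, purely for illustration.
chain = L.Classifier(L.Linear(2, 2))
dataset = chainer.datasets.TupleDataset(
    np.random.rand(16, 2).astype(np.float32),
    np.random.randint(0, 2, size=16).astype(np.int32))
iterator = chainer.iterators.SerialIterator(dataset, batch_size=4)
optimizer = chainer.optimizers.Adam()
optimizer.setup(chain)
updater = training.StandardUpdater(iterator, optimizer)
trainer = training.Trainer(updater, (2, 'epoch'), out='results')

# Writes results/chain_snapshot_epoch_000001, ..._000002, ..., matching
# the "chain_snapshot_epoch_{:06}" pattern that load_chain_model reads back.
trainer.extend(
    extensions.snapshot_object(chain, 'chain_snapshot_epoch_{.updater.epoch:06}'),
    trigger=(1, 'epoch'))
trainer.run()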
fasterRCNN.py (project: deel, author: uei)
def get_model(gpu):
    model = FasterRCNN(gpu)
    model.train = False
    serializers.load_npz('misc/VGG16_faster_rcnn_final.model', model)

    return model
test.py (project: chainer-pix2pix, author: wuhuikai)
def main():
    parser = argparse.ArgumentParser(description='pix2pix --- GAN for Image to Image translation')
    parser.add_argument('--gpu', type=int, default=0, help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--load_size', type=int, default=256, help='Scale image to load_size')
    parser.add_argument('--g_filter_num', type=int, default=64, help="# of filters in G's 1st conv layer")
    parser.add_argument('--d_filter_num', type=int, default=64, help="# of filters in D's 1st conv layer")
    parser.add_argument('--output_channel', type=int, default=3, help='# of output image channels')
    parser.add_argument('--n_layers', type=int, default=3, help='# of hidden layers in D')
    parser.add_argument('--list_path', default='list/val_list.txt', help='Path for test list')
    parser.add_argument('--out', default='result/test', help='Directory to output the result')
    parser.add_argument('--G_path', default='result/G.npz', help='Path for pretrained G')
    args = parser.parse_args()

    if not os.path.isdir(args.out):
        os.makedirs(args.out)

    # Set up GAN G
    G = Generator(args.g_filter_num, args.output_channel)
    serializers.load_npz(args.G_path, G)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        G.to_gpu()                               # Copy the model to the GPU

    with open(args.list_path) as f:
        imgs = f.readlines()

    total = len(imgs)
    for idx, img_path in enumerate(imgs):
        print('{}/{} ...'.format(idx+1, total))

        img_path = img_path.strip().split(' ')[-1]
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)[:, :, ::-1]
        h, w, _ = img.shape
        img = np.asarray(Image.fromarray(img).resize((args.load_size, args.load_size), resample=Image.NEAREST), dtype=np.float32)
        img = np.transpose(img, (2, 0, 1))

        A = data_process([img], device=args.gpu, volatile='on')
        B = np.squeeze(output2img(G(A, test=True, dropout=False)))

        Image.fromarray(B).resize((w, h), resample=Image.BILINEAR).save(os.path.join(args.out, os.path.basename(img_path).replace('gtFine_labelIds', 'leftImg8bit')))
train.py (project: chainer-ADDA, author: pfnet-research)
def main(args):
    # get datasets
    source_train, source_test = chainer.datasets.get_svhn()
    target_train, target_test = chainer.datasets.get_mnist(ndim=3, rgb_format=True)
    source = source_train, source_test

    # resize mnist to 32x32
    def transform(in_data):
        img, label = in_data
        img = resize(img, (32, 32))
        return img, label

    target_train = TransformDataset(target_train, transform)
    target_test = TransformDataset(target_test, transform)

    target = target_train, target_test

    # load pretrained source, or perform pretraining
    pretrained = os.path.join(args.output, args.pretrained_source)
    if not os.path.isfile(pretrained):
        source_cnn = pretrain_source_cnn(source, args)
    else:
        source_cnn = Loss(num_classes=10)
        serializers.load_npz(pretrained, source_cnn)

    # how well does this perform on target domain?
    test_pretrained_on_target(source_cnn, target, args)

    # initialize the target cnn (do not use source_cnn.copy)
    target_cnn = Loss(num_classes=10)
    # copy parameters from source cnn to target cnn
    target_cnn.copyparams(source_cnn)

    train_target_cnn(source, target, source_cnn, target_cnn, args)
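target_cnn.copyparams(source_cnn) copies parameter arrays between two links that share the same structure, which is what transplants the pretrained source weights into the freshly built target CNN above. A minimal sketch of the behaviour (two plain Linear links, not the ADDA Loss chain):

import chainer.links as L

source = L.Linear(3, 2)
target = L.Linear(3, 2)

# target now holds copies of source's W and b arrays.
target.copyparams(source)
assert (target.W.data == source.W.data).all()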
predict_regression.py (project: chainer_sklearn, author: corochann)
def main():
    parser = argparse.ArgumentParser(description='Regression predict')
    parser.add_argument('--modelpath', '-m', default='result/mlp.model',
                        help='Model path to be loaded')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--unit', '-u', type=int, default=50,
                        help='Number of units')
    args = parser.parse_args()

    batchsize = 128

    # Load dataset
    data, target = load_data()
    X = data.reshape((-1, 1)).astype(np.float32)
    y = target.reshape((-1, 1)).astype(np.float32)

    # Load trained model
    model = SklearnWrapperRegressor(MLP(args.unit, 1), device=args.gpu)
    serializers.load_npz(args.modelpath, model)

    # --- Example 1. Predict all test data ---
    outputs = model.predict(X,
                            batchsize=batchsize,
                            retain_inputs=False,)

    # --- Plot result ---
    plt.figure()
    plt.scatter(X, y, label='actual')
    plt.plot(X, outputs, label='predict', color='red')
    plt.legend()
    plt.savefig('predict.png')  # save before show(); saving after the window closes can give a blank figure
    plt.show()
i_maze_rogue.py (project: chainer_frmqn, author: okdshin)
def load(self, name):
        serializers.load_npz(name+".model", self.dqn.model)
        serializers.load_npz(name+".optimizer", self.dqn.optimizer)
eval_qrnn.py (project: chainer-qrnn, author: butsugiri)
def main(args):
    # load config file and obtain embed dimension and hidden dimension
    with open(args.config_path, 'r') as fi:
        config = json.load(fi)
        embed_dim = config["dim"]
        hidden_dim = config["unit"]
        print("Embedding Dimension: {}\nHidden Dimension: {}\n".format(embed_dim, hidden_dim), file=sys.stderr)

    # load data
    dp = DataProcessor(data_path=config["data"], test_run=False)
    dp.prepare_dataset()

    # create model
    vocab = dp.vocab
    model = RecNetClassifier(QRNNLangModel(n_vocab=len(vocab), embed_dim=embed_dim, out_size=hidden_dim))

    # load parameters
    print("loading paramters to model...", end='', file=sys.stderr, flush=True)
    S.load_npz(filename=args.model_path, obj=model)
    print("done.", file=sys.stderr, flush=True)

    # create iterators from loaded data
    bprop_len = config["bproplen"]
    test_data = dp.test_data
    test_iter = ParallelSequentialIterator(test_data, 1, repeat=False, bprop_len=bprop_len)

    # evaluate the model
    print('testing...', end='', file=sys.stderr, flush=True)
    model.predictor.reset_state()
    model.predictor.train = False
    evaluator = extensions.Evaluator(test_iter, model, converter=convert)
    result = evaluator()
    print('done.\n', file=sys.stderr, flush=True)
    print('Perplexity: {}'.format(np.exp(float(result['main/loss']))), end='', file=sys.stderr, flush=True)
generate.py (project: cv-api, author: yasunorikudo)
def __init__(self):
        self._model = FastStyleNet()
        serializers.load_npz('composition.model', self._model)
        cuda.get_device(0).use()
        self._model.to_gpu()
resource.py (project: machine_learning_in_application, author: icoxfog417)
def load_model(self, model):
        if not os.path.exists(self.model_path):
            raise Exception("model file directory does not exist.")

        suffix = ".model"
        keyword = model.__class__.__name__.lower()
        candidates = []
        for f in os.listdir(self.model_path):
            if keyword in f and f.endswith(suffix):
                candidates.append(f)
        candidates.sort()
        latest = candidates[-1]
        #print("targets {}, pick up {}.".format(candidates, latest))
        model_file = os.path.join(self.model_path, latest)
        serializers.load_npz(model_file, model)
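load_model above picks the lexicographically newest file whose name contains the model's lower-cased class name and ends in ".model". Such files would typically be written with serializers.save_npz; a small sketch of that saving side (the directory and date suffix are assumptions):

import os
import chainer.links as L
from chainer import serializers

model = L.Linear(2, 2)                  # stands in for the real model
model_path = "./store"                  # hypothetical model directory
os.makedirs(model_path, exist_ok=True)

# e.g. "linear_20170101.model", which the keyword/suffix check would find.
file_name = "{}_{}.model".format(model.__class__.__name__.lower(), "20170101")
serializers.save_npz(os.path.join(model_path, file_name), model)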
video.py (project: chainer-fast-neuralstyle-video, author: gafr)
def _transform(in_image,loaded,m_path):
    if m_path == 'none':
        return in_image
    if not loaded:
        serializers.load_npz(m_path, model)
        if RUN_ON_GPU:
            cuda.get_device(0).use() #assuming only one core
            model.to_gpu()
        print "loaded"

    xp = np if not RUN_ON_GPU else cuda.cupy

    image = xp.asarray(in_image, dtype=xp.float32).transpose(2, 0, 1)
    image = image.reshape((1,) + image.shape)
    image -= 120

    x = Variable(image)
    y = model(x)

    result = cuda.to_cpu(y.data)
    result = result.transpose(0, 2, 3, 1)
    result = result.reshape((result.shape[1:]))
    result += 120
    result = np.uint8(result)

    return result
ewc_mnist.py (project: chainer-EWC, author: okdshin)
def train_tasks_continuosly(
        args, model, train, test, train2, test2, enable_ewc):
    # Train Task A or load trained model
    if os.path.exists("mlp_taskA.model") or args.skip_taskA:
        print("load taskA model")
        serializers.load_npz("./model50/mlp_taskA.model", model)
    else:
        print("train taskA")
        train_task(args, "train_task_a"+("_with_ewc" if enable_ewc else ""),
                   model, args.epoch, train,
                   {"TaskA": test}, args.batchsize)
        print("save the model")
        serializers.save_npz("mlp_taskA.model", model)

    if enable_ewc:
        print("enable EWC")
        model.compute_fisher(train)
        model.store_variables()

    # Train Task B
    print("train taskB")
    train_task(args, "train_task_ab"+("_with_ewc" if enable_ewc else ""),
               model, args.epoch, train2,
               {"TaskA": test, "TaskB": test2}, args.batchsize)
    print("save the model")
    serializers.save_npz(
            "mlp_taskAB"+("_with_ewc" if enable_ewc else "")+".model", model)
parse.py (project: blstm-cws, author: chantera)
def parse(model_file, embed_file):

    # Load files
    Log.i('initialize preprocessor with %s' % embed_file)
    processor = Preprocessor(embed_file)

    Log.v('')
    Log.v("initialize ...")
    Log.v('')

    with np.load(model_file) as f:
        embeddings = np.zeros(f['embed/W'].shape, dtype=np.float32)

    # Set up a neural network
    cls = BLSTMCRF if _use_crf else BLSTM
    model = cls(
        embeddings=embeddings,
        n_labels=4,
        dropout=0.2,
        train=False,
    )
    Log.i("loading a model from %s ..." % model_file)
    serializers.load_npz(model_file, model)

    LABELS = ['B', 'M', 'E', 'S']

    def _process(raw_text):
        if not raw_text:
            return
        xs = [processor.transform_one([c for c in raw_text])]
        ys = model.parse(xs)
        labels = [LABELS[y] for y in ys[0]]
        print(' '.join(labels))
        seq = []
        for c, label in zip(raw_text, labels):
            seq.append(c)
            if label == 'E' or label == 'S':
                seq.append(' ')
        print(''.join(seq))
        print('-')

    print("Input a Chinese sentence! (use 'q' to exit)")
    while True:
        x = input()
        if x == 'q':
            break
        _process(x)
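parse.py opens the snapshot once with np.load just to recover the embedding matrix shape ('embed/W') before constructing the network, and only then calls serializers.load_npz on the full model. The same trick works for inspecting any parameter stored by save_npz; a small sketch (the file name is an assumption):

import numpy as np

# Keys follow the link hierarchy, e.g. 'embed/W'.
with np.load('model.npz') as f:
    for key in sorted(f.files):
        print(key, f[key].shape, f[key].dtype)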
train.py (project: chainer-stack-gan, author: dsanno)
def main():
    args = parse_args()
    gen = net.Generator1()
    dis = net.Discriminator1()
    clip_rect = None
    if args.clip_rect:
        clip_rect = map(int, args.clip_rect.split(','))
        clip_rect = tuple([clip_rect[0], clip_rect[1], clip_rect[0] + clip_rect[2], clip_rect[1] + clip_rect[3]])

    gpu_device = None
    if args.gpu >= 0:
        device_id = args.gpu
        cuda.get_device(device_id).use()
        gen.to_gpu(device_id)
        dis.to_gpu(device_id)

    optimizer_gen = optimizers.Adam(alpha=0.001)
    optimizer_gen.setup(gen)
    optimizer_dis = optimizers.Adam(alpha=0.001)
    optimizer_dis.setup(dis)

    if args.input != None:
        serializers.load_npz(args.input + '.gen.model', gen)
        serializers.load_npz(args.input + '.gen.state', optimizer_gen)
        serializers.load_npz(args.input + '.dis.model', dis)
        serializers.load_npz(args.input + '.dis.state', optimizer_dis)

    if args.out_image_dir != None:
        if not os.path.exists(args.out_image_dir):
            try:
                os.mkdir(args.out_image_dir)
            except:
                print 'cannot make directory {}'.format(args.out_image_dir)
                exit()
        elif not os.path.isdir(args.out_image_dir):
            print 'file path {} exists but is not directory'.format(args.out_image_dir)
            exit()

    with open(args.dataset, 'rb') as f:
        images = pickle.load(f)

    train(gen, dis, optimizer_gen, optimizer_dis, images, args.epoch, batch_size=args.batch_size, margin=args.margin, save_epoch=args.save_epoch, lr_decay=args.lr_decay, output_path=args.output, out_image_dir=args.out_image_dir, clip_rect=clip_rect)
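The '.gen.model', '.gen.state', '.dis.model' and '.dis.state' files consumed above would have been produced by matching serializers.save_npz calls; a hypothetical sketch of that saving step, reusing the gen, dis and optimizer objects from the script above (the prefix value is an assumption):

from chainer import serializers

prefix = 'result/epoch_0100'            # hypothetical output prefix
serializers.save_npz(prefix + '.gen.model', gen)
serializers.save_npz(prefix + '.gen.state', optimizer_gen)
serializers.save_npz(prefix + '.dis.model', dis)
serializers.save_npz(prefix + '.dis.state', optimizer_dis)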
train2.py (project: chainer-stack-gan, author: dsanno)
def main():
    args = parse_args()
    gen1 = net.Generator1()
    gen2 = net.Generator2()
    dis = net.Discriminator2()
    clip_rect = None
    if args.clip_rect:
        clip_rect = map(int, args.clip_rect.split(','))
        clip_rect = tuple([clip_rect[0], clip_rect[1], clip_rect[0] + clip_rect[2], clip_rect[1] + clip_rect[3]])

    device_id = None
    if args.gpu >= 0:
        device_id = args.gpu
        cuda.get_device(device_id).use()
        gen1.to_gpu(device_id)
        gen2.to_gpu(device_id)
        dis.to_gpu(device_id)

    optimizer_gen = optimizers.Adam(alpha=0.001)
    optimizer_gen.setup(gen2)
    optimizer_dis = optimizers.Adam(alpha=0.001)
    optimizer_dis.setup(dis)

    serializers.load_npz(args.stack1 + '.gen.model', gen1)
    if args.input != None:
        serializers.load_npz(args.input + '.gen.model', gen2)
        serializers.load_npz(args.input + '.gen.state', optimizer_gen)
        serializers.load_npz(args.input + '.dis.model', dis)
        serializers.load_npz(args.input + '.dis.state', optimizer_dis)

    if args.out_image_dir != None:
        if not os.path.exists(args.out_image_dir):
            try:
                os.mkdir(args.out_image_dir)
            except:
                print 'cannot make directory {}'.format(args.out_image_dir)
                exit()
        elif not os.path.isdir(args.out_image_dir):
            print 'file path {} exists but is not directory'.format(args.out_image_dir)
            exit()

    with open(args.dataset, 'rb') as f:
        images = pickle.load(f)

    train(gen1, gen2, dis, optimizer_gen, optimizer_dis, images, args.epoch, batch_size=args.batch_size, margin=args.margin, save_epoch=args.save_epoch, lr_decay=args.lr_decay, output_path=args.output, out_image_dir=args.out_image_dir, clip_rect=clip_rect)
infer_camera.py (project: chainer-deconv, author: germanRos)
def inference():
    cap = cv2.VideoCapture(0)


    # load model
    model = Netmodel('eval-model', CLASSES)
    serializers.load_npz(MODEL_NAME, model)
    cuda.get_device(GPU_ID).use()
    model.to_gpu()

    LUT = fromHEX2RGB(stats_opts['colormap'] )
    fig3, axarr3 = plt.subplots(1, 1)

    batchRGB = np.zeros((1, 3, NEWSIZE[1], NEWSIZE[0]), dtype='float32')

    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        # process frame
        im = misc.imresize(frame, NEWSIZE, interp='bilinear')
        # conversion from HxWxCH to CHxWxH
        batchRGB[0,:,:,:] = im.astype(np.float32).transpose((2,1,0))
        batchRGBn = batchRGB  - 127.0

        # data ready
        batch = chainer.Variable(cuda.cupy.asarray(batchRGBn))

        # make predictions
        model((batch, []), test_mode=2)
        pred = model.probs.data.argmax(1)
        # move data back to CPU
        pred_ = cuda.to_cpu(pred)

        pred_ = LUT[pred_+1,:].squeeze()
        pred_ = pred_.transpose((1,0,2))
        pred2 = cv2.cvtColor(pred_, cv2.COLOR_BGR2RGB)

        # Display the resulting frame
        cv2.imshow('frame',frame)
        cv2.imshow('pred',pred2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
infer_file.py (project: chainer-deconv, author: germanRos)
def inference():
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    #cv2.namedWindow('pred', cv2.WINDOW_NORMAL)


    # load model
    model = Netmodel('eval-model', CLASSES)
    serializers.load_npz(MODEL_NAME, model)
    cuda.get_device(GPU_ID).use()
    model.to_gpu()

    LUT = fromHEX2RGB(stats_opts['colormap'] )
    fig3, axarr3 = plt.subplots(1, 1)

    batchRGB = np.zeros((1, 3, NEWSIZE[1], NEWSIZE[0]), dtype='float32')

    # go throught the data
    flist = []
    with open(TESTFILE) as f:
        for line in f:
            cline = re.split('\n',line)

            #print(cline[0])
            frame = misc.imread(cline[0])
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # process frame
            im = misc.imresize(frame, NEWSIZE, interp='bilinear')
            # conversion from HxWxCH to CHxWxH
            batchRGB[0,:,:,:] = im.astype(np.float32).transpose((2,1,0))
            batchRGBn = batchRGB  - 127.0

            # data ready
            batch = chainer.Variable(cuda.cupy.asarray(batchRGBn))

            # make predictions
            model((batch, []), test_mode=2)
            pred = model.probs.data.argmax(1)
            # move data back to CPU
            pred_ = cuda.to_cpu(pred)

            pred_ = LUT[pred_+1,:].squeeze()
            pred_ = pred_.transpose((1,0,2))
            pred2 = cv2.cvtColor(pred_, cv2.COLOR_BGR2RGB)

            #ipdb.set_trace()
            disp = (0.4*im + 0.6*pred2).astype(np.uint8)

            # Display the resulting frame
            cv2.imshow('frame',disp)
            #cv2.imshow('pred',pred2)
            if cv2.waitKey(-1) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()

