Python dump() example source code

tools.py (project: structured-output-ae, author: sbelharbi)
def save_params(self, weights_file, catched=False):
        """Save the model's params."""
        with open(weights_file, "wb") as f:  # binary mode: HIGHEST_PROTOCOL is a binary format
            if catched:
                if self.catched_params != []:
                    params_vl = self.catched_params
                else:
                    raise ValueError(
                        "You asked to save catched params, "
                        "but you didn't catch any!")
            else:
                params_vl = [param.get_value() for param in self.params]
            ft_extractor = False
            if self.ft_extractor is not None:
                ft_extractor = True
            stuff = {"layers_infos": self.layers_infos,
                     "params_vl": params_vl,
                     "tag": self.tag,
                     "dropout": self.dropout,
                     "ft_extractor": ft_extractor}
            pkl.dump(stuff, f, protocol=pkl.HIGHEST_PROTOCOL)
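
The method writes a single dict, so one pkl.load reads everything back. A minimal loader sketch under that assumption (the function name and the pickle import alias are hypothetical):

import pickle as pkl

def load_params(weights_file):
    # Read back the dict written by save_params above; binary mode is
    # required because HIGHEST_PROTOCOL is a binary pickle format.
    with open(weights_file, "rb") as f:
        return pkl.load(f)
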
tools.py (project: structured-output-ae, author: sbelharbi)
def save_params(self, weights_file, catched=False):
        """Save the model's params."""
        with open(weights_file, "wb") as f:  # binary mode: HIGHEST_PROTOCOL is a binary format
            if catched:
                if self.catched_params != []:
                    params_vl = self.catched_params
                else:
                    raise ValueError(
                        "You asked to save catched params, "
                        "but you didn't catch any!")
            else:
                params_vl = [param.get_value() for param in self.params]
            ft_extractor = False
            if self.ft_extractor is not None:
                ft_extractor = True
            stuff = {"layers_infos": self.layers_infos,
                     "params_vl": params_vl,
                     "tag": self.tag,
                     "dropout": self.dropout,
                     "ft_extractor": ft_extractor,
                     "dic_keys": self.dic_keys,
                     "config_arch": self.config_arch,
                     "crop_size": self.crop_size}
            pkl.dump(stuff, f, protocol=pkl.HIGHEST_PROTOCOL)
ae.py (project: structured-output-ae, author: sbelharbi)
def save_params(self, weights_file, catched=False):
        """Save the model's parameters."""
        f_dump = open(weights_file, "wb")  # binary mode for pickle
        params_vls = []
        if catched:
            if self.catched_params != []:
                params_vls = self.catched_params
            else:
            raise ValueError(
                "You asked to save catched params, "
                "but you didn't catch any!")
        else:
            for param in self.params:
                params_vls.append(param.get_value())
        pkl.dump(params_vls, f_dump, protocol=pkl.HIGHEST_PROTOCOL)
        f_dump.close()
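
This variant dumps a bare list of parameter values, so restoring depends on supplying the model's params in the same order. A hypothetical counterpart, assuming Theano-style shared variables with a set_value() method:

import pickle as pkl

def load_params(weights_file, params):
    # Unpickle the list of arrays and copy each back into the matching
    # shared variable; relies on identical ordering at save and load time.
    with open(weights_file, "rb") as f:
        params_vls = pkl.load(f)
    for param, value in zip(params, params_vls):
        param.set_value(value)
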
keras_cnn.py (project: structured-output-ae, author: sbelharbi)
def set_params(mo, bparams):
    # Each layer consumes two consecutive arrays from the flat list
    # (kernel, then bias).
    i = 0
    for la in mo.layers:
        we = bparams[i:i + 2]
        print len(we)
        la.set_weights(we)
        i += 2
    return mo

#with open("best_model_keras.pkl", 'r') as f:
#    b_params = pkl.load(f)
#
#model = set_params(model, b_params)
#out = model.predict(xvl, batch_size=xvl.shape[0], verbose=0)
#error = np.mean(np.mean(np.power(out - yvl, 2), axis=1))
#print "Error vl", error
#sys.exit()

#init_p = get_params(model)
#with open("init_keras_param.pkl", 'w') as f:
#    pkl.dump(init_p, f)
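
The commented-out block calls get_params, which this page does not show. A minimal sketch consistent with set_params above (an assumption, not the project's code): each layer contributes two arrays to one flat list.

def get_params(mo):
    # Inverse of set_params: flatten every layer's weight arrays
    # (kernel, then bias) into a single list, in layer order.
    bparams = []
    for la in mo.layers:
        bparams.extend(la.get_weights())
    return bparams
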
workflow.py (project: alphy, author: maximepeschard)
def register(self, name, serializer):
        """Register ``serializer`` object under ``name``.

        Raises :class:`AttributeError` if ``serializer`` is invalid.

        .. note::

            ``name`` will be used as the file extension of the saved files.

        :param name: Name to register ``serializer`` under
        :type name: ``unicode`` or ``str``
        :param serializer: object with ``load()`` and ``dump()``
            methods

        """

        # Basic validation: fetching the attributes raises AttributeError
        # if either method is missing.
        getattr(serializer, 'load')
        getattr(serializer, 'dump')

        self._serializers[name] = serializer
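
Any object that exposes load() and dump() methods satisfies the check, so the standard-library json module can itself be registered. A usage sketch; the manager class name is an assumption, since only the method is shown here:

import json

manager = SerializerManager()  # hypothetical: whichever class defines register()
manager.register('json', json)  # json.load / json.dump satisfy the duck-typed contract
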
workflow.py (project: alphy, author: maximepeschard)
def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open pickle file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: Python object
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """

        return pickle.dump(obj, file_obj, protocol=-1)  # -1 selects the highest protocol
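
protocol=-1 is shorthand for the highest protocol the running pickle supports, equivalent to the pickle.HIGHEST_PROTOCOL used elsewhere on this page. A self-contained round trip showing the same call pattern:

import pickle

with open('data.pkl', 'wb') as fp:
    pickle.dump({'answer': 42}, fp, protocol=-1)  # -1 == highest available protocol
with open('data.pkl', 'rb') as fp:
    data = pickle.load(fp)  # load() autodetects the protocol
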


# Set up default manager and register built-in serializers
workflow.py (project: alphy, author: maximepeschard)
def save(self):
        """Save settings to JSON file specified in ``self._filepath``

        If you're using this class via :attr:`Workflow.settings`, which
        you probably are, ``self._filepath`` will be ``settings.json``
        in your workflow's data directory (see :attr:`~Workflow.datadir`).
        """
        if self._nosave:
            return
        data = {}
        data.update(self)
        # for key, value in self.items():
        #     data[key] = value
        with LockFile(self._filepath):
            with atomic_writer(self._filepath, 'wb') as file_obj:
                json.dump(data, file_obj, sort_keys=True, indent=2,
                          encoding='utf-8')  # 'encoding' kwarg: Python 2 json only

    # dict methods
nyud2_voc.py (project: fast-rcnn-distillation, author: xiaolonw)
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_nyud2_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
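
The same load-or-compute-and-cache shape recurs in every gt_roidb variant below. A generic helper distilling the pattern (illustrative only, not part of these projects; Python 2 to match the snippets):

import os
import cPickle

def load_or_build(cache_file, build):
    # Return the pickled result if the cache file exists; otherwise
    # call build(), cache its result with the highest pickle protocol,
    # and return it.
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            return cPickle.load(fid)
    result = build()
    with open(cache_file, 'wb') as fid:
        cPickle.dump(result, fid, cPickle.HIGHEST_PROTOCOL)
    return result
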
pascal_voc.py (project: fast-rcnn-distillation, author: xiaolonw)
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_pascal_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
pascal_voc.py (project: faster-rcnn-resnet, author: Eniac-Xie)
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_pascal_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
coco.py (project: faster-rcnn-resnet, author: Eniac-Xie)
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if osp.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_coco_annotation(index)
                    for index in self._image_index]

        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)
        return gt_roidb
coco.py (project: faster-rcnn-resnet, author: Eniac-Xie)
def _write_coco_results_file(self, all_boxes, res_file):
        # [{"image_id": 42,
        #   "category_id": 18,
        #   "bbox": [258.15,41.29,348.26,243.78],
        #   "score": 0.236}, ...]
        results = []
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print 'Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
                                                          self.num_classes - 1)
            coco_cat_id = self._class_to_coco_cat_id[cls]
            results.extend(self._coco_results_one_category(all_boxes[cls_ind],
                                                           coco_cat_id))
        print 'Writing results json to {}'.format(res_file)
        with open(res_file, 'w') as fid:
            json.dump(results, fid)
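
_coco_results_one_category is not shown on this page. A sketch of what it plausibly returns, matching the JSON schema in the comment at the top of the method; the attribute names and the per-detection [x1, y1, x2, y2, score] layout are assumptions:

def _coco_results_one_category(self, boxes, cat_id):
    # Hypothetical: convert per-image corner boxes into COCO result
    # dicts with [x, y, width, height] boxes.
    results = []
    for im_ind, index in enumerate(self._image_index):
        for x1, y1, x2, y2, score in boxes[im_ind]:
            results.append({'image_id': index,
                            'category_id': cat_id,
                            'bbox': [x1, y1, x2 - x1, y2 - y1],
                            'score': score})
    return results
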
cache.py (project: zanph, author: zanph)
def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = int(time() + self.default_timeout)
        elif timeout != 0:
            timeout = int(time() + timeout)
        filename = self._get_filename(key)
        self._prune()
        try:
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            with os.fdopen(fd, 'wb') as f:
                pickle.dump(timeout, f, 1)  # expiry timestamp first, protocol 1
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)  # then the payload
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            return True
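
set() writes two consecutive pickles into a single file: the expiry timestamp first, then the payload, so a reader must unpickle twice in the same order. A sketch of the matching get(), inferred from the write format above rather than copied from the project:

def get(self, key):
    # Read back the two pickles written by set(): expiry first, then value.
    filename = self._get_filename(key)
    try:
        with open(filename, 'rb') as f:
            expires = pickle.load(f)
            if expires == 0 or expires >= time():
                return pickle.load(f)
    except (IOError, OSError, pickle.PickleError):
        pass
    return None
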
MovieSelection.py (project: enigma2, author: OpenLD)
def saveLocalSettings(self):
        if not config.movielist.settings_per_directory.value:
            return
        try:
            path = os.path.join(config.movielist.last_videodir.value, ".e2settings.pkl")
            file = open(path, "wb")
            pickle.dump(self.settings, file)
            file.close()
        except Exception, e:
            print "[MovieSelection] Failed to save settings to %s: %s" % (path, e)
        # Also set config items, in case the user has a read-only disk
        config.movielist.moviesort.value = self.settings["moviesort"]
        config.movielist.description.value = self.settings["description"]
        config.usage.on_movie_eof.value = self.settings["movieoff"]
        # save moviesort and movieoff values for use by hotkeys
#       config.movielist.moviesort.save()
        config.usage.on_movie_eof.save()
bidirectional.py (project: deep-summarization, author: harpribot)
def _split_train_tst(self):
        """
        Divide the data into training and testing sets.
        Create X_trn and X_tst (both forward and backward), and Y_trn and Y_tst.
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            with open(mapper_file, 'wb') as f:
                Pickle.dump(sample_id, f)
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            with open(mapper_file, 'rb') as f:
                sample_id = Pickle.load(f)
            print 'Mapping unpickling complete.. Moving forward...'

        self.X_fwd = self.X_fwd[sample_id]
        self.X_bwd = self.X_bwd[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # Forward review
        self.X_trn_fwd = self.X_fwd[0:self.train_size]
        self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]
        # Backward review
        self.X_trn_bwd = self.X_bwd[0:self.train_size]
        self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
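
Pickling the shuffled index list is what makes the split reproducible: later runs reload the same permutation instead of reshuffling. The idea in isolation (a standalone sketch, Python 2 like the snippet; names are illustrative):

import os
import random
import cPickle as Pickle

def load_or_create_shuffle(mapper_file, num_samples):
    # Reuse a persisted permutation so the train/test split stays stable
    # across runs; create and dump one on the first run.
    if os.path.exists(mapper_file):
        with open(mapper_file, 'rb') as f:
            return Pickle.load(f)
    sample_id = range(num_samples)
    random.shuffle(sample_id)
    with open(mapper_file, 'wb') as f:
        Pickle.dump(sample_id, f)
    return sample_id
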
stacked_simple.py (project: deep-summarization, author: harpribot)
def _split_train_tst(self):
        """
        Divide the data into training and testing sets.
        Create X_trn, X_tst, Y_trn and Y_tst.
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            with open(mapper_file, 'wb') as f:
                Pickle.dump(sample_id, f)
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            with open(mapper_file, 'rb') as f:
                sample_id = Pickle.load(f)
            print 'Mapping unpickling complete.. Moving forward...'

        self.X = self.X[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # review
        self.X_trn = self.X[0:self.train_size]
        self.X_tst = self.X[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
simple.py (project: deep-summarization, author: harpribot)
def _split_train_tst(self):
        """
        Divide the data into training and testing sets.
        Create X_trn, X_tst, Y_trn and Y_tst.
        Note that only the reviews are changed, and not the summary.

        :return: None
        """

        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            with open(mapper_file, 'wb') as f:
                Pickle.dump(sample_id, f)
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            with open(mapper_file, 'rb') as f:
                sample_id = Pickle.load(f)
            print 'Mapping unpickling complete.. Moving forward...'

        self.X = self.X[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # review
        self.X_trn = self.X[0:self.train_size]
        self.X_tst = self.X[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
stacked_bidirectional.py (project: deep-summarization, author: harpribot)
def _split_train_tst(self):
        """
        Divide the data into training and testing sets.
        Create X_trn and X_tst (both forward and backward), and Y_trn and Y_tst.
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            with open(mapper_file, 'wb') as f:
                Pickle.dump(sample_id, f)
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            with open(mapper_file, 'rb') as f:
                sample_id = Pickle.load(f)
            print 'Mapping unpickling complete.. Moving forward...'

        self.X_fwd = self.X_fwd[sample_id]
        self.X_bwd = self.X_bwd[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # Forward review
        self.X_trn_fwd = self.X_fwd[0:self.train_size]
        self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]
        # Backward review
        self.X_trn_bwd = self.X_bwd[0:self.train_size]
        self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
sequence2sequence.py (project: lang-reps, author: chaitanyamalaviya)
def save(self, path):
        if not os.path.exists(path): os.makedirs(path)
        self.src_vocab.save(path+"/vocab.src")
        self.tgt_vocab.save(path+"/vocab.tgt")
        self.m.save(path+"/params")
        with open(path + "/args", "wb") as f: pickle.dump(self.args, f)  # binary mode for pickle
util.py (project: lang-reps, author: chaitanyamalaviya)
def save(self, filename):
        info_dict = {
            "tokens": self.tokens,
            "strings": self.strings,
            "s2t": dict(self.s2t),
            "i2t": dict(self.i2t),
            "unk": self.unk,
            "START_TOK": self.START_TOK,
            "END_TOK": self.END_TOK
        }
        with open(filename, "wb") as f: pickle.dump(info_dict, f)  # binary mode for pickle

