Example source code for Python's dump()

peda-arm.py (project: peda-arm, author: alset0326)
def save_snapshot(self, filename=None):
        """
        Save a snapshot of current process to file
        Warning: this is not thread-safe; do not use with multithreaded programs

        Args:
            - filename: target file to save snapshot

        Returns:
            - Bool
        """
        if not filename:
            filename = self.get_config_filename("snapshot")

        snapshot = self.take_snapshot()
        if not snapshot:
            return False
        # dump to file
        fd = open(filename, "wb")
        pickle.dump(snapshot, fd, pickle.HIGHEST_PROTOCOL)
        fd.close()

        return True
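
A snapshot written this way can be read back with the matching pickle.load call. Below is a minimal sketch of the reverse operation; it assumes a give_snapshot() method that mirrors take_snapshot() (an assumption, not verified against the project):

def restore_snapshot(self, filename=None):
        """
        Restore a process snapshot saved by save_snapshot() (sketch)
        """
        if not filename:
            filename = self.get_config_filename("snapshot")

        fd = open(filename, "rb")
        snapshot = pickle.load(fd)
        fd.close()
        # give_snapshot() is assumed to be the counterpart of take_snapshot()
        return self.give_snapshot(snapshot)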
peda-arm.py (project: peda-arm, author: alset0326)
def dumpmem(self, start, end):
        """
        Dump process memory from start to end

        Args:
            - start: start address (Int)
            - end: end address (Int)

        Returns:
            - memory content (raw bytes)
        """
        mem = None
        logfd = tmpfile(is_binary_file=True)
        logname = logfd.name
        out = self.execute_redirect("dump memory %s 0x%x 0x%x" % (logname, start, end))
        if out is not None:
            logfd.flush()
            mem = logfd.read()
        logfd.close()

        return mem
peda-arm.py (project: peda-arm, author: alset0326)
def readmem(self, address, size):
        """
        Read content of memory at an address

        Args:
            - address: start address to read (Int)
            - size: bytes to read (Int)

        Returns:
            - memory content (raw bytes)
        """
        # try fast dumpmem if it works
        mem = self.dumpmem(address, address + size)
        if mem is not None:
            return mem

        # failed to dump, fall back to the slower x/bx way
        mem = ""
        out = self.execute_redirect("x/%dbx 0x%x" % (size, address))
        if out:
            for line in out.splitlines():
                hex_bytes = line.split(":\t")[-1].split()
                mem += "".join([chr(int(c, 0)) for c in hex_bytes])

        return mem
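
To make the fallback parsing concrete, here is one hypothetical line of gdb's x/Nbx output and how the loop above folds it back into raw bytes (Python 2 str semantics, matching the snippet):

# Hypothetical gdb output line for "x/8bx 0x8000"
line = "0x8000:\t0x7f\t0x45\t0x4c\t0x46\t0x01\t0x01\t0x01\t0x00"
hex_bytes = line.split(":\t")[-1].split()
raw = "".join(chr(int(c, 0)) for c in hex_bytes)  # -> "\x7fELF\x01\x01\x01\x00"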
__init__.py (project: neural_mt, author: chrishokamp)
def dump(self, main_loop):
        if not os.path.exists(self.path_to_folder):
            os.mkdir(self.path_to_folder)
        print("")
        logger.info(" Saving model")
        start = time.time()
        logger.info(" ...saving parameters")
        self.dump_parameters(main_loop)
        logger.info(" ...saving iteration state")
        self.dump_iteration_state(main_loop)
        logger.info(" ...saving log")
        self.dump_log(main_loop)
        logger.info(" Model saved, took {} seconds.".format(time.time()-start))

        # Write the time and model path to the main loop
        # write the iteration count and the path to the model params.npz (note hardcoding of params.npz)
        main_loop.log.status['last_saved_model'] = (self.main_loop.log.status['iterations_done'], self.path_to_parameters)
read_LaMemDataset.py (project: Colorization.tensorflow, author: shekkizh)
def read_dataset(data_dir):
    pickle_filename = "lamem.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_tarfile=True)
        lamem_folder = (DATA_URL.split("/")[-1]).split(os.path.extsep)[0]
        result = {'images': create_image_lists(os.path.join(data_dir, lamem_folder))}
        print ("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print ("Found pickle file!")

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        training_records = result['images']
        del result

    return training_records
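
read_dataset() builds lamem.pickle once and reuses it on later runs. A minimal usage sketch (the directory name is hypothetical):

# First call downloads, extracts and pickles; later calls reuse lamem.pickle
training_records = read_dataset("Data_zoo/lamem")
print("Loaded %d training records" % len(training_records))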
cmodule.py (project: Theano-Deep-learning, author: GeekLiB)
def save_pkl(self):
        """
        Dump this object into its `key_pkl` file.

        May raise a cPickle.PicklingError if such an exception is raised at
        pickle time (in which case a warning is also displayed).

        """
        # Note that writing in binary mode is important under Windows.
        try:
            with open(self.key_pkl, 'wb') as f:
                pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
        except pickle.PicklingError:
            _logger.warning("Cache leak due to unpickle-able key data %s",
                            self.keys)
            os.remove(self.key_pkl)
            raise
utils.py (project: TextGAN, author: AustinStoneProjects)
def preprocess(self, input_file, vocab_file, tensor_file):
        with open(input_file, "r") as f:
            data = f.read()

        # Optional text cleaning or make them lower case, etc.
        data = self.clean_str(data)
        x_text = data.split()
        self.vocab, self.words = self.build_vocab(x_text)

        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.words, f)

        self.tensor = []
        for word in x_text:
            if word not in self.vocab:
                self.tensor.append(self.vocab['UNK'])
            else:
                self.tensor.append(self.vocab[word])
        self.tensor = np.asarray(self.tensor)
        # Save the data to data.npy
        np.save(tensor_file, self.tensor)
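
When the vocabulary already exists on disk, the reverse path is an unpickle plus np.load. A minimal loader sketch, assuming the file layout written above (load_preprocessed is the conventional char-rnn-style name and is an assumption here, not the project's verified API):

def load_preprocessed(self, vocab_file, tensor_file):
        # Rebuild the vocabulary from the pickled word list (assumed layout)
        with open(vocab_file, 'rb') as f:
            self.words = cPickle.load(f)
        self.vocab = dict(zip(self.words, range(len(self.words))))
        # Reload the index tensor written with np.save()
        self.tensor = np.load(tensor_file)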
Model.py (project: cxr_classification, author: harishanand95)
def save(self, dataset_filename="CXR_png.pickle", overwrite=False):
        if self._dataset is None:
            print("Dataset is empty. Run load_images before saving.")
            return

        data = {"dataset": self._dataset,
                "labels": self._labels,
                "valid_images_count": self._valid_images_count,
                "width": self._image_width,
                "height": self._image_height,
                "convert_to_gray": self._convert_to_gray,
                "folder": self._folder,
                "test_dataset": self._test_dataset,
                "test_labels": self._test_labels,
                "test_data_size": self._test_data_size}

        if overwrite is True:
            if os.path.isfile(dataset_filename):
                os.remove(dataset_filename)
        try:
            with open(dataset_filename, 'wb') as f:
                pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print('Unable to save data to', dataset_filename, ':', e)
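
A matching loader would unpickle the dictionary and restore the fields. A sketch of a hypothetical counterpart, limited to what save() wrote:

def load(self, dataset_filename="CXR_png.pickle"):
        # Hypothetical counterpart of save(): unpickle and restore fields
        with open(dataset_filename, 'rb') as f:
            data = pickle.load(f)
        self._dataset = data["dataset"]
        self._labels = data["labels"]
        self._test_dataset = data["test_dataset"]
        self._test_labels = data["test_labels"]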
utils.py (project: TrickleDownML, author: andykamath)
def preprocess(self, input_file, vocab_file, tensor_file):
        with open(input_file, "r") as f:
            data = f.read()

        # Optional text cleaning or make them lower case, etc.
        #data = self.clean_str(data)
        x_text = data.split()

        self.vocab, self.words = self.build_vocab(x_text)
        self.vocab_size = len(self.words)

        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.words, f)

        # Equivalent to [self.vocab[word] for word in x_text]:
        # use word indices as the basic data
        self.tensor = np.array(list(map(self.vocab.get, x_text)))
        # Save the data to data.npy
        np.save(tensor_file, self.tensor)
processor.py (project: nuts-flow, author: maet3608)
def __rrshift__(self, iterable):
        """
        Return elements in iterable.

        :param iterable iterable: Any iterable
        :return: Generator over same elements as input iterable.
        :rtype: Generator
        """
        if self.path or (self._cachepath and not self._clearcache):
            for e in self.__iter__():
                yield e
        else:
            self._create_cache()
            for i, e in enumerate(iterable):
                with open(self._fpath(i), 'wb') as f:
                    pickle.dump(e, f, pickle.HIGHEST_PROTOCOL)
                yield e
layers.py (project: fxnn, author: khaotik)
def save_params(self, f_, filter_=None):
        #FIXME: this does not save shared_variable properties like "strict" or "allow_downcast"
        if filter_ is None:
            pickle.dump({k:v.get_value() for k,v in self._vars_di.items()}, f_)
        else:
            pat = re.compile(filter_)
            pickle.dump({k:v.get_value() for k,v in self._vars_di.items() if pat.fullmatch(k)}, f_)
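
The reverse direction pushes pickled values back into the shared variables. A sketch of a counterpart (load_params is an assumption, not the project's verified API):

def load_params(self, f_, filter_=None):
        # Hypothetical counterpart of save_params(): restore values into
        # the matching shared variables
        params = pickle.load(f_)
        pat = re.compile(filter_) if filter_ is not None else None
        for k, v in params.items():
            if pat is None or pat.fullmatch(k):
                self._vars_di[k].set_value(v)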
format.py (project: treecat, author: posterior)
def pickle_dump(data, filename):
    """Serialize data to file using gzip compression."""
    if filename.endswith('.pkz'):
        with gzip.open(filename, 'wb') as f:
            pickle.dump(data, f, protocol=2)  # Try to support python 2.
    elif filename.endswith('.jz'):
        with gzip.open(filename, 'wt') as f:
            f.write(json_dumps(data))
    else:
        raise ValueError(
            'Cannot determine format: {}'.format(os.path.basename(filename)))
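
The matching reader dispatches on the same extensions. A sketch (json_loads is assumed to be the counterpart of the json_dumps used above):

def pickle_load(filename):
    """Deserialize data from a file written by pickle_dump above (sketch)."""
    if filename.endswith('.pkz'):
        with gzip.open(filename, 'rb') as f:
            return pickle.load(f)
    elif filename.endswith('.jz'):
        with gzip.open(filename, 'rt') as f:
            return json_loads(f.read())
    else:
        raise ValueError(
            'Cannot determine format: {}'.format(os.path.basename(filename)))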
community.py (project: micom, author: resendislab)
def to_pickle(self, filename):
        """Save a community in serialized form.

        Parameters
        ----------
        filename : str
            Where to save the pickled community.

        Returns
        -------
        Nothing

        """
        with open(filename, mode="wb") as out:
            pickle.dump(self, out)
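
Loading is symmetric. A sketch of a hypothetical counterpart (from_pickle is not the project's verified API):

@classmethod
def from_pickle(cls, filename):
        """Hypothetical counterpart: load a community saved by to_pickle()."""
        with open(filename, mode="rb") as infile:
            return pickle.load(infile)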
util.py (project: micom, author: resendislab)
def serialize_models(files, dir="."):
    """Convert several models to Python pickles."""
    for f in files:
        fname = path.basename(f).split(".")[0]
        model = load_model(f)
        logger.info("serializing {}".format(f))
        pickle.dump(model, open(path.join(dir, fname + ".pickle"), "wb"),
                    protocol=2)  # required for Python 2 compat
char_generator.py (project: nn_playground, author: DingKe)
def preprocess(self, input_file, vocab_file, tensor_file):
        with codecs.open(input_file, "r", encoding=self.encoding) as f:
            data = f.read()
        counter = collections.Counter(data)
        count_pairs = sorted(counter.items(), key=lambda x: -x[1])
        self.chars, _ = zip(*count_pairs)
        self.vocab_size = len(self.chars)
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.chars, f)
        self.tensor = np.array(list(map(self.vocab.get, data)))
        np.save(tensor_file, self.tensor)
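
To make the vocabulary construction concrete, a tiny worked example with hypothetical input:

# Tiny worked example of the counting/sorting above
import collections

data = "aabbbc"
counter = collections.Counter(data)                        # b: 3, a: 2, c: 1
count_pairs = sorted(counter.items(), key=lambda x: -x[1]) # [('b', 3), ('a', 2), ('c', 1)]
chars, _ = zip(*count_pairs)                               # ('b', 'a', 'c')
vocab = dict(zip(chars, range(len(chars))))                # {'b': 0, 'a': 1, 'c': 2}
tensor = [vocab[c] for c in data]                          # [1, 1, 0, 0, 0, 2]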
extract_params.py (project: deeplab_v1_tf1.0, author: automan000)
def main():
    """Extract and save network skeleton with the corresponding weights.

    Raises:
      ImportError: PyCaffe module is not found."""
    args = get_arguments()
    sys.path.append(args.pycaffe_path)
    try:
        import caffe
    except ImportError:
        raise
    # Load net definition.
    net = caffe.Net('./util/deploy.prototxt', args.caffemodel, caffe.TEST)

    # Check the existence of output_dir.
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Net skeleton with parameters names and shapes.
    # In TF, the filter shape is as follows: [ks, ks, input_channels, output_channels],
    # while in Caffe it looks like this: [output_channels, input_channels, ks, ks].
    net_skeleton = list() 
    for name, item in net.params.iteritems():
        net_skeleton.append([name + '/w', item[0].data.shape[::-1]]) # See the explanation on filter formats above.
        net_skeleton.append([name + '/b', item[1].data.shape])

    with open(os.path.join(args.output_dir, 'net_skeleton.ckpt'), 'wb') as f:
        cPickle.dump(net_skeleton, f, protocol=cPickle.HIGHEST_PROTOCOL)

    # Net weights. 
    net_weights = dict()
    for name, item in net.params.iteritems():
        net_weights[name + '/w'] = item[0].data.transpose(2, 3, 1, 0) # See the explanation on filter formats above.
        net_weights[name + '/b'] = item[1].data
    with open(os.path.join(args.output_dir,'net_weights.ckpt'), 'wb') as f:
        cPickle.dump(net_weights, f, protocol=cPickle.HIGHEST_PROTOCOL)
    del net, net_skeleton, net_weights
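
Despite the .ckpt extension, both files are ordinary cPickle files and can be read back directly. A sketch, assuming the same output_dir:

# Sketch: read back the pickled skeleton and weights
with open(os.path.join(args.output_dir, 'net_skeleton.ckpt'), 'rb') as f:
    net_skeleton = cPickle.load(f)
with open(os.path.join(args.output_dir, 'net_weights.ckpt'), 'rb') as f:
    net_weights = cPickle.load(f)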
data_handlers.py (project: feagen, author: ianlini)
def write_data(self, result_dict):
        for key, val in six.viewitems(result_dict):
            pickle_path = os.path.join(self.pickle_dir, key + ".pkl")
            with SimpleTimer("Writing generated data %s to pickle file" % key,
                             end_in_new_line=False), \
                    open(pickle_path, "wb") as fp:
                cPickle.dump(val, fp, protocol=cPickle.HIGHEST_PROTOCOL)
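
A read counterpart opens each key's pickle file in turn. A sketch (read_data is hypothetical, not the project's verified API):

def read_data(self, keys):
        # Hypothetical counterpart of write_data(): load each key back
        result_dict = {}
        for key in keys:
            pickle_path = os.path.join(self.pickle_dir, key + ".pkl")
            with open(pickle_path, "rb") as fp:
                result_dict[key] = cPickle.load(fp)
        return result_dict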
read_celebADataset.py (project: WassersteinGAN.tensorflow, author: shekkizh)
def read_dataset(data_dir):
    pickle_filename = "celebA.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        # utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
        celebA_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
        dir_path = os.path.join(data_dir, celebA_folder)
        if not os.path.exists(dir_path):
            print ("CelebA dataset needs to be downloaded and unzipped manually")
            print ("Download from: %s" % DATA_URL)
            raise ValueError("Dataset not found")

        result = create_image_lists(dir_path)
        print ("Training set: %d" % len(result['train']))
        print ("Test set: %d" % len(result['test']))
        print ("Validation set: %d" % len(result['validation']))
        print ("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print ("Found pickle file!")

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        celebA = CelebA_Dataset(result)
        del result
    return celebA
utils.py (project: MIL.pytorch, author: gujiuxiang)
def save_variables(pickle_file_name, var, info, overwrite=False):
    if os.path.exists(pickle_file_name) and not overwrite:
        raise Exception('{:s} exists and overwrite is false.'.format(pickle_file_name))
    # Construct the dictionary
    assert type(var) == list
    assert type(info) == list
    d = {}
    for i in xrange(len(var)):
        d[info[i]] = var[i]
    with open(pickle_file_name, 'wb') as f:
        cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL)
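
The matching loader simply unpickles the dictionary. A sketch (load_variables is an assumption mirroring save_variables):

def load_variables(pickle_file_name):
    # Hypothetical counterpart of save_variables(): return the dict
    # keyed by the info strings
    if not os.path.exists(pickle_file_name):
        raise Exception('{:s} does not exist.'.format(pickle_file_name))
    with open(pickle_file_name, 'rb') as f:
        return cPickle.load(f)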

