Python load(): example source code

nb_utils.py (project: rllabplusplus, author: shaneshixiang)
def _load_experiments(self, data_folder, name_or_patterns):
        if not isinstance(name_or_patterns, (list, tuple)):
            name_or_patterns = [name_or_patterns]
        files = []
        for name_or_pattern in name_or_patterns:
            matched_files = glob(
                osp.join(data_folder, name_or_pattern))  # glob returns a list of all files matching the pattern
            files += matched_files  # the same file may appear twice if it matches two patterns
        experiments = []
        progress_f = None
        params_f = None
        pkl_data = None
        for f in files:
            if os.path.isdir(f):
                try:
                    progress = self._read_data(osp.join(f, "progress.csv"))
                    params = self._read_params(osp.join(f, "params.json"))
                    params["exp_name"] = osp.basename(f)
                    if os.path.isfile(osp.join(f, "params.pkl")):
                        pkl_data = joblib.load(osp.join(f, "params.pkl"))
                        experiments.append(Experiment(progress, params, pkl_data))
                    else:
                        experiments.append(Experiment(progress, params))
                except Exception as e:
                    print(e)
        elif 'progress.csv' in f:  # handles the case where data_folder itself directly contains the files
                progress_f = self._read_data(f)
            elif 'params.json' in f:
                params_f = self._read_params(f)
            elif 'params.pkl' in f:
                print('about to load', f)
                pkl_data = joblib.load(f)
        if params_f and progress_f:
            if pkl_data:
                experiments.append(Experiment(progress_f, params_f, pkl_data))
            else:
                experiments.append(Experiment(progress_f, params_f))

        self._experiments = experiments
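The two inline comments in _load_experiments point at a real pitfall: overlapping patterns yield duplicate paths. Below is a minimal standalone sketch of deduplicating the glob results before loading (the folder and pattern names here are hypothetical):

from glob import glob
import os.path as osp

data_folder = 'data/local/experiments'  # hypothetical layout
patterns = ['exp-*', '*-seed1']         # a single file can match both patterns
files = []
for pattern in patterns:
    files += glob(osp.join(data_folder, pattern))
files = sorted(set(files))              # drop duplicates before loading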
batch_polopt.py (project: rllabplusplus, author: shaneshixiang)
def restore(self, checkpoint_dir=None):
        if checkpoint_dir is None: checkpoint_dir = logger.get_snapshot_dir()
        checkpoint_file = os.path.join(checkpoint_dir, 'params.chk')
        if os.path.isfile(checkpoint_file + '.meta'):
            sess = tf.get_default_session()
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint_file)

            tabular_chk_file = os.path.join(checkpoint_dir, 'progress.csv.chk')
            if os.path.isfile(tabular_chk_file):
                tabular_file = os.path.join(checkpoint_dir, 'progress.csv')
                logger.remove_tabular_output(tabular_file)
                shutil.copy(tabular_chk_file, tabular_file)
                logger.add_tabular_output(tabular_file)

            if self.qf is not None:
                pool_file = os.path.join(checkpoint_dir, 'pool.chk')
                if self.save_format == 'pickle':
                    self.pool = pickle_load(pool_file)  # assign the restored replay pool
                elif self.save_format == 'joblib':
                    self.pool = joblib.load(pool_file)
                else: raise NotImplementedError

            logger.log('Restored from checkpoint %s' % checkpoint_file)
        else:
            logger.log('No checkpoint %s' % checkpoint_file)
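The matching save step is not part of this excerpt; here is a minimal sketch of what it presumably looks like given the same file layout (TF1-style Saver API; the method name and body are assumptions, not the project's actual code):

def save(self, checkpoint_dir=None):
    if checkpoint_dir is None: checkpoint_dir = logger.get_snapshot_dir()
    checkpoint_file = os.path.join(checkpoint_dir, 'params.chk')
    sess = tf.get_default_session()
    # Saver.save writes params.chk.meta, the file restore() checks for
    tf.train.Saver().save(sess, checkpoint_file)
    logger.log('Saved checkpoint %s' % checkpoint_file)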
utils.py (project: SRLF, author: Fritz449)
def load_object(string):
    # reconstruct whatever object was dumped into the byte string
    return joblib.load(BytesIO(string))
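A round-trip sketch showing how such a byte string can be produced and consumed (the dump_object helper here is hypothetical; joblib.dump and joblib.load both accept file objects, so an in-memory buffer works):

from io import BytesIO
import joblib

def dump_object(obj):
    # serialize an object into a byte string that load_object can read back
    buf = BytesIO()
    joblib.dump(obj, buf)
    return buf.getvalue()

payload = dump_object({'weights': [1, 2, 3]})
assert load_object(payload) == {'weights': [1, 2, 3]}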
06_create_patches_bbox.py (project: kaggle-lung-cancer, author: mdai)
def process_study(study_id, annotations, out_dir, nstack):
    volumes_metadata = isotropic_volumes_metadata[study_id]
    isometric_volume = np.load('../data_proc/stage1/isotropic_volumes_1mm/{}.npy'.format(study_id))
    mean = np.mean(isometric_volume).astype(np.float32)
    std = np.std(isometric_volume).astype(np.float32)
    resize_factor = np.divide(volumes_metadata['volume_resampled_shape'], volumes_metadata['volume_shape'])

    coords_list = []
    for a in annotations:
        d = a['data']
        z = int(round(resize_factor[0] * a['sliceNum']))
        y0 = resize_factor[1] * d['y']
        y1 = resize_factor[1] * (d['y'] + d['height'])
        x0 = resize_factor[2] * d['x']
        x1 = resize_factor[2] * (d['x'] + d['width'])
        coords_list.append((z, y0, y1, x0, x1))

    samples = []
    for coords in coords_list:
        z, y0, y1, x0, x1 = coords
        for i in range(40):
            sample_id = uuid4()
            rand_y0 = max(0, int(round(y0 - random.randint(0, 32))))
            rand_y1 = min(isometric_volume.shape[1], int(round(y1 + random.randint(0, 32))))
            rand_x0 = max(0, int(round(x0 - random.randint(0, 32))))
            rand_x1 = min(isometric_volume.shape[2], int(round(x1 + random.randint(0, 32))))
            patch = []
            for zi in range(nstack):
                patch.append(resize(isometric_volume[z+zi, rand_y0:rand_y1, rand_x0:rand_x1], [32, 32],
                                    mode='edge', clip=True, preserve_range=True))
            patch = np.array(patch, dtype=np.float32)
            patch = (patch - mean) / (std + 1e-7)
            patch = np.moveaxis(patch, 0, 2)
            bb_x = (x0 - rand_x0) / (rand_x1 - rand_x0)
            bb_y = (y0 - rand_y0) / (rand_y1 - rand_y0)
            bb_w = (x1 - x0) / (rand_x1 - rand_x0)
            bb_h = (y1 - y0) / (rand_y1 - rand_y0)
            samples.append((patch, bb_x, bb_y, bb_w, bb_h))

    joblib.dump(samples, os.path.join(out_dir, 'samples', '{}.pkl'.format(study_id)))
    return len(samples)
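Reading the dumped patches back simply mirrors the joblib.dump call above (a sketch reusing out_dir and study_id from the surrounding context):

samples = joblib.load(os.path.join(out_dir, 'samples', '{}.pkl'.format(study_id)))
patch, bb_x, bb_y, bb_w, bb_h = samples[0]  # each sample is (patch, x, y, w, h)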
conv_pkl_to_TFRecord.py (project: CIKM2017, author: heliarmk)
def main():
    # Get the data.
    data_set = joblib.load("/mnt/guankai/CIKM/data/CIKM2017_train/train_Imp_3x3_resampled.pkl")
    for i in range(10):
        data_set = np.random.permutation(data_set)  # shuffle; a single pass already suffices
    valid_data_num = int(len(data_set) / 10)  # hold out 10% of the data for validation
    valid_set = data_set[:valid_data_num]
    train_set = data_set[valid_data_num:]
    convert_to(train_set, "train_Imp_3x3_resampled")
    convert_to(valid_set, "valid_Imp_3x3_resampled")
    return
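convert_to is not shown in this excerpt; here is a minimal sketch of a pkl-to-TFRecord writer, assuming each record is a dict with an 'input' array and a 'label' (field names borrowed from the other CIKM2017 snippets on this page; the serialization scheme is an assumption):

import numpy as np
import tensorflow as tf

def convert_to(data_set, name):
    # write one tf.train.Example per record into <name>.tfrecords
    writer = tf.python_io.TFRecordWriter(name + '.tfrecords')
    for item in data_set:
        features = tf.train.Features(feature={
            'input': tf.train.Feature(bytes_list=tf.train.BytesList(
                value=[item['input'].astype(np.float32).tobytes()])),
            'label': tf.train.Feature(float_list=tf.train.FloatList(
                value=[float(item['label'])])),
        })
        writer.write(tf.train.Example(features=features).SerializeToString())
    writer.close()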
cvtpkl2csv.py (project: CIKM2017, author: heliarmk)
def cvt(filename):
    output = joblib.load(filename)
    pred = output["output"]
    outputdir = os.path.join(os.path.dirname(filename), "csvfile")
    if not os.path.isdir(outputdir):
        os.mkdir(outputdir)
    base = os.path.splitext(os.path.basename(filename))[0]
    csvfile = os.path.join(outputdir, base + ".csv")
    out = np.array(pred).reshape(2000)  # flatten to the fixed-length prediction vector
    np.savetxt(fname=csvfile, X=out, fmt="%.3f", delimiter="")
data_agg.py (project: CIKM2017, author: heliarmk)
def agg(file_name,store_file):

    datas = joblib.load(file_name)
    new_datas = []

    for data in datas:
        new_datas.append(data)
        new_datas.append({"input": np.flip(data["input"], axis=2), "label": data["label"]})
        new_datas.append({"input": np.flip(data["input"], axis=3), "label": data["label"]})
        #new_datas.append({"input":np.rot90(m=data["input"],k=1,axes=(2,3)),"label":data["label"]})
        #new_datas.append({"input":np.rot90(m=data["input"],k=2,axes=(2,3)),"label":data["label"]})
        #new_datas.append({"input":np.rot90(m=data["input"],k=3,axes=(2,3)),"label":data["label"]})

    joblib.dump(value=new_datas,filename=store_file,compress=3)
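A usage sketch (the file names are hypothetical). The output file holds three records per input record: the original plus the two mirror flips:

agg('train_Imp_3x3.pkl', 'train_Imp_3x3_flipped.pkl')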
data_slice.py (project: CIKM2017, author: heliarmk)
def slice_data(filename):
    data = joblib.load(filename=filename)
    for idx, i in enumerate(data):
        data[idx]["input"] = np.delete(data[idx]["input"],[3],axis=1)
        data[idx]["input"] = data[idx]["input"][:,:,46:55,46:55]
    name, suf = os.path.splitext(filename)
    outputfilename = name + "del_height_no.4_slice_7x7.pkl"
    joblib.dump(value=data, filename=outputfilename)
classification_classifier_chain.py (project: Kaggle_the_Nature_Conservancy_Fisheries_Monitoring, author: Sapphirine)
def binary_trans(trainy, testy, i):
    y1 = trainy['image_path'].apply(lambda x: 1 if x == int(i) else 0).tolist()
    y2 = testy['image_path'].apply(lambda x: 1 if x == int(i) else 0).tolist()
    return(y1, y2)


# <=======================================================================================================>
# load the data
utils.py (project: Asynchronous-RL-agent, author: Fritz449)
def load_object(string):
    # reconstruct whatever object was dumped into the byte string
    return joblib.load(BytesIO(string))
mnist.py (project: Kaleido, author: vacancy)
def load_data(fname):
    obj = joblib.load(os.path.join('data', fname))
    return obj
test_cache.py (project: senti, author: stevenxxiu)
def test_pickle(self):
        joblib.dump(CachedIterable(self.iterator(), 3), 'output')
        self.assertListEqual(list(joblib.load('output')), list(range(20)))
model_cifar.py (project: deep_separation_contraction, author: edouardoyallon)
def normalize_image(image):
  meanstd = joblib.load(FLAGS.mean_std_path)
  mean, std = meanstd['mean'], meanstd['std']
  normed_image = (image - mean) / std
  return normed_image
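A sketch of how the mean/std file read above is presumably produced (the data and output path are placeholders; only the {'mean': ..., 'std': ...} layout is taken from the reader):

import joblib
import numpy as np

images = np.random.rand(50000, 32, 32, 3).astype(np.float32)  # placeholder for the training images
joblib.dump({'mean': images.mean(), 'std': images.std()}, 'mean_std.pkl')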
model_cifar_contract.py (project: deep_separation_contraction, author: edouardoyallon)
def normalize_image(image):
  meanstd = joblib.load(FLAGS.mean_std_path)
  mean, std = meanstd['mean'], meanstd['std']
  normed_image = (image - mean) / std
  return normed_image
lstm_model.py (project: dstc6_dialogue_breakdown_task, author: JudeLee19)
def __init__(self, input_size, num_hidden, num_classes, utter_embed, bow_utter_embed, config):
        self.input_size = input_size
        self.num_hidden = num_hidden
        self.num_classes = num_classes
        self.utter_embed = utter_embed
        self.bow_utter_embed = bow_utter_embed
        self.logger = config.logger
        self.config = config

        self.cate_mapping_dict = joblib.load('./dbdc3/data/cate_mapping_dict')
utils.py (project: open-database, author: mitaffinity)
def load(filename, mmap_mode=None):
    """Reconstruct a Python object from a file persisted with joblib.dump."""
    f = open(filename, 'rb')
    fobj = _read_fileobject(f, filename, mmap_mode)
    if isinstance(fobj, (str, unicode)):  # note: `unicode` exists on Python 2 only
        return load_compatibility(fobj)

    obj = _unpickle(fobj, filename, mmap_mode)

    return obj
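A usage sketch of the reader above (the file name is hypothetical); passing mmap_mode='r' lets large arrays be memory-mapped instead of read fully into RAM:

features = load('features.joblib', mmap_mode='r')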
utils.py (project: open-database, author: mitaffinity)
def load_compatibility(filename):
    """Reconstruct a Python object from a file persisted with joblib.dump.
    This function ensure the compatibility of joblib old persistence format (<= 0.9.3)"""
    file_handle = open(filename, 'rb')
    unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
    try:
        obj = unpickler.load()
    finally:
        if hasattr(unpickler, 'file_handle'):
            unpickler.file_handle.close()
    return obj
utils.py (project: open-database, author: mitaffinity)
def load_from_disk(filename):
    return load(filename)
utils.py (project: open-database, author: mitaffinity)
def __init__(self, data_dir):
        self.data_dir = data_dir

        metadata_filename = os.path.join(self.data_dir, "metadata.joblib")
        if os.path.exists(metadata_filename):
            # self.tasks, self.metadata_df = joblib.load(metadata_filename)
            self.tasks, self.metadata_df = load_from_disk(metadata_filename)
        else:
            raise ValueError("No metadata found on disk")
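For completeness, a sketch of the (assumed) writer side that produces metadata.joblib so the reader above can unpack it into (tasks, metadata_df):

import os
import joblib

def save_metadata(data_dir, tasks, metadata_df):
    # counterpart to the loader above: persist the (tasks, metadata_df) tuple
    joblib.dump((tasks, metadata_df), os.path.join(data_dir, 'metadata.joblib'))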

