Python dump() usage examples from open-source projects

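The snippets below are collected from open-source projects and show dump() in its two most common forms: pickle.dump() for binary serialization of Python objects, and json.dump() for text-based interchange. As a minimal self-contained sketch of both (the file names here are illustrative):

import json
import pickle

record = {'symbol': 'AAPL', 'values': [1, 2, 3]}

# pickle writes bytes, so the file must be opened in binary mode
with open('record.pkl', 'wb') as f:
    pickle.dump(record, f, protocol=pickle.HIGHEST_PROTOCOL)

# json writes text, so the file is opened in text mode
with open('record.json', 'w') as f:
    json.dump(record, f, indent=2)
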
logs.py (project: ScraXBRL, author: tooksoi)
def add_extract_data(symbol, extract_data, complete):
    """Add data regarding scrape or extract to master log."""

    complete_key = 'complete' if complete else 'incomplete'
    with open(settings.EXTRACT_LOG_FILE_PATH, "rb") as f:
        data_log = pickle.load(f)
    if symbol not in data_log:
        data_log[symbol] = {'complete': [], 'incomplete': []}
    data_log[symbol][complete_key].append(extract_data)
    with open(settings.EXTRACT_LOG_FILE_PATH, "wb") as f:
        pickle.dump(data_log, f)
model.py (project: deeppavlov, author: deepmipt)
def save(self, fname=None):
        """Save the parameters of the agent to a file."""
        fname = self.opt.get('model_file', None) if fname is None else fname

        if fname:
            if self.model_type == 'nn':
                print("[ saving model: " + fname + " ]")
                self.model.save_weights(fname + '.h5')
                self.embedding_dict.save_items(fname)

            if self.model_type == 'ngrams':
                print("[ saving model: " + fname + " ]")
                with open(fname + '_cls.pkl', 'wb') as model_file:
                    pickle.dump(self.model, model_file)

            with open(fname + '_opt.json', 'w') as opt_file:
                json.dump(self.opt, opt_file)
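A load counterpart would mirror these steps; a minimal hypothetical sketch (the method and its structure are assumptions inferred from the save path above, not deeppavlov's actual API):

def load(self, fname=None):
    # hypothetical inverse of save(): restore parameters from files
    fname = self.opt.get('model_file', None) if fname is None else fname
    if fname:
        if self.model_type == 'nn':
            self.model.load_weights(fname + '.h5')
            # embedding_dict restore omitted: its load API is not shown above
        if self.model_type == 'ngrams':
            with open(fname + '_cls.pkl', 'rb') as model_file:
                self.model = pickle.load(model_file)
        with open(fname + '_opt.json', 'r') as opt_file:
            self.opt = json.load(opt_file)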
flora.py (project: flora, author: Lamden)
def generate(location):
    # cli wizard for creating a new contract from a template
    if directory_has_smart_contract(location):
        with open(glob.glob(os.path.join(location, '*.json'))[0]) as f:
            example_payload = json.load(f)
        print(example_payload)
        for k, v in example_payload.items():
            value = input(k + ':')
            if value != '':
                example_payload[k] = value
        print(example_payload)

        code_path = glob.glob(os.path.join(location, '*.tsol'))
        with open(code_path[0]) as f:
            tsol.compile(f, example_payload)
        print('Code compiles with new payload.')
        while True:
            selection = input('(G)enerate Solidity contract or (E)xport implementation:')
            if selection.lower() == 'g':
                output_name = input('Name your contract file without an extension:')
                with open(code_path[0]) as f:
                    code = tsol.generate_code(f.read(), example_payload)
                with open(os.path.join(location, '{}.sol'.format(output_name)), 'w') as f:
                    f.write(code)
                break

            if selection.lower() == 'e':
                output_name = input('Name your implementation file without an extension:')
                with open(os.path.join(location, '{}.json'.format(output_name)), 'w') as f:
                    json.dump(example_payload, f)
                break
    else:
        print('Provided directory does not contain a *.tsol and *.json or does not compile.')
workflow.py (project: alfred-mpd, author: deanishe)
def register(self, name, serializer):
        """Register ``serializer`` object under ``name``.

        Raises :class:`AttributeError` if ``serializer`` is invalid.

        .. note::

            ``name`` will be used as the file extension of the saved files.

        :param name: Name to register ``serializer`` under
        :type name: ``unicode`` or ``str``
        :param serializer: object with ``load()`` and ``dump()``
            methods

        """
        # Basic validation: getattr raises AttributeError if either
        # method is missing
        getattr(serializer, 'load')
        getattr(serializer, 'dump')

        self._serializers[name] = serializer
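Anything exposing load() and dump() callables satisfies this contract. A minimal sketch of a custom serializer one could register (the YAML class, the PyYAML dependency, and the module-level manager are illustrative assumptions, not part of alfred-mpd):

import yaml

class YAMLSerializer(object):
    # hypothetical serializer: any object with load()/dump() works

    @classmethod
    def load(cls, file_obj):
        return yaml.safe_load(file_obj)

    @classmethod
    def dump(cls, obj, file_obj):
        return yaml.safe_dump(obj, file_obj)

# registered under 'yaml', saved files get a .yaml extension
manager.register('yaml', YAMLSerializer)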
workflow.py (project: alfred-mpd, author: deanishe)
def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open JSON file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: JSON-serializable data structure
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """
        # note: the ``encoding`` keyword exists only on Python 2;
        # Python 3's json.dump writes text and accepts no encoding argument
        return json.dump(obj, file_obj, indent=2, encoding='utf-8')
workflow.py (project: alfred-mpd, author: deanishe)
def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open pickle file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: Python object
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """
        # protocol=-1 selects the highest pickle protocol available
        return cPickle.dump(obj, file_obj, protocol=-1)
workflow.py (project: alfred-mpd, author: deanishe)
def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open pickle file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: Python object
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """
        return pickle.dump(obj, file_obj, protocol=-1)


# Set up default manager and register built-in serializers
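A sketch of what that module-level setup plausibly looks like, inferred from the classmethods above (the manager and class names are assumptions, not confirmed by the snippets):

manager = SerializerManager()
manager.register('cpickle', CPickleSerializer)
manager.register('pickle', PickleSerializer)
manager.register('json', JSONSerializer)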
workflow.py (project: alfred-mpd, author: deanishe)
def cache_data(self, name, data):
        """Save ``data`` to cache under ``name``.

        If ``data`` is ``None``, the corresponding cache file will be
        deleted.

        :param name: name of datastore
        :param data: data to store. This may be any object supported by
                the cache serializer

        """
        serializer = manager.serializer(self.cache_serializer)

        cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))

        if data is None:
            if os.path.exists(cache_path):
                os.unlink(cache_path)
                self.logger.debug('Deleted cache file : %s', cache_path)
            return

        with atomic_writer(cache_path, 'wb') as file_obj:
            serializer.dump(data, file_obj)

        self.logger.debug('Cached data saved at : %s', cache_path)
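A hypothetical call site, assuming wf is the enclosing Workflow instance and the cache name is illustrative:

# store a value, then delete it
wf.cache_data('mpd-status', {'host': 'localhost', 'port': 6600})
wf.cache_data('mpd-status', None)  # passing None deletes the cache file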
audio_converter.py (project: subtitle-synchronization, author: AlbertoSabater)
def generateDatasets(train_files, cut_data, len_mfcc, step_mfcc, hop_len, freq):

    X, Y = [], []

    for tf in train_files:

        train_data, labels = generateSingleDataset(tf, cut_data, len_mfcc, step_mfcc, hop_len, freq)

        X.append(train_data)
        Y.append(labels)

    X = np.concatenate(X)
    Y = np.concatenate(Y)

    prefix = 'dataset_CUT_' if cut_data else 'dataset_'
    filename = STORE_DIR + prefix + '_'.join(str(v) for v in
            [freq, hop_len, len_mfcc, step_mfcc, X.shape[0], X.shape[1], X.shape[2]]) + '.pickle'
    print(filename)
    # pickle data must be written in binary mode ('wb', not 'w')
    with open(filename, 'wb') as f:
        pickle.dump([X, Y], f)

    return X, Y


# Generate a dataset from all available files
train_nets.py (project: subtitle-synchronization, author: AlbertoSabater)
def sotreResults(results, v):
# %%
    import pickle

    # pickle requires binary mode ('wb', not 'w')
    with open('test_results_' + v + '.pickle', 'wb') as f:
        pickle.dump(results, f)


# %% 

# Plot stored training statistics. Look for the best model
utils.py (project: DeepAnomaly, author: adiyoss)
def store_prediction_and_ground_truth(model):
    input_size = 1
    maxlen = 140
    batch_size = 32

    db = read_data('../data/ECG5000_TEST_PHASE_1_CONTINUOUS_SIGNAL_1.pkl')
    X = create_sequences(db[:-maxlen], win_size=maxlen, step=maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], input_size))
    Y = create_sequences(db[maxlen:], win_size=maxlen, step=maxlen).flatten()

    prediction = model.predict(X, batch_size, verbose=1)
    prediction = prediction.flatten()
    with open('../data/ECG5000_TRAIN_PHASE_2_CONTINUOUS_SIGNAL_1.pkl', 'wb') as f:
        pickle.dump(np.stack((Y, prediction)), f)
utils.py (project: DeepAnomaly, author: adiyoss)
def prepare_data():
    test_data = read_data("../data/ECG5000_TEST_CONTINUOUS_SIGNAL_1.pkl")
    test_data_half_len = int(len(test_data) / 2)

    with open("../data/ECG5000_TEST_PHASE_1_CONTINUOUS_SIGNAL_1.pkl", "wb") as f:
        pickle.dump(test_data[:test_data_half_len], f)

    with open("../data/ECG5000_TEST_PHASE_2_CONTINUOUS_SIGNAL_1.pkl", "wb") as f:
        pickle.dump(test_data[test_data_half_len:], f)
roborally.py (project: robot-arena, author: kenganong)
def log_results(state):
  if config.print_results:
    print('Final Results!')
    for brain in sorted(state.brains, key = lambda x: x.placement):
      print('{}. {}  with {} flags (scored: {})  surviving {} iterations ({} robots left)'.format(brain.placement,
            brain.name, brain.max_flag, brain.total_flags, brain.iterations_survived, brain.robots_alive))
  if config.save_replay:
    filename = 'roborally/replays/{}.pickle'.format(replay['name'])
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, 'wb') as replay_file:
      pickle.dump(replay, replay_file)
preposition_model.py (project: onto-lstm, author: pdasigi)
def save_model(self, epoch):
        '''
        Saves the current model using the epoch id to identify the file.
        '''
        self.model.save("%s_%d.model" % (self.model_name_prefix, epoch))
        with open("%s.dataproc" % self.model_name_prefix, "wb") as f:
            pickle.dump(self.data_processor, f)
model_pp_relation.py (project: onto-lstm, author: pdasigi)
def save_model(self, epoch):
        with open("%s.label_map" % self.model_name_prefix, "wb") as f:
            pickle.dump(self.label_map, f)
        super(PPRelationModel, self).save_model(epoch)
model_entailment.py (project: onto-lstm, author: pdasigi)
def save_model(self, epoch):
        '''
        Saves the current model using the epoch id to identify the file.
        '''
        self.model.save("%s_%d.model" % (self.model_name_prefix, epoch))
        with open("%s.dataproc" % self.model_name_prefix, "wb") as f:
            pickle.dump(self.data_processor, f)
        with open("%s.labelmap" % self.model_name_prefix, "wb") as f:
            pickle.dump(self.label_map, f)
data.py (project: DREAM, author: LaceyChen17)
def get_users_orders(self, prior_or_train):
        '''
            get users' prior detailed orders
        '''
        if os.path.exists(self.cache_dir + 'users_orders.pkl'):
            with open(self.cache_dir + 'users_orders.pkl', 'rb') as f:
                users_orders = pickle.load(f)
        else:
            orders = self.get_orders()
            order_products_prior = self.get_orders_items(prior_or_train)
            users_orders = pd.merge(order_products_prior, orders[['user_id', 'order_id', 'order_number', 'days_up_to_last']], 
                        on = ['order_id'], how = 'left')
            with open(self.cache_dir + 'users_orders.pkl', 'wb') as f:
                pickle.dump(users_orders, f, pickle.HIGHEST_PROTOCOL)
        return users_orders
data.py (project: DREAM, author: LaceyChen17)
def get_users_products(self, prior_or_train):
        '''
            get users' all purchased products
        '''
        if os.path.exists(self.cache_dir + 'users_products.pkl'):
            with open(self.cache_dir + 'users_products.pkl', 'rb') as f:
                users_products = pickle.load(f)
        else:
            users_products = self.get_users_orders(prior_or_train)[['user_id', 'product_id']].drop_duplicates()
            users_products['product_id'] = users_products.product_id.astype(int)
            users_products['user_id'] = users_products.user_id.astype(int)
            users_products = users_products.groupby(['user_id'])['product_id'].apply(list).reset_index()
            with open(self.cache_dir + 'users_products.pkl', 'wb') as f:
                pickle.dump(users_products, f, pickle.HIGHEST_PROTOCOL)
        return users_products
data.py (project: DREAM, author: LaceyChen17)
def get_baskets(self, prior_or_train, reconstruct = False, reordered = False, none_idx = 49689):
        '''
            get users' baskets
        '''
        if reordered:
            filepath = self.cache_dir + './reorder_basket_' + prior_or_train + '.pkl'
        else:
            filepath = self.cache_dir + './basket_' + prior_or_train + '.pkl'

        if (not reconstruct) and os.path.exists(filepath):
            with open(filepath, 'rb') as f:
                up_basket = pickle.load(f)
        else:          
            up = self.get_users_orders(prior_or_train).sort_values(['user_id', 'order_number', 'product_id'], ascending = True)
            uid_oid = up[['user_id', 'order_number']].drop_duplicates()
            up = up[up.reordered == 1][['user_id', 'order_number', 'product_id']] if reordered else up[['user_id', 'order_number', 'product_id']]
            up_basket = up.groupby(['user_id', 'order_number'])['product_id'].apply(list).reset_index()
            up_basket = pd.merge(uid_oid, up_basket, on = ['user_id', 'order_number'], how = 'left')
            for row in up_basket.loc[up_basket.product_id.isnull(), 'product_id'].index:
                up_basket.at[row, 'product_id'] = [none_idx]
            up_basket = up_basket.sort_values(['user_id', 'order_number'], ascending = True).groupby(['user_id'])['product_id'].apply(list).reset_index()
            up_basket.columns = ['user_id', 'reorder_basket'] if reordered else ['user_id', 'basket']
            with open(filepath, 'wb') as f:
                pickle.dump(up_basket, f, pickle.HIGHEST_PROTOCOL)
        return up_basket
parse_indepexpends.py (project: SuperPACs, author: SpencerNorris)
def donations(filename='donationdata.pickle'):

    try:
        with open(filename, 'rb') as handle:
            donations = pickle.load(handle)
        print("donation data pickled already. Grabbing data from donationdata.pickle")
        return donations
    except (IOError, EOFError):
        # a missing cache file raises IOError; a truncated one raises EOFError
        print("donation data not pickled, grabbing directly from FEC and ProPublica APIs")
        donations = donations_helper()

        with open(filename, 'wb') as handle:
            pickle.dump(donations, handle, protocol=pickle.HIGHEST_PROTOCOL)

        return donations
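
The load-or-rebuild idiom above also appears in the DREAM snippets; a generic hedged sketch of the same pattern (the helper name and call are illustrative, not from any project here):

import os
import pickle

def cached_pickle(path, compute):
    # return the object pickled at `path`, computing and caching it on a miss
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    obj = compute()
    with open(path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
    return obj

# e.g. donations = cached_pickle('donationdata.pickle', donations_helper)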

