Python load(): example source code

Source: layers.py, project: fxnn, author: khaotik
def load_params(self, f_, filter_=None):
        di = pickle.load(f_)
        # optional regex filter; None means load every stored parameter
        pat = re.compile(filter_) if filter_ is not None else None
        for k, v in di.items():
            if pat is not None and not pat.fullmatch(k):
                continue
            p = self._vars_di[k].get_value(borrow=True)
            if p.shape != v.shape:
                raise ValueError('Shape mismatch for %s: need %s, got %s' % (k, p.shape, v.shape))
            self._vars_di[k].set_value(v)
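load_params expects a pickled dict mapping parameter names to numpy arrays. The save counterpart is not shown in the original; a minimal sketch that would produce that dict (assuming the same _vars_di attribute of Theano shared variables) could be:

def save_params(self, f_):
    di = {k: v.get_value(borrow=True) for k, v in self._vars_di.items()}
    pickle.dump(di, f_, protocol=pickle.HIGHEST_PROTOCOL)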
Source: datasets.py, project: benchmarks, author: tensorflow
def read_data_files(self, subset='train'):
    """Reads from data file and returns images and labels in a numpy array."""
    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
                           'data')
    if subset == 'train':
      filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                   for i in xrange(1, 6)]
    elif subset == 'validation':
      filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
      raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
      with gfile.Open(filename, 'rb') as f:  # pickle batches are binary
        inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels
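read_data_files returns the images as flat float32 rows (3072 values per CIFAR image). A caller would typically reshape them into image tensors; a sketch, with the loader instance name illustrative:

images, labels = loader.read_data_files('train')
images = images.reshape(-1, 3, 32, 32)  # CIFAR rows are 3 channels of 32x32 pixels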
Source: train_cnn.py, project: logodetect, author: munibasad
def read_data():
    with open(PICKLE_FILENAME, 'rb') as f:
        save = pickle.load(f)
        train_dataset = save['train_dataset']
        train_labels = save['train_labels']
        valid_dataset = save['valid_dataset']
        valid_labels = save['valid_labels']
        test_dataset = save['test_dataset']
        test_labels = save['test_labels']
        del save
        print('Training set', train_dataset.shape, train_labels.shape)
        print('Valid set', valid_dataset.shape, valid_labels.shape)
        print('Test set', test_dataset.shape, test_labels.shape)

    return [train_dataset, valid_dataset,
            test_dataset], [train_labels, valid_labels, test_labels]
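Since read_data returns two parallel lists, callers unpack them positionally:

datasets, labels = read_data()
train_dataset, valid_dataset, test_dataset = datasets
train_labels, valid_labels, test_labels = labels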
Source: community.py, project: micom, author: resendislab
def load_pickle(filename):
    """Load a community model from a pickled version.

    Parameters
    ----------
    filename : str
        The file the community is stored in.

    Returns
    -------
    micom.Community
        The loaded community model.

    """
    with open(filename, mode="rb") as infile:
        return pickle.load(infile)
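For completeness, the pickle this reads would have been produced by dumping the Community object; a minimal sketch of that writer side using only the standard library:

def save_pickle(community, filename):
    with open(filename, mode="wb") as outfile:
        pickle.dump(community, outfile, protocol=pickle.HIGHEST_PROTOCOL)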
Source: logistic_sgd.py, project: RBM-DBN-theano-DL4J, author: lzhbrian
def predict():
    """
    An example of how to load a trained model and use it
    to predict labels.
    """

    # load the saved model
    classifier = pickle.load(open('best_model.pkl', 'rb'))

    # compile a predictor function
    predict_model = theano.function(
        inputs=[classifier.input],
        outputs=classifier.y_pred)

    # We can test it on some examples from the test set
    dataset = 'mnist.pkl.gz'
    datasets = load_data(dataset)
    test_set_x, test_set_y = datasets[2]
    test_set_x = test_set_x.get_value()

    predicted_values = predict_model(test_set_x[:10])
    print("Predicted values for the first 10 examples in test set:")
    print(predicted_values)
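The 'best_model.pkl' file is assumed to have been written during training as a plain dump of the classifier object, e.g. (sketch):

with open('best_model.pkl', 'wb') as f:
    pickle.dump(classifier, f, protocol=pickle.HIGHEST_PROTOCOL)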
Source: utils.py, project: MIL.pytorch, author: gujiuxiang
def history_infos(opt):
    infos = {}
    if len(opt.start_from) != 0:  # open old infos and check if models are compatible
        model_id = opt.start_from
        infos_id = model_id.replace('save/', '') + '.infos-best.pkl'
        with open(os.path.join(opt.start_from, infos_id), 'rb') as f:
            infos = cPickle.load(f)
            saved_model_opt = infos['opt']

    iteration = infos.get('iter', 0)
    epoch = infos.get('epoch', 0)
    val_result_history = infos.get('val_result_history', {})
    loss_history = infos.get('loss_history', {})
    lr_history = infos.get('lr_history', {})
    best_val_score = infos.get('best_val_score', None) if opt.load_best_score == 1 else 0
    val_loss = 0.0
    val_history = [val_result_history, best_val_score, val_loss]
    train_history = [loss_history, lr_history]
    return opt, infos, iteration, epoch, val_history, train_history
Source: cifar.py, project: keras, author: GeekLiB
def load_batch(fpath, label_key='labels'):
    f = open(fpath, 'rb')
    if sys.version_info < (3,):
        d = cPickle.load(f)
    else:
        d = cPickle.load(f, encoding="bytes")
        # decode utf8
        d_decoded = {}
        for k, v in d.items():
            d_decoded[k.decode("utf8")] = v
        d = d_decoded
    f.close()
    data = d["data"]
    labels = d[label_key]

    data = data.reshape(data.shape[0], 3, 32, 32)
    return data, labels
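The label_key argument exists because CIFAR-100 batches store 'fine_labels'/'coarse_labels' rather than 'labels'. A typical caller stacks the five CIFAR-10 training batches returned by load_batch; a sketch, with the directory name illustrative:

import numpy as np

x_parts, y_parts = [], []
for i in range(1, 6):
    data, labels = load_batch('cifar-10-batches-py/data_batch_%d' % i)
    x_parts.append(data)
    y_parts.append(labels)
x_train = np.concatenate(x_parts)  # (50000, 3, 32, 32)
y_train = np.concatenate(y_parts)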
Source: indra.py, project: pybel, author: pybel
def from_indra_pickle(path, name=None, version=None, description=None):
    """Imports a model from :mod:`indra`.

    :param str path: Path to pickled list of :class:`indra.statements.Statement`
    :param str name: The name for the BEL graph
    :param str version: The version of the BEL graph
    :param str description: The description of the BEL graph
    :rtype: pybel.BELGraph
    """
    with open(path, 'rb') as f:
        statements = load(f)

    return from_indra_statements(
        statements=statements,
        name=name,
        version=version,
        description=description
    )
Source: mypeda.py, project: mgtools, author: miyagaw61
def restore_snapshot(self, filename=None):
        """
        Restore a saved snapshot of current process from file
        Warning: this is not thread safe, do not use with multithread program

        Args:
            - filename: saved snapshot file

        Returns:
            - Bool
        """
        if not filename:
            filename = self.get_config_filename("snapshot")

        with open(filename, "rb") as fd:
            snapshot = pickle.load(fd)
        return self.give_snapshot(snapshot)
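The save side is not shown; a sketch of the obvious counterpart (take_snapshot is assumed to be the method that captures the picklable process state):

def save_snapshot(self, filename=None):
        if not filename:
            filename = self.get_config_filename("snapshot")
        snapshot = self.take_snapshot()  # assumed counterpart API
        with open(filename, "wb") as fd:
            pickle.dump(snapshot, fd)
        return True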


    #########################
    #   Memory Operations   #
    #########################
Source: logistic_sgd.py, project: tbp-next-basket, author: GiulioRossetti
def predict():
    """
    An example of how to load a trained model and use it
    to predict labels.
    """

    # load the saved model
    classifier = pickle.load(open('best_model.pkl', 'rb'))

    # compile a predictor function
    predict_model = theano.function(
        inputs=[classifier.input],
        outputs=classifier.y_pred)

    # We can test it on some examples from the test set
    dataset = 'mnist.pkl.gz'
    datasets = load_data(dataset)
    test_set_x, test_set_y = datasets[2]
    test_set_x = test_set_x.get_value()

    predicted_values = predict_model(test_set_x[:10])
    print("Predicted values for the first 10 examples in test set:")
    print(predicted_values)
Source: image_processing.py, project: ML-Project, author: Shiam-Chowdhury
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
  dataset_names = []
  folders_list = os.listdir(data_folders)
  for folder in folders_list:

    #print(os.path.join(data_folders, folder))
    curr_folder_path = os.path.join(data_folders, folder)
    if os.path.isdir(curr_folder_path):
        set_filename = curr_folder_path + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
          # You may override by setting force=True.
          print('%s already present - Skipping pickling.' % set_filename)
        else:
          print('Pickling %s.' % set_filename)
          dataset = load_letter(curr_folder_path, min_num_images_per_class) # load and normalize the data
          try:
            with open(set_filename, 'wb') as f:
                pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
          except Exception as e:
            print('Unable to save data to', set_filename, ':', e)

  return dataset_names
Source: read_PascalVocData.py, project: FCN-GoogLeNet, author: DeepSegment
def read_dataset(data_dir):
    pickle_filename = "PascalVoc.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_tarfile=True)
        PascalVoc_folder = "VOCdevkit"
        result = create_image_lists(os.path.join(data_dir, PascalVoc_folder))
        print ("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print ("Found pickle file!")

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        training_records = result['training']
        validation_records = result['validation']
        del result

    return training_records, validation_records
Source: read_MITSceneParsingData.py, project: FCN-GoogLeNet, author: DeepSegment
def read_dataset(data_dir):
    pickle_filename = "MITSceneParsing.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
        SceneParsing_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
        result = create_image_lists(os.path.join(data_dir, SceneParsing_folder))
        print ("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print ("Found pickle file!")

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        training_records = result['training']
        validation_records = result['validation']
        del result

    return training_records, validation_records
Source: timit.py, project: DaNet-Tensorflow, author: khaotik
def install_and_load(self):
        # TODO automatically install if fails to find anything
        FILE_NOT_FOUND_MSG = (
            'Did not find TIMIT file "%s"'
            '; make sure you download and install the dataset')
        self.subset = {}
        path = os.path.join(os.path.dirname(__file__), 'TIMIT', '%s_set.pkl')
        for subset in ['train', 'test']:
            filepath = path % subset
            if not os.path.exists(filepath):
                raise IOError(
                    FILE_NOT_FOUND_MSG % filepath)

            with open(filepath, 'rb') as f:
                gc.disable()
                all_data = [pickle.load(f)]
                all_data.append(pickle.load(f))
                all_data.append(pickle.load(f))
                gc.enable()
            self.subset[subset] = all_data

        # use same subset for validation / test
        # as TIMIT is small
        self.subset['valid'] = self.subset['test']
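Note the three consecutive pickle.load calls on one file handle: each call returns the next object in the order it was dumped, so the writer must have issued three sequential pickle.dump calls per file (the part names below are purely illustrative). The gc.disable()/gc.enable() pair is a CPython trick that speeds up unpickling large numbers of small objects.

with open(path % 'train', 'wb') as f:
    for part in (signals, phoneme_labels, texts):  # illustrative names
        pickle.dump(part, f, protocol=pickle.HIGHEST_PROTOCOL)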
Source: read_celebADataset.py, project: EBGAN.tensorflow, author: shekkizh
def read_dataset(data_dir):
    pickle_filename = "celebA.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
        celebA_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
        result = create_image_lists(os.path.join(data_dir, celebA_folder))
        print ("Training set: %d" % len(result['train']))
        print ("Test set: %d" % len(result['test']))
        print ("Validation set: %d" % len(result['validation']))
        print ("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print ("Found pickle file!")

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        training_images = result['train']
        testing_images = result['test']
        validation_images = result['validation']

        del result
    return training_images, testing_images, validation_images
Source: logistic_sgd.py, project: Theano-MPI, author: uoguelph-mlrg
def predict():
    """
    An example of how to load a trained model and use it
    to predict labels.
    """

    # load the saved model
    classifier = pickle.load(open('best_model.pkl', 'rb'))

    # compile a predictor function
    predict_model = theano.function(
        inputs=[classifier.input],
        outputs=classifier.y_pred)

    # We can test it on some examples from the test set
    dataset = 'mnist.pkl.gz'
    datasets = load_data(dataset)
    test_set_x, test_set_y = datasets[2]
    test_set_x = test_set_x.get_value()

    predicted_values = predict_model(test_set_x[:10])
    print("Predicted values for the first 10 examples in test set:")
    print(predicted_values)
Source: reuters.py, project: deep-learning-keras-projects, author: jasmeetsb
def get_word_index(path='reuters_word_index.pkl'):
    """Retrieves the dictionary mapping word indices back to words.

    # Arguments
        path: where to cache the data (relative to `~/.keras/dataset`).

    # Returns
        The word index dictionary.
    """
    path = get_file(path, origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.pkl')
    f = open(path, 'rb')

    if sys.version_info < (3,):
        data = cPickle.load(f)
    else:
        data = cPickle.load(f, encoding='latin1')

    f.close()
    return data
Source: imdb.py, project: deep-learning-keras-projects, author: jasmeetsb
def get_word_index(path='imdb_word_index.pkl'):
    """Retrieves the dictionary mapping word indices back to words.

    # Arguments
        path: where to cache the data (relative to `~/.keras/dataset`).

    # Returns
        The word index dictionary.
    """
    path = get_file(path,
                    origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.pkl',
                    md5_hash='72d94b01291be4ff843198d3b0e1e4d7')
    f = open(path, 'rb')

    if sys.version_info < (3,):
        data = cPickle.load(f)
    else:
        data = cPickle.load(f, encoding='latin1')

    f.close()
    return data
Source: mnist.py, project: deep-learning-keras-projects, author: jasmeetsb
def load_data(path='mnist.pkl.gz'):
    """Loads the MNIST dataset.

    # Arguments
        path: path where to cache the dataset locally
            (relative to ~/.keras/datasets).

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    path = get_file(path, origin='https://s3.amazonaws.com/img-datasets/mnist.pkl.gz')

    if path.endswith('.gz'):
        f = gzip.open(path, 'rb')
    else:
        f = open(path, 'rb')

    if sys.version_info < (3,):
        data = cPickle.load(f)
    else:
        data = cPickle.load(f, encoding='bytes')

    f.close()
    return data  # (x_train, y_train), (x_test, y_test)
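Unpacking follows the return shape given in the docstring:

(x_train, y_train), (x_test, y_test) = load_data()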
Source: sample.py, project: sequelspeare, author: raidancampbell
def __init__(self, save_dir=SAVE_DIR, prime_text=PRIME_TEXT, num_sample_symbols=NUM_SAMPLE_SYMBOLS):
        self.save_dir = save_dir
        self.prime_text = prime_text
        self.num_sample_symbols = num_sample_symbols
        with open(os.path.join(self.save_dir, 'chars_vocab.pkl'), 'rb') as file:
            self.chars, self.vocab = cPickle.load(file)
            self.model = Model(len(self.chars), is_sampled=True)

            # polite GPU memory allocation: don't grab everything you can.
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            config.gpu_options.allocator_type = 'BFC'
            self.sess = tf.Session(config=config)

            tf.initialize_all_variables().run(session=self.sess)
            self.checkpoint = tf.train.get_checkpoint_state(self.save_dir)
            if self.checkpoint and self.checkpoint.model_checkpoint_path:
                tf.train.Saver(tf.all_variables()).restore(self.sess, self.checkpoint.model_checkpoint_path)
Source: session.py, project: smhr, author: andycasey
def update_default_setting(self, key_tree, value):
        """
        Update a default value in the local settings file.

        :param key_tree:
            A tuple containing a tree of dictionary keys.

        :param value:
            The value for the setting.
        """

        # Open the defaults.
        with open(self._default_settings_path, "rb") as fp:
            defaults = yaml.load(fp)

        branch = defaults
        for key in key_tree[:-1]:
            branch.setdefault(key, {})
            branch = branch[key]
        branch[key_tree[-1]] = value

        with open(self._default_settings_path, "w") as fp:
            fp.write(yaml.dump(defaults))

        return True
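An example call on a Session instance (the key names are illustrative, not smhr's actual settings schema); this would set defaults["normalization"]["knot_spacing"] = 20 and write the file back:

session.update_default_setting(("normalization", "knot_spacing"), 20)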
Source: tf-keras-skeleton.py, project: LIE, author: EmbraceLife
def model_from_yaml(yaml_string, custom_objects=None):
      """Parses a yaml model configuration file and returns a model instance.

      Arguments:
          yaml_string: YAML string encoding a model configuration.
          custom_objects: Optional dictionary mapping names
              (strings) to custom classes or functions to be
              considered during deserialization.

      Returns:
          A Keras model instance (uncompiled).

      Raises:
          ImportError: if yaml module is not found.
      """
      if yaml is None:
        raise ImportError('Requires yaml module installed.')
      config = yaml.load(yaml_string)
      return layer_module.deserialize(config, custom_objects=custom_objects)
Source: tf-keras-skeleton.py, project: LIE, author: EmbraceLife
def to_yaml(self, **kwargs):
            """Returns a yaml string containing the network configuration.

            To load a network from a yaml save file, use
            `keras.models.model_from_yaml(yaml_string, custom_objects={})`.

            `custom_objects` should be a dictionary mapping
            the names of custom losses / layers / etc to the corresponding
            functions / classes.

            Arguments:
                **kwargs: Additional keyword arguments
                    to be passed to `yaml.dump()`.

            Returns:
                A YAML string.

            Raises:
                ImportError: if yaml module is not found.
            """
            if yaml is None:
              raise ImportError('Requires yaml module installed.')
            return yaml.dump(self._updated_config(), **kwargs)
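Paired with model_from_yaml above, this gives a configuration-only round trip (weights are not serialized); a sketch:

yaml_string = model.to_yaml()
fresh_model = model_from_yaml(yaml_string, custom_objects={})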
Source: tf-keras-skeleton.py, project: LIE, author: EmbraceLife
def load_data(path='mnist.npz'):
          """Loads the MNIST dataset.

          Arguments:
              path: path where to cache the dataset locally
                  (relative to ~/.keras/datasets).

          Returns:
              Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
          """
          path = get_file(
              path, origin='https://s3.amazonaws.com/img-datasets/mnist.npz')
          f = np.load(path)
          x_train = f['x_train']
          y_train = f['y_train']
          x_test = f['x_test']
          y_test = f['y_test']
          f.close()
          return (x_train, y_train), (x_test, y_test)
Source: tf-keras-skeleton.py, project: LIE, author: EmbraceLife
def get_word_index(path='reuters_word_index.json'):
          """Retrieves the dictionary mapping word indices back to words.

          Arguments:
              path: where to cache the data (relative to `~/.keras/dataset`).

          Returns:
              The word index dictionary.
          """
          path = get_file(
              path,
              origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json')
          f = open(path)
          data = json.load(f)
          f.close()
          return data
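Despite the docstring phrasing, the dict returned by these get_word_index helpers maps each word to its integer index, so decoding an encoded sample means inverting it:

word_index = get_word_index()
index_word = {i: w for w, i in word_index.items()}  # e.g. index_word[1] is the most frequent word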
Source: learningAgent.py, project: Japan_Mahjong-AI-project, author: willywsm1013
def __init__(self,player_number,epsilon=0.5,discount=0.8,alpha=1e-4,mode='test',pickle_name = None,lr_decay_fn = None):
        Agent.__init__(self,player_number)
        self.epsilon = epsilon
        self.discount = discount
        self.alpha = alpha
        self.setLearningTarget()
        self.lr_decay = lr_decay_fn
        if mode == 'train':
            self.train = True
        elif mode == 'test':
            self.train = False
        else:
            raise ValueError("no mode '%s' for QlearningAgent" % mode)

        self.reset()
        if pickle_name is not None:
            self.load(pickle_name)
        print ('epsilon :', self.epsilon)
        print ('learning :', self.alpha)
        print ('discount :', self.discount)
Source: sample.py, project: jaylyrics_generation_tensorflow, author: hundred06
def sample(args):
    # import configuration
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'rb') as f:
        words, vocab = cPickle.load(f)
    # import the trained model
    model = Model(saved_args, True)
    with tf.Session() as sess:
        # initialize the model
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # sample the new sequence word by word
            literature = model.sample(sess, words, vocab, args.n, args.start, args.sample)
    with codecs.open('result/sequence.txt','a','utf-8') as f:
        f.write(literature+'\n\n')
    print(literature)
Source: data_loader.py, project: tensorflow_image_tutorial, author: ybenoit
def load(self):

        with open(os.path.join(self.data_dir, "notMNIST.pickle"), 'rb') as f:
            save = pickle.load(f)
            train_dataset = save['train_dataset']
            train_labels = save['train_labels']
            valid_dataset = save['valid_dataset']
            valid_labels = save['valid_labels']
            test_dataset = save['test_dataset']
            test_labels = save['test_labels']
            del save  # hint to help gc free up memory

        train_dataset, train_labels = self.reformat(train_dataset, train_labels, self.image_size, self.num_labels)
        valid_dataset, valid_labels = self.reformat(valid_dataset, valid_labels, self.image_size, self.num_labels)
        test_dataset, test_labels = self.reformat(test_dataset, test_labels, self.image_size, self.num_labels)

        return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
Source: sample.py, project: word-rnn-tf, author: jtoy
def sample(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    val_loss_file = args.save_dir + '/val_loss.json'
    with tf.Session() as sess:
        saver = tf.train.Saver(tf.all_variables())
        if os.path.exists(val_loss_file):
            with open(val_loss_file, "r") as text_file:
                text = text_file.read()
                loss_json = json.loads(text)
                losses = sorted(loss_json.keys(), key=float)
                loss = losses[0]  # checkpoint with the lowest validation loss
                model_checkpoint_path = loss_json[loss]['checkpoint_path']
                saver.restore(sess, model_checkpoint_path)
                result = model.sample(sess, chars, vocab, args.n, args.prime, args.sample_rule, args.temperature)
                print(result)
                output = "/data/output/"+ str(int(time.time())) + ".txt"
                with open(output, "w") as text_file:
                    text_file.write(result)
                print(output)
Source: opt.py, project: PyPSA, author: PyPSA
def empty_network(network):
    logger.debug("Storing pypsa timeseries to disk")
    from .components import all_components

    panels = {}
    for c in all_components:
        attr = network.components[c]["list_name"] + "_t"
        panels[attr] = getattr(network, attr)
        setattr(network, attr, None)

    fd, fn = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as f:
        pickle.dump(panels, f, -1)

    del panels

    gc.collect()
    yield

    logger.debug("Reloading pypsa timeseries from disk")
    with open(fn, 'rb') as f:
        panels = pickle.load(f)
    os.remove(fn)
    for attr, pnl in iteritems(panels):
        setattr(network, attr, pnl)
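Because empty_network yields once mid-function, it is evidently consumed as a context manager (presumably decorated with contextlib.contextmanager in the original module); usage would look like:

with empty_network(network):
    run_memory_heavy_step(network)  # illustrative: the timeseries sit on disk here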

