Source-code examples of Python's GPUOptions() class

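All of the snippets below follow the same TF 1.x pattern: construct a tf.GPUOptions, wrap it in a tf.ConfigProto, and hand that config to the session. A minimal sketch of the pattern, with illustrative values that are not taken from any of the projects listed here:

import tensorflow as tf  # TF 1.x API

# cap this process at roughly 40% of GPU memory and let the allocation grow on demand
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4,
                            allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options,
                        allow_soft_placement=True)
sess = tf.Session(config=config)
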
trainer.py (project: pointer-network-tensorflow, author: devsisters)
def build_session(self):
    self.saver = tf.train.Saver()
    self.summary_writer = tf.summary.FileWriter(self.model_dir)

    sv = tf.train.Supervisor(logdir=self.model_dir,
                             is_chief=True,
                             saver=self.saver,
                             summary_op=None,
                             summary_writer=self.summary_writer,
                             save_summaries_secs=300,
                             save_model_secs=self.checkpoint_secs,
                             global_step=self.model.global_step)

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=self.gpu_memory_fraction,
        allow_growth=True) # seems to be not working
    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 gpu_options=gpu_options)

    self.sess = sv.prepare_or_wait_for_session(config=sess_config)
ops.py (project: dcgan, author: zsdonghao)
def set_gpu_fraction(sess=None, gpu_fraction=0.3):
    """Set the GPU memory fraction for the application.

    Parameters
    ----------
    sess : a session instance of TensorFlow
        TensorFlow session
    gpu_fraction : a float
        Fraction of GPU memory, (0 ~ 1]

    References
    ----------
    - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_
    """
    print("  tensorlayer: GPU MEM Fraction %f" % gpu_fraction)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))
    return sess
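
A hypothetical call site for the helper above; the fraction and the variable name are illustrative only:

sess = set_gpu_fraction(gpu_fraction=0.3)  # session limited to 30% of GPU memory
# ... build and run the graph with sess ...
sess.close()
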
ops.py (project: Image-Captioning, author: zsdonghao)
def set_gpu_fraction(sess=None, gpu_fraction=0.3):
    """Set the GPU memory fraction for the application.

    Parameters
    ----------
    sess : a session instance of TensorFlow
        TensorFlow session
    gpu_fraction : a float
        Fraction of GPU memory, (0 ~ 1]

    References
    ----------
    - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_
    """
    print("  tensorlayer: GPU MEM Fraction %f" % gpu_fraction)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))
    return sess
tf_t2t.py (project: sgnmt, author: ucam-smt)
def _session_config(self):
        """Creates the session config with t2t default parameters."""
        graph_options = tf.GraphOptions(optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))
        if self._single_cpu_thread:
            config = tf.ConfigProto(
                intra_op_parallelism_threads=1,
                inter_op_parallelism_threads=1,
                allow_soft_placement=True,
                graph_options=graph_options,
                log_device_placement=False)
        else:
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=0.95)
            config = tf.ConfigProto(
                allow_soft_placement=True,
                graph_options=graph_options,
                gpu_options=gpu_options,
                log_device_placement=False)
        return config
tf_nizza.py (project: sgnmt, author: ucam-smt)
def _session_config(self):
        """Creates the session config with t2t default parameters."""
        graph_options = tf.GraphOptions(optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))
        if self._single_cpu_thread:
            config = tf.ConfigProto(
                intra_op_parallelism_threads=1,
                inter_op_parallelism_threads=1,
                allow_soft_placement=True,
                graph_options=graph_options,
                log_device_placement=False)
        else:
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=0.95)
            config = tf.ConfigProto(
                allow_soft_placement=True,
                graph_options=graph_options,
                gpu_options=gpu_options,
                log_device_placement=False)
        return config
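
Both sgnmt predictors above build the config the same way; a sketch of how such a config would typically be consumed inside the predictor (the self.sess attribute name is an assumption, not taken from sgnmt):

        # inside the predictor class, after the graph has been built
        self.sess = tf.Session(config=self._session_config())
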
demo_yolo_v2.py (project: yolov2-tensorflow, author: shishichang)
def __init__(self):
        model_name = 'yolov2-coco'
        model_dir = './models'
        gpu_id = 4
        self.gpu_utility = 0.9

        self.pb_file = '{}/{}.pb'.format(model_dir, model_name)
        self.meta_file = '{}/{}.meta'.format(model_dir, model_name)
        self.batch = 4

        self.graph = tf.Graph()
        with tf.device('/gpu:1'):
            with self.graph.as_default() as g:
                self.build_from_pb()
                gpu_options = tf.GPUOptions(allow_growth=True)
                sess_config = tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)
                self.sess = tf.Session(config = sess_config)
                self.sess.run(tf.global_variables_initializer())
        return
demo_yolo_v2.py (project: yolov2-tensorflow, author: shishichang)
def setup_meta_ops(self):
        cfg = dict({
            'allow_soft_placement': False,
            'log_device_placement': False
            })
        utility = min(self.gpu_utility, 1.0)
        if utility > 0.0:
            print('GPU model with {} usage'.format(utility))
            cfg['gpu_options'] = tf.GPUOptions(per_process_gpu_memory_fraction = utility)        
            cfg['allow_soft_placement'] = True
        else:
            print('Run totally on CPU')
            cfg['device_count'] = {'GPU': 0}

        self.sess = tf.Session(config = tf.ConfigProto(**cfg))
        self.sess.run(tf.global_variables_initializer())
plain_cnn.py (project: cifar10-tensorflow, author: persistforever)
def test(self, dataloader, backup_path, epoch, batch_size=128):
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # load the trained model
        self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        model_path = os.path.join(backup_path, 'model_%d.ckpt' % (epoch))
        assert(os.path.exists(model_path+'.index'))
        self.saver.restore(self.sess, model_path)
        print('read model from %s' % (model_path))
        # evaluate accuracy on the test set
        accuracy_list = []
        test_images = dataloader.data_augmentation(dataloader.test_images,
            flip=False, crop=True, crop_shape=(24,24,3), whiten=True, noise=False)
        test_labels = dataloader.test_labels
        for i in range(0, dataloader.n_test, batch_size):
            batch_images = test_images[i: i+batch_size]
            batch_labels = test_labels[i: i+batch_size]
            [avg_accuracy] = self.sess.run(
                fetches=[self.accuracy], 
                feed_dict={self.images:batch_images, 
                           self.labels:batch_labels,
                           self.keep_prob:1.0})
            accuracy_list.append(avg_accuracy)
        print('test precision: %.4f' % (numpy.mean(accuracy_list)))
        self.sess.close()
network.py (project: cifar10-tensorflow, author: persistforever)
def test(self, backup_path, epoch, batch_size=128):
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.45)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # load the trained model
        model_path = os.path.join(backup_path, 'model_%d.ckpt' % (epoch))
        assert(os.path.exists(model_path+'.index'))
        saver.restore(sess, model_path)
        print('read model from %s' % (model_path))
        # evaluate precision on the test set
        precision = []
        for batch in range(int(cifar10.test.num_examples / batch_size)):
            batch_image, batch_label = cifar10.test.next_batch(batch_size)
            [precision_onebatch] = sess.run(
                fetches=[self.accuracy], 
                feed_dict={self.image:batch_image, 
                           self.label:batch_label,
                           self.keep_prob:1.0})
            precision.append(precision_onebatch)
        print('test precision: %.4f' % (numpy.mean(precision)))
basic_cnn.py (project: cifar10-tensorflow, author: persistforever)
def test(self, dataloader, backup_path, epoch, batch_size=128):
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # load the trained model
        self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        model_path = os.path.join(backup_path, 'model_%d.ckpt' % (epoch))
        assert(os.path.exists(model_path+'.index'))
        self.saver.restore(self.sess, model_path)
        print('read model from %s' % (model_path))
        # evaluate accuracy on the test set
        accuracy_list = []
        test_images = dataloader.data_augmentation(dataloader.test_images,
            flip=False, crop=True, crop_shape=(24,24,3), whiten=True, noise=False)
        test_labels = dataloader.test_labels
        for i in range(0, dataloader.n_test, batch_size):
            batch_images = test_images[i: i+batch_size]
            batch_labels = test_labels[i: i+batch_size]
            [avg_accuracy] = self.sess.run(
                fetches=[self.accuracy], 
                feed_dict={self.images:batch_images, 
                           self.labels:batch_labels,
                           self.keep_prob:1.0})
            accuracy_list.append(avg_accuracy)
        print('test precision: %.4f' % (numpy.mean(accuracy_list)))
        self.sess.close()
main.py (project: VQG-tensorflow, author: JamesChuanggg)
def main(_):

    attrs = conf.__dict__['__flags']
    pp(attrs)

    dataset, img_feature, train_data = get_data(conf.input_json, conf.input_img_h5, conf.input_ques_h5, conf.img_norm)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=calc_gpu_fraction(conf.gpu_fraction))

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        model = question_generator.Question_Generator(sess, conf, dataset, img_feature, train_data)

        if conf.is_train:
            model.build_model()
            model.train()
        else:
            model.build_generator()
            model.test(test_image_path=conf.test_image_path, model_path=conf.test_model_path, maxlen=26)
train_ensemble.py (project: youtube-8m, author: wangheda)
def __init__(self, cluster, task, train_dir, log_device_placement=True):
    """"Creates a Trainer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed.
        None otherwise.
      task: A TaskSpec describing the job type and the task index.
    """

    self.cluster = cluster
    self.task = task
    self.is_master = (task.type == "master" and task.index == 0)
    self.train_dir = train_dir
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    self.config = tf.ConfigProto(log_device_placement=log_device_placement)

    if self.is_master and self.task.index > 0:
      raise StandardError("%s: Only one replica of master expected",
                          task_as_string(self.task))
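
Note that in the snippet above gpu_options is constructed but never attached to self.config, so the requested fraction has no effect as written. A sketch of what wiring it in would look like (an assumption about the intent, not the project's actual code):

    self.config = tf.ConfigProto(log_device_placement=log_device_placement,
                                 gpu_options=gpu_options)
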
validate.py (project: vae-npvc, author: JeremyCCHsu)
def main():
    if args.logdir is None:
        raise ValueError('Please specify the logdir file')

    ckpt = get_checkpoint(args.logdir)

    if ckpt is None:
        raise ValueError('No checkpoints in {}'.format(args.logdir))

    with open(os.path.join(args.logdir, 'architecture.json')) as f:
        arch = json.load(f)

    reader = VCC2016TFRManager()
    features = reader.read_whole(args.file_pattern, num_epochs=1)
    x = features['frame']
    y = features['label']
    filename = features['filename']
    y_conv = y * 0 + args.target_id

    net = MLPcVAE(arch=arch, is_training=False)
    z = net.encode(x)
    xh = net.decode(z, y)
    x_conv = net.decode(z, y_conv)

    pre_train_saver = tf.train.Saver()
    def load_pretrain(sess):
        pre_train_saver.restore(sess, ckpt)
    sv = tf.train.Supervisor(init_fn=load_pretrain)
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=gpu_options)
    with sv.managed_session(config=sess_config) as sess:
        for _ in range(reader.n_files):
            if sv.should_stop():
                break
            fetch_dict = {'x': x, 'xh': xh, 'x_conv': x_conv, 'f': filename}
            results = sess.run(fetch_dict)
            plot_spectra(results)
vae.py (project: vae-npvc, author: JeremyCCHsu)
def train(self, nIter, machine=None, summary_op=None):
        # Xh = self._validate(machine=machine, n=10)

        run_metadata = tf.RunMetadata()

        sv = tf.train.Supervisor(
            logdir=self.dirs['logdir'],
            # summary_writer=summary_writer,
            # summary_op=None,
            # is_chief=True,
            save_model_secs=300,
            global_step=self.opt['global_step'])


        # sess_config = configure_gpu_settings(args.gpu_cfg)
        sess_config = tf.ConfigProto(
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(allow_growth=True))

        with sv.managed_session(config=sess_config) as sess:
            sv.loop(60, self._refresh_status, (sess,))
            for step in range(self.arch['training']['max_iter']):
                if sv.should_stop():
                    break

                # main loop
                sess.run(self.opt['g'])

                # # output img
                # if step % 1000 == 0:
                #     xh = sess.run(Xh)
                #     with tf.gfile.GFile(
                #         os.path.join(
                #             self.dirs['logdir'],
                #             'img-anime-{:03d}k.png'.format(step // 1000),
                #         ),
                #         mode='wb',
                #     ) as fp:
                #         fp.write(xh)
transfer_cifar10_softmax_b1.py (project: deligan, author: val-iisc)
def serialize_cifar_pool3(X,filename):
    print 'About to generate file: %s' % filename
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
    X_pool3 = batch_pool3_features(sess,X)
    np.save(filename,X_pool3)
main.py (project: speechless, author: JuliusKunze)
def restrict_gpu_memory(per_process_gpu_memory_fraction: float = 0.9):
    import os
    import tensorflow as tf
    import keras
    thread_count = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=per_process_gpu_memory_fraction)
    config = tf.ConfigProto(gpu_options=gpu_options,
                            allow_soft_placement=True,
                            intra_op_parallelism_threads=thread_count) \
        if thread_count else tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
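
A hedged usage sketch for the helper above: call it once before constructing any Keras model, so that the backend session is created with the capped options (the value shown is illustrative):

restrict_gpu_memory(per_process_gpu_memory_fraction=0.5)
# any Keras model built afterwards now shares the capped TensorFlow session
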
main.py (project: photo-editing-tensorflow, author: JamesChuanggg)
def main(_):
  # preprocess
  conf.observation_dims = eval(conf.observation_dims)

  # start
  gpu_options = tf.GPUOptions(
      per_process_gpu_memory_fraction=calc_gpu_fraction(conf.gpu_fraction))

  dataset = data_loader(conf.source_path, conf.target_path)

  with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    env = Curve()

    pred_network = CNN(sess=sess,
                       observation_dims=conf.observation_dims,
                       name='pred_network', 
               trainable=True)

    policy = Policy(sess=sess, 
            pred_network=pred_network,
            env=env,
            dataset=dataset,
            conf=conf)

    if conf.is_train:
        policy.train()
    else:
        policy.test(conf.test_image_path)
tweet_replyer.py (project: tensorflow_seq2seq_chatbot, author: higepon)
def twitter_bot():
    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)

    consumer_key = os.getenv("consumer_key")
    consumer_secret = os.getenv("consumer_secret")
    access_token = os.getenv("access_token")
    access_token_secret = os.getenv("access_token_secret")

    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    with tf.Session(config=tf_config) as sess:
        predictor = predict.EasyPredictor(sess)

        for tweet in tweets():
            status_id, status, bot_flag = tweet
            print("Processing {0}...".format(status.text))
            screen_name = status.author.screen_name
            replies = predictor.predict(status.text)
            if not replies:
                print("no reply")
                continue
            reply_body = replies[0]
            if reply_body is None:
                print("No reply predicted")
            else:
                try:
                    post_reply(api, bot_flag, reply_body, screen_name, status_id)
                except tweepy.TweepError as e:
                    # duplicate status
                    if e.api_code == 187:
                        pass
                    else:
                        raise
            mark_tweet_processed(status_id)
evaluate.py (project: docnade, author: AYLIEN)
def evaluate(model, dataset, params):
    with tf.Session(config=tf.ConfigProto(
        inter_op_parallelism_threads=params.num_cores,
        intra_op_parallelism_threads=params.num_cores,
        gpu_options=tf.GPUOptions(allow_growth=True)
    )) as session:
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()

        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(params.model)
        saver.restore(session, ckpt.model_checkpoint_path)

        evaluate_retrieval(model, dataset, params, session)
        evaluate_loss(model, dataset, params, session)

