Python examples of list_local_devices()

graph.py (project: dvae, author: dojoteef)
from tensorflow.python.client import device_lib

def get_available_gpus():
    """ Get the names of the available GPUs """
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
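On TensorFlow 2.x the same check is available through the public API and, unlike list_local_devices(), it does not initialize the devices. A minimal sketch, assuming a TF 2.x installation:

import tensorflow as tf

# list_physical_devices does not create device contexts, so it will
# not grab GPU memory the way list_local_devices() can.
gpus = tf.config.list_physical_devices('GPU')
print([g.name for g in gpus])  # e.g. ['/physical_device:GPU:0']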
test_util.py (project: imperative, author: yaroslavvb)
def haveGpu0(self):
    device_names = [d.name for d in device_lib.list_local_devices()]
    return "/gpu:0" in device_names
imperative.py (project: imperative, author: yaroslavvb)
def haveGpu0(self):
    device_names = [d.name for d in device_lib.list_local_devices()]
    return "/gpu:0" in device_names
check.py (project: inception_v3, author: Cyber-Neuron)
from tensorflow.python.client import device_lib

def get_available_gpus():
    """ Return the names of the GPUs visible to TensorFlow """
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
tf.py (project: imageCrack, author: mario1oreo)
def get_available_gpus(num_gpus=None):
    """
    Modified from http://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow
    The original code occupies all available GPU memory when it queries the
    devices. This version takes an optional num_gpus parameter; when given,
    the function does nothing but return the device handle names. It works
    for single-machine training, but may or may not behave well on a cluster.
    """
    if num_gpus is None:
        from tensorflow.python.client import device_lib as _device_lib
        local_device_protos = _device_lib.list_local_devices()
        return [x.name for x in local_device_protos if x.device_type == 'GPU']
    else:
        return ['/gpu:%d' % idx for idx in range(num_gpus)]
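The returned names can be passed directly to tf.device for manual placement. A usage sketch in graph-mode TF 1.x, assuming the helper above and a machine that actually has two GPUs:

import tensorflow as tf

# Pin one small matmul to each returned device name, then sum.
outputs = []
for device_name in get_available_gpus(num_gpus=2):
    with tf.device(device_name):
        a = tf.random_normal([4, 4])
        outputs.append(tf.matmul(a, a))
total = tf.add_n(outputs)

# allow_soft_placement falls back to CPU if a GPU is missing.
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
    print(sess.run(total))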
basic_test.py (project: sonnet, author: deepmind)
def testGradientColocation(self):
    """Tests a particular device (e.g. gpu, cpu) placement.

    This test ensures that the following device placement is possible:

    * The Linear module is on the gpu,
    * the optimizer is declared to be on the cpu,
    * but when calling minimize on the optimizer, we pass True to
      colocate_gradients_with_ops.

    The test exists because while one may expect tf.matmul(X, w) + b to be
    equivalent to tf.nn.xw_plus_b(X, w, b), with the latter this placement
    results in an InvalidArgumentError.

    Warning: if there is no gpu available to tensorflow this test will be
    skipped with just a warning! This is because the test requires that
    tensorflow has access to a gpu, but often this is not the case.
    """
    if not any(x.device_type == "GPU" for x in device_lib.list_local_devices()):
      tf.logging.warn("Skipping the gradient colocation test as there is no "
                      "gpu available to tensorflow.")
      return
    n_outputs = 5
    n_inputs = 3
    batch_size = 7
    linear = snt.Linear(n_outputs)
    with tf.device("/cpu:*"):
      # Set up data.
      inputs = tf.placeholder(tf.float32, [batch_size, n_inputs])
      labels = tf.to_int64(np.ones((batch_size)))
      # Predictions.
      with tf.device("/gpu:*"):
        outputs = linear(inputs)
      # Calculate the loss.
      cross_entropy = tf.contrib.nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits(  # pylint: disable=line-too-long
          outputs, labels, name="xentropy")
      loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
      # Optimizer.
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
      optimizer.minimize(loss, colocate_gradients_with_ops=True)
    init = tf.global_variables_initializer()
    try:
      with self.test_session(force_gpu=True) as sess:
        sess.run(init)
    except tf.errors.InvalidArgumentError as e:
      self.fail("Cannot start the session. Details:\n" + str(e))
config.py (project: odin, author: imito)
import os
import re
import shutil
import subprocess
import tempfile


def _query_gpu_info():
  """ Query GPU information:
  ngpu
  [device_name, device_compute_capability, device_total_memory]

  Note
  ----
  this function uses the deviceQuery command, so it should be
  available on your PATH
  """
  temp_dir = tempfile.mkdtemp()
  p = os.path.join(temp_dir, 'tmp.txt')
  queried = subprocess.call('deviceQuery > ' + p,
          shell=True,
          stdout=subprocess.PIPE,
          stderr=subprocess.PIPE) == 0
  # deviceName: [cardName, computeCapability, mem(MB)]
  dev = {}
  if queried: # found deviceQuery
    with open(p, 'r') as f:
      info = f.read()
    devNames = re.compile(r'Device \d: ".*"').findall(info)
    devNames = [i.strip().split(':')[-1].replace('"', '') for i in devNames]
    ngpu = len(devNames)
    comCap = re.compile(
        r'CUDA Capability Major\/Minor version number:\s*.*').findall(info)
    comCap = [float(i.strip().split(':')[-1]) for i in comCap]
    totalMems = re.compile(
        r'Total amount of global memory:\s*\d*').findall(info)
    totalMems = [int(i.strip().split(':')[-1]) for i in totalMems]
    # ====== create dev ====== #
    dev['ngpu'] = ngpu
    for i, (name, com, mem) in enumerate(zip(devNames, comCap, totalMems)):
      dev['dev%d' % i] = [name, com, mem]
  else:
    # _warning is a logging helper defined elsewhere in the project
    _warning('Cannot use "deviceQuery" to get GPU information for configuration.')
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    dev['ngpu'] = 0
    for i, name in enumerate(x.name for x in local_device_protos
        if x.device_type == 'GPU'):
      dev['dev%d' % i] = [name, None, None]
      dev['ngpu'] += 1
  # remove temp-dir
  shutil.rmtree(temp_dir)
  return dev
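When deviceQuery is missing, a bit more than the device name can still be recovered from the protos returned by list_local_devices(): memory_limit gives the device memory in bytes, and on GPUs physical_device_desc usually embeds the compute capability. A sketch of a pure-TensorFlow fallback in the same dict layout (function name is hypothetical, not part of odin):

import re
from tensorflow.python.client import device_lib

def _query_gpu_info_tf():
  """ GPU info from TensorFlow device protos, no deviceQuery needed """
  dev = {}
  gpus = [x for x in device_lib.list_local_devices()
          if x.device_type == 'GPU']
  dev['ngpu'] = len(gpus)
  for i, x in enumerate(gpus):
    # e.g. "device: 0, name: GeForce GTX 1080, ..., compute capability: 6.1"
    m = re.search(r'compute capability: (\d+\.\d+)', x.physical_device_desc)
    cap = float(m.group(1)) if m else None
    mem_mb = x.memory_limit // (1024 * 1024)  # memory_limit is in bytes
    dev['dev%d' % i] = [x.name, cap, mem_mb]
  return dev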

