Python tensorflow.python.client.device_lib.list_local_devices() Examples

The following are 29 code examples of tensorflow.python.client.device_lib.list_local_devices(), collected from open-source projects; the originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.python.client.device_lib.
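Before the examples, a minimal sketch of the call itself as a quick orientation (TF 1.x-style API, matching the examples below): list_local_devices() returns a list of DeviceAttributes protocol buffers, one per device visible to TensorFlow.

from tensorflow.python.client import device_lib

devices = device_lib.list_local_devices()
for d in devices:
    # Each entry carries fields such as name, device_type,
    # memory_limit and physical_device_desc.
    print(d.name, d.device_type)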
Example #1
Source File: logger.py    From models with Apache License 2.0
def _collect_gpu_info(run_info):
  """Collect local GPU information by TF device library."""
  gpu_info = {}
  local_device_protos = device_lib.list_local_devices()

  gpu_info["count"] = len([d for d in local_device_protos
                           if d.device_type == "GPU"])
  # The device description is a comma-separated string containing the GPU
  # model info, e.g.:
  # "device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0"
  for d in local_device_protos:
    if d.device_type == "GPU":
      gpu_info["model"] = _parse_gpu_model(d.physical_device_desc)
      # Assume all the connected GPUs are the same model
      break
  run_info["machine_config"]["gpu_info"] = gpu_info 
Example #2
Source File: utils.py    From autowebcompat with Mozilla Public License 2.0
def get_machine_info():
    parameter_value_map = {}
    operating_sys = sys.platform
    parameter_value_map['Operating System'] = operating_sys
    if 'linux' not in operating_sys:
        return parameter_value_map

    for i, device in enumerate(device_lib.list_local_devices()):
        if device.device_type != 'GPU':
            continue
        parameter_value_map['GPU_{}_name'.format(i + 1)] = device.name
        parameter_value_map['GPU_{}_memory_limit'.format(i + 1)] = device.memory_limit
        parameter_value_map['GPU_{}_description'.format(i + 1)] = device.physical_device_desc
    # Query CPU counts via lscpu; matching lines look like "CPU(s): 8",
    # "Core(s) per socket: 4" and "Thread(s) per core: 2".
    lscpu = subprocess.check_output("lscpu | grep '^CPU(s):\\|Core\\|Thread'", shell=True).strip().decode()
    lscpu = lscpu.split('\n')
    for row in lscpu:
        row = row.split(':')
        parameter_value_map[row[0]] = row[1].strip()
    return parameter_value_map 
Example #3
Source File: system_info_lib.py    From deep_image_model with Apache License 2.0
def gather_available_device_info():
  """Gather list of devices available to TensorFlow.

  Returns:
    A list of test_log_pb2.AvailableDeviceInfo messages.
  """
  device_info_list = []
  devices = device_lib.list_local_devices()

  for d in devices:
    device_info = test_log_pb2.AvailableDeviceInfo()
    device_info.name = d.name
    device_info.type = d.device_type
    device_info.memory_limit = d.memory_limit
    device_info.physical_description = d.physical_device_desc
    device_info_list.append(device_info)

  return device_info_list 
Example #4
Source File: logger.py    From Gun-Detector with Apache License 2.0
def _collect_gpu_info(run_info):
  """Collect local GPU information by TF device library."""
  gpu_info = {}
  local_device_protos = device_lib.list_local_devices()

  gpu_info["count"] = len([d for d in local_device_protos
                           if d.device_type == "GPU"])
  # The device description is a comma-separated string containing the GPU
  # model info, e.g.:
  # "device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0"
  for d in local_device_protos:
    if d.device_type == "GPU":
      gpu_info["model"] = _parse_gpu_model(d.physical_device_desc)
      # Assume all the connected GPUs are the same model
      break
  run_info["machine_config"]["gpu_info"] = gpu_info 
Example #5
Source File: logger.py    From nsfw with Apache License 2.0
def _collect_gpu_info(run_info):
  """Collect local GPU information by TF device library."""
  gpu_info = {}
  local_device_protos = device_lib.list_local_devices()

  gpu_info["count"] = len([d for d in local_device_protos
                           if d.device_type == "GPU"])
  # The device description is a comma-separated string containing the GPU
  # model info, e.g.:
  # "device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0"
  for d in local_device_protos:
    if d.device_type == "GPU":
      gpu_info["model"] = _parse_gpu_model(d.physical_device_desc)
      # Assume all the connected GPUs are the same model
      break
  run_info["machine_config"]["gpu_info"] = gpu_info 
Example #6
Source File: train_ssd_large.py    From inference with Apache License 2.0
def validate_batch_size_for_multi_gpu(batch_size):
    """For multi-gpu, batch-size must be a multiple of the number of
    available GPUs.
    Note that this should eventually be handled by replicate_model_fn
    directly. Multi-GPU support is currently experimental, however,
    so doing the work here until that feature is in place.
    """
    if FLAGS.multi_gpu:
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])
        if not num_gpus:
            raise ValueError('Multi-GPU mode was specified, but no GPUs '
                             'were found. To use CPU, run with --multi_gpu=False.')
        remainder = batch_size % num_gpus
        if remainder:
            err = ('When running with multiple GPUs, batch size '
                   'must be a multiple of the number of available GPUs. '
                   'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
                  ).format(num_gpus, batch_size, batch_size - remainder)
            raise ValueError(err)
        return num_gpus
    return 0 
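A worked illustration of the divisibility check above (the numbers are hypothetical):

num_gpus = 4
batch_size = 30
remainder = batch_size % num_gpus   # 2, so the batch cannot be split evenly
# The raised error suggests batch_size - remainder:
# "Found 4 GPUs with a batch size of 30; try --batch_size=28 instead."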
Example #7
Source File: sg_main.py    From sugartensor with MIT License
def sg_gpus():
    r""" Gets current available GPU nums

    Returns:
      A integer : total # of GPUs available
    """
    global _gpus

    if _gpus is None:
        local_device_protos = device_lib.list_local_devices()
        _gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])

    # Report at least 1 so callers can always divide work across devices.
    return max(_gpus, 1)


#
# context helpers
# 
Example #8
Source File: base.py    From batchflow with Apache License 2.0
def _get_devices(self):
        available_devices = device_lib.list_local_devices()

        # Remove internal `XLA` devices, see `using JIT compilation <https://www.tensorflow.org/xla/jit>`_.
        usable_devices = [device.name for device in available_devices
                          if 'XLA' not in device.name]

        if self.config.get('device'):
            devices = self.config.get('device')
            devices = devices if isinstance(devices, list) else [devices]
            devices = [device for name in devices for device in usable_devices
                       if re.search(name.upper(), device.upper()) is not None]
            devices = [device for i, device in enumerate(devices)
                       if device not in devices[:i]]
        else:
            cpu_devices = [device for device in usable_devices
                           if 'CPU' in device]
            gpu_devices = [device for device in usable_devices
                           if 'GPU' in device]
            if gpu_devices:
                devices = [gpu_devices[0]]
            else:
                devices = [cpu_devices[0]]
        return devices 
Example #9
Source File: utils.py    From Deep-Image-Matting with MIT License
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']


# getting the number of CPUs 
Example #10
Source File: tensorflowbk.py    From quantumflow with Apache License 2.0
def gpu_available() -> bool:
    local_device_protos = device_lib.list_local_devices()
    gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
    return len(gpus) != 0 
Example #11
Source File: gpu.py    From petridishnn with MIT License
def get_num_gpu():
    """
    Returns:
        int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
    """

    def warn_return(ret, message):
        try:
            import tensorflow as tf
        except ImportError:
            return ret

        built_with_cuda = tf.test.is_built_with_cuda()
        if not built_with_cuda and ret > 0:
            logger.warn(message + "But TensorFlow was not built with CUDA support and could not use GPUs!")
        return ret

    env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    if env:
        return warn_return(len(env.split(',')), "Found non-empty CUDA_VISIBLE_DEVICES. ")
    output, code = subproc_call("nvidia-smi -L", timeout=5)
    if code == 0:
        output = output.decode('utf-8')
        return warn_return(len(output.strip().split('\n')), "Found nvidia-smi. ")
    try:
        # Use NVML to query device properties
        with NVMLContext() as ctx:
            return warn_return(ctx.num_devices(), "NVML found nvidia devices. ")
    except Exception:
        # Fallback
        # Note this will initialize all GPUs and therefore has side effect
        # https://github.com/tensorflow/tensorflow/issues/8136
        logger.info("Loading local devices by TensorFlow ...")
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        return len([x.name for x in local_device_protos if x.device_type == 'GPU']) 
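As the fallback's comment warns, list_local_devices() initializes every visible GPU as a side effect. In TensorFlow versions where the function accepts a session_config argument (used in Example #20 below), the probe can at least be kept from reserving all GPU memory. A minimal sketch:

import tensorflow as tf
from tensorflow.python.client import device_lib

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
devices = device_lib.list_local_devices(session_config=config)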
Example #12
Source File: utils.py    From tfutils with MIT License
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Example #13
Source File: model_deploy.py    From shuttleNet with GNU General Public License v3.0
def get_available_gpus():
  local_device_protos = device_lib.list_local_devices()
  return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Example #14
Source File: tensorflow2bk.py    From quantumflow with Apache License 2.0
def gpu_available() -> bool:
    local_device_protos = device_lib.list_local_devices()
    gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
    return len(gpus) != 0 
Example #15
Source File: misc.py    From NJUNMT-tf with Apache License 2.0
def get_available_gpus():
    """Returns a list of available GPU devices names. """
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == "GPU"] 
Example #16
Source File: mdbt_util.py    From tatk with Apache License 2.0
def get_available_devs():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Example #17
Source File: utility.py    From batch-ppo with Apache License 2.0
def available_gpus():
  """List of GPU device names detected by TensorFlow."""
  local_device_protos = device_lib.list_local_devices()
  return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Example #18
Source File: control_flow_ops_py_test.py    From deep_image_model with Apache License 2.0
def testGuardedAssertDoesNotCopyWhenTrue(self):
    with self.test_session(use_gpu=True) as sess:
      with tf.device("/gpu:0"):
        value = tf.constant(1.0)
      with tf.device("/cpu:0"):
        true = tf.constant(True)
        guarded_assert = tf.Assert(true, [value], name="guarded")
        unguarded_assert = gen_logging_ops._assert(
            true, [value], name="unguarded")
      opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      guarded_metadata = tf.RunMetadata()
      sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
      unguarded_metadata = tf.RunMetadata()
      sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
      guarded_nodestat_names = [
          n.node_name for d in guarded_metadata.step_stats.dev_stats
          for n in d.node_stats]
      unguarded_nodestat_names = [
          n.node_name for d in unguarded_metadata.step_stats.dev_stats
          for n in d.node_stats]
      guarded_memcpy_nodestat_names = [
          n for n in guarded_nodestat_names if "MEMCPYDtoH" in n]
      unguarded_memcpy_nodestat_names = [
          n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n]
      if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
        # A copy was performed for the unguarded assert
        self.assertLess(0, len(unguarded_memcpy_nodestat_names))
      # No copy was performed for the guarded assert
      self.assertEqual([], guarded_memcpy_nodestat_names) 
Example #19
Source File: abstract_net.py    From bonnet with GNU General Public License v3.0
def gpu_available(self):
    # Can I use a GPU? Returns the number of GPUs available.
    # TensorFlow is greedy with GPUs and always tries to use everything
    # it can see, so restrict its view with the CUDA_VISIBLE_DEVICES
    # environment variable if needed.
    n_gpus_avail = 0
    devices = device_lib.list_local_devices()
    for dev in devices:
      print("DEVICE AVAIL: ", dev.name)
      if '/device:GPU' in dev.name:
        n_gpus_avail += 1
    return n_gpus_avail 
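The CUDA_VISIBLE_DEVICES restriction mentioned above has to be in place before TensorFlow initializes the GPUs, typically before the import (a sketch; the device index is an example value):

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # expose only the first GPU

from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())    # lists the CPU and GPU 0 only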
Example #20
Source File: utils.py    From mixmatch with Apache License 2.0
def get_available_gpus():
    global _GPUS
    if _GPUS is None:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        local_device_protos = device_lib.list_local_devices(session_config=config)
        _GPUS = tuple([x.name for x in local_device_protos if x.device_type == 'GPU'])
    return _GPUS 
Example #21
Source File: train_large_xt_cpn_onebyone.py    From tf.fashionAI with Apache License 2.0
def validate_batch_size_for_multi_gpu(batch_size):
    """For multi-gpu, batch-size must be a multiple of the number of
    available GPUs.

    Note that this should eventually be handled by replicate_model_fn
    directly. Multi-GPU support is currently experimental, however,
    so doing the work here until that feature is in place.
    """
    if not FLAGS.multi_gpu:
        return 0

    from tensorflow.python.client import device_lib

    local_device_protos = device_lib.list_local_devices()
    num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])
    if not num_gpus:
        raise ValueError('Multi-GPU mode was specified, but no GPUs '
                         'were found. To use CPU, run without --multi_gpu.')

    remainder = batch_size % num_gpus
    if remainder:
        err = ('When running with multiple GPUs, batch size '
                'must be a multiple of the number of available GPUs. '
                'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
                ).format(num_gpus, batch_size, batch_size - remainder)
        raise ValueError(err)
    return num_gpus 
Example #22
Source File: tf_replicate_model_fn.py    From tf.fashionAI with Apache License 2.0
def _get_local_devices(device_type):
  local_device_protos = device_lib.list_local_devices()
  return [
      device.name
      for device in local_device_protos
      if device.device_type == device_type
  ] 
Example #23
Source File: train_simplenet_onebyone.py    From tf.fashionAI with Apache License 2.0
def validate_batch_size_for_multi_gpu(batch_size):
    """For multi-gpu, batch-size must be a multiple of the number of
    available GPUs.

    Note that this should eventually be handled by replicate_model_fn
    directly. Multi-GPU support is currently experimental, however,
    so doing the work here until that feature is in place.
    """
    if not FLAGS.multi_gpu:
        return 0

    from tensorflow.python.client import device_lib

    local_device_protos = device_lib.list_local_devices()
    num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])
    if not num_gpus:
        raise ValueError('Multi-GPU mode was specified, but no GPUs '
                         'were found. To use CPU, run without --multi_gpu.')

    remainder = batch_size % num_gpus
    if remainder:
        err = ('When running with multiple GPUs, batch size '
                'must be a multiple of the number of available GPUs. '
                'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
                ).format(num_gpus, batch_size, batch_size - remainder)
        raise ValueError(err)
    return num_gpus 
Example #24
Source File: run_dqn_ram.py    From deep-reinforcement-learning with MIT License
def get_available_gpus():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] 
Example #25
Source File: run_dqn_atari.py    From deep-reinforcement-learning with MIT License
def get_available_gpus():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] 
Example #26
Source File: _models.py    From DeepChatModels with MIT License
def gpu_found():
    """Returns True if tensorflow finds at least 1 GPU."""
    devices = device_lib.list_local_devices()
    return len([x.name for x in devices if x.device_type == 'GPU']) > 0 
Example #27
Source File: utils_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def get_available_gpus():
    """
    Returns a list of string names of all available GPUs
    """
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Example #28
Source File: utils.py    From FaceNet with Apache License 2.0
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']


# getting the number of CPUs 
Example #29
Source File: test_forward.py    From training_results_v0.6 with Apache License 2.0
def is_gpu_available():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    gpu_list = [x.name for x in local_device_protos if x.device_type == 'GPU']
    if len(gpu_list) > 0:
        print("Tensorflow GPU:", gpu_list)
        return True
    else:
        return False

#######################################################################
# Pooling
# -------