Python tensorflow.python.eager.context.num_gpus() Examples

The following are 23 code examples of tensorflow.python.eager.context.num_gpus(), collected from open-source projects; the originating project and source file are listed above each example. num_gpus() returns the number of GPU devices available to the TensorFlow eager context, and in practice it appears almost exclusively in tests, either to skip cases that need more GPUs than the machine provides or to choose how many GPUs to run with.
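The recurring pattern is easiest to see in isolation. Below is a minimal sketch (the test class and method names are illustrative, not taken from any of the projects listed): a test queries the eager context and skips itself when fewer GPUs are available than it needs.

import tensorflow as tf

from tensorflow.python.eager import context


class MultiGpuTest(tf.test.TestCase):  # illustrative test class, not from the projects below

  def test_requires_two_gpus(self):
    # num_gpus() reports how many GPU devices the eager context can see.
    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available"
          .format(2, context.num_gpus()))
    # ... the actual multi-GPU test body would run here ...


if __name__ == "__main__":
  tf.test.main()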
Example #1
Source File: resnet_imagenet_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_xla_2_gpu_fp16(self):
    """Test Keras model with XLA, 2 GPUs and fp16."""
    config = keras_utils.get_config_proto_v1()
    tf.compat.v1.enable_eager_execution(config=config)

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-dtype", "fp16",
        "-enable_xla", "true",
        "-distribution_strategy", "default",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #2
Source File: resnet_imagenet_test.py    From models with Apache License 2.0
def test_end_to_end_xla_2_gpu_fp16(self, flags_key):
    """Test Keras model with XLA, 2 GPUs and fp16."""
    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-dtype", "fp16",
        "-enable_xla", "true",
        "-distribution_strategy", "mirrored",
    ]
    extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)

    if "polynomial_decay" in extra_flags:
      self.skipTest("Pruning with fp16 is not currently supported.")

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #3
Source File: resnet_imagenet_test.py    From models with Apache License 2.0
def test_end_to_end_2_gpu_fp16(self, flags_key):
    """Test Keras model with 2 GPUs and fp16."""

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-dtype", "fp16",
        "-distribution_strategy", "mirrored",
    ]
    extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)

    if "polynomial_decay" in extra_flags:
      self.skipTest("Pruning with fp16 is not currently supported.")

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #4
Source File: resnet_imagenet_test.py    From models with Apache License 2.0
def test_end_to_end_xla_2_gpu(self, flags_key):
    """Test Keras model with XLA and 2 GPUs."""

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-enable_xla", "true",
        "-distribution_strategy", "mirrored",
    ]
    extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #5
Source File: resnet_imagenet_test.py    From models with Apache License 2.0
def test_end_to_end_2_gpu(self, flags_key):
    """Test Keras model with 2 GPUs."""

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-distribution_strategy", "mirrored",
    ]
    extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #6
Source File: resnet_imagenet_test.py    From models with Apache License 2.0
def test_end_to_end_1_gpu(self, flags_key):
    """Test Keras model with 1 GPU."""

    if context.num_gpus() < 1:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(1, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "1",
        "-distribution_strategy", "mirrored",
        "-enable_checkpoint_and_export", "1",
    ]
    extra_flags = extra_flags + self.get_extra_flags_dict(flags_key)

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #7
Source File: resnet_cifar_test.py    From models with Apache License 2.0
def test_end_to_end_graph_2_gpu(self):
    """Test Keras model in legacy graph mode with 2 GPUs."""
    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-enable_eager", "false",
        "-distribution_strategy", "mirrored",
        "-model_dir", "keras_cifar_graph_2_gpu",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #8
Source File: resnet_cifar_test.py    From models with Apache License 2.0
def test_end_to_end_2_gpu(self):
    """Test Keras model with 2 GPUs."""

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-distribution_strategy", "mirrored",
        "-model_dir", "keras_cifar_2_gpu",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #9
Source File: resnet_cifar_test.py    From models with Apache License 2.0
def test_end_to_end_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    if context.num_gpus() < 1:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(1, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "1",
        "-noenable_eager",
        "-distribution_strategy", "mirrored",
        "-model_dir", "keras_cifar_graph_1_gpu",
        "-data_format", "channels_last",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #10
Source File: resnet_cifar_test.py    From models with Apache License 2.0
def test_end_to_end_1_gpu(self):
    """Test Keras model with 1 GPU."""

    if context.num_gpus() < 1:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(1, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "1",
        "-distribution_strategy", "mirrored",
        "-model_dir", "keras_cifar_1_gpu",
        "-data_format", "channels_last",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #11
Source File: resnet_imagenet_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_2_gpu_fp16(self):
    """Test Keras model with 2 GPUs and fp16."""
    config = keras_utils.get_config_proto_v1()
    tf.compat.v1.enable_eager_execution(config=config)

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-dtype", "fp16",
        "-distribution_strategy", "default",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #12
Source File: resnet_imagenet_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_xla_2_gpu(self):
    """Test Keras model with XLA and 2 GPUs."""
    config = keras_utils.get_config_proto_v1()
    tf.compat.v1.enable_eager_execution(config=config)

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-enable_xla", "true",
        "-distribution_strategy", "default",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #13
Source File: resnet_imagenet_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_2_gpu(self):
    """Test Keras model with 2 GPUs."""
    config = keras_utils.get_config_proto_v1()
    tf.compat.v1.enable_eager_execution(config=config)

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-distribution_strategy", "default",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #14
Source File: resnet_imagenet_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    config = keras_utils.get_config_proto_v1()
    tf.compat.v1.enable_eager_execution(config=config)

    if context.num_gpus() < 1:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available"
          .format(1, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "1",
        "-dtype", "fp16",
        "-distribution_strategy", "default",
        "-data_format", "channels_last",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #15
Source File: resnet_cifar_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_graph_2_gpu(self):
    """Test Keras model in legacy graph mode with 2 GPUs."""
    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-enable_eager", "false",
        "-distribution_strategy", "default",
        "-model_dir", "keras_cifar_graph_2_gpu",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #16
Source File: resnet_cifar_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_2_gpu(self):
    """Test Keras model with 2 GPUs."""
    config = keras_utils.get_config_proto_v1()
    tf.compat.v1.enable_eager_execution(config=config)

    if context.num_gpus() < 2:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(2, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "2",
        "-distribution_strategy", "default",
        "-model_dir", "keras_cifar_2_gpu",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #17
Source File: resnet_cifar_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    if context.num_gpus() < 1:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(1, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "1",
        "-noenable_eager",
        "-distribution_strategy", "default",
        "-model_dir", "keras_cifar_graph_1_gpu",
        "-data_format", "channels_last",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #18
Source File: resnet_cifar_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_1_gpu(self):
    """Test Keras model with 1 GPU."""
    config = keras_utils.get_config_proto_v1()
    tf.compat.v1.enable_eager_execution(config=config)

    if context.num_gpus() < 1:
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(1, context.num_gpus()))

    extra_flags = [
        "-num_gpus", "1",
        "-distribution_strategy", "default",
        "-model_dir", "keras_cifar_1_gpu",
        "-data_format", "channels_last",
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=resnet_cifar_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
Example #19
Source File: ctl_imagenet_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_end_to_end_2_gpu(self):
    """Test Keras model with 2 GPUs."""
    num_gpus = '2'
    if context.num_gpus() < 2:
      num_gpus = '0'

    extra_flags = [
        '-num_gpus', num_gpus,
        '-distribution_strategy', 'default',
        '-model_dir', 'ctl_imagenet_2_gpu',
        '-data_format', 'channels_last',
    ]
    extra_flags = extra_flags + self._extra_flags

    integration.run_synthetic(
        main=ctl_imagenet_main.run,
        tmp_root=self.get_temp_dir(),
        extra_flags=extra_flags
    ) 
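Unlike the tests above, this example does not skip when too few GPUs are present: it falls back to num_gpus = '0', so the same code path is still exercised on CPU-only machines.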
Example #20
Source File: distribute_strategy_estimator_training_test.py    From estimator with Apache License 2.0
def test_complete_flow_independent_worker_between_graph(
      self, train_distribute_cls, eval_distribute_cls):
    if (context.num_gpus() < 2 and eval_distribute_cls ==
        tf.distribute.experimental.MultiWorkerMirroredStrategy):
      self.skipTest("`CollectiveAllReduceStrategy` needs at least two towers.")

    if (train_distribute_cls ==
        tf.distribute.experimental.ParameterServerStrategy):
      cluster_spec = multi_worker_test_base.create_cluster_spec(
          num_workers=3, num_ps=2, has_eval=True)
      # 3 workers, 2 ps and 1 evaluator.
      self._barrier = dc._Barrier(6)
    else:
      cluster_spec = multi_worker_test_base.create_cluster_spec(
          num_workers=3, num_ps=0, has_eval=True)
      # 3 workers and 1 evaluator.
      self._barrier = dc._Barrier(4)

    train_distribute = self._get_strategy_object(
        train_distribute_cls, cluster_spec=cluster_spec)

    if eval_distribute_cls:
      eval_distribute = self._get_strategy_object(
          eval_distribute_cls, eval_strategy=True)
    else:
      eval_distribute = None

    threads = self.run_multiple_tasks_in_threads(self._independent_worker_fn,
                                                 cluster_spec, train_distribute,
                                                 eval_distribute)
    threads_to_join = []
    for task_type, ts in threads.items():
      if task_type == PS:
        continue
      for t in ts:
        threads_to_join.append(t)
    self.join_independent_workers(threads_to_join)

    estimator = self._get_estimator(train_distribute, eval_distribute)
    self._inspect_train_and_eval_events(estimator) 
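The barrier size matches the total task count in the cluster spec (3 workers + 2 parameter servers + 1 evaluator = 6, or 3 workers + 1 evaluator = 4 without parameter servers), so no task proceeds past the synchronization point until every task has reached it.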
Example #21
Source File: utils.py    From zoo with Apache License 2.0
def get_distribution_scope(batch_size):
    if num_gpus() > 1:
        strategy = tf.distribute.MirroredStrategy()
        assert (
            batch_size % strategy.num_replicas_in_sync == 0
        ), f"Batch size {batch_size} cannot be divided onto {num_gpus()} GPUs"
        distribution_scope = strategy.scope
    else:
        if sys.version_info >= (3, 7):
            distribution_scope = contextlib.nullcontext
        else:
            distribution_scope = contextlib.suppress

    return distribution_scope() 
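Note that contextlib.nullcontext was only added in Python 3.7, which is why the snippet falls back to contextlib.suppress on older interpreters: called with no arguments, suppress() suppresses no exceptions and therefore also acts as a no-op context manager. A minimal usage sketch follows (build_model is a hypothetical helper, not part of the project):

with get_distribution_scope(batch_size=256):
    # Variables created inside the scope are mirrored across GPUs
    # whenever num_gpus() > 1; otherwise the scope is a no-op.
    model = build_model()  # hypothetical model-construction helper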
Example #22
Source File: distribute_strategy_estimator_training_test.py    From estimator with Apache License 2.0
def _get_strategy_object(self,
                           strategy_cls,
                           cluster_spec=None,
                           eval_strategy=False):
    if strategy_cls == tf.distribute.MirroredStrategy:
      if eval_strategy:
        return strategy_cls()
      else:
        return strategy_cls(
            cross_device_ops=self._make_cross_device_ops(
                num_gpus_per_worker=context.num_gpus()))
    # NOTE: as written, this branch is unreachable, because the "if" above
    # already matches tf.distribute.MirroredStrategy; the upstream test
    # appears to have distinguished a second (legacy contrib) MirroredStrategy
    # class here that accepted num_gpus_per_worker directly.
    elif (strategy_cls == tf.distribute.MirroredStrategy and not eval_strategy):
      return strategy_cls(
          num_gpus_per_worker=context.num_gpus(),
          cross_device_ops=self._make_cross_device_ops(
              num_gpus_per_worker=context.num_gpus()))
    elif strategy_cls == tf.distribute.experimental.ParameterServerStrategy:
      assert cluster_spec is not None
      cluster_resolver = SimpleClusterResolver(
          cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
          task_type="ps",
          task_id=0,
          num_accelerators={"GPU": context.num_gpus()})
      return strategy_cls(cluster_resolver)
    elif strategy_cls == tf.distribute.experimental.CentralStorageStrategy:
      return strategy_cls._from_num_gpus(context.num_gpus())
    else:
      return strategy_cls() 
Example #23
Source File: utils.py    From rethinking-bnn-optimization with Apache License 2.0
def get_distribution_scope(batch_size):
    if num_gpus() > 1:
        strategy = tf.distribute.MirroredStrategy()
        assert (
            batch_size % strategy.num_replicas_in_sync == 0
        ), f"Batch size {batch_size} cannot be divided onto {num_gpus()} GPUs"
        distribution_scope = strategy.scope
    else:
        if sys.version_info >= (3, 7):
            distribution_scope = contextlib.nullcontext
        else:
            distribution_scope = contextlib.suppress

    return distribution_scope()