Python tensorflow.python.platform.test.is_gpu_available() Examples

The following are 30 code examples of tensorflow.python.platform.test.is_gpu_available(), collected from open-source projects. You can go to the original project or source file by following the links above each example, or browse all available functions and classes of the module tensorflow.python.platform.test.
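Before the project examples, here is a minimal usage sketch (not taken from any project below; the print messages are illustrative only). It shows the guard pattern that nearly all of the examples share: call test.is_gpu_available() and exercise GPU-only code paths, such as the NCHW data format, only when it returns True.

# Minimal sketch of the common guard pattern; illustrative, not from
# any of the projects listed below.
from tensorflow.python.platform import test

# cuda_only=True counts only CUDA-capable devices.
if test.is_gpu_available(cuda_only=True):
    print("CUDA GPU detected; GPU-only tests can run.")
else:
    print("No CUDA GPU detected; skipping GPU-only tests.")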
Example #1
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWithStrideOneValidPaddingNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 32
        input_size = [5, 3, 10, 12]
        expected_size = [5, num_filters, 12, 14]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [3, 3],
            stride=1,
            padding='VALID',
            data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')

        sess.run(variables_lib.global_variables_initializer())
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #2
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def testCompareBilinear(self):
    if test.is_gpu_available():
      input_shape = [1, 5, 6, 3]
      target_height = 8
      target_width = 12
      for nptype in [np.float32, np.float64]:
        for align_corners in [True, False]:
          img_np = np.arange(
              0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
          value = {}
          for use_gpu in [True, False]:
            with self.test_session(use_gpu=use_gpu):
              image = constant_op.constant(img_np, shape=input_shape)
              new_size = constant_op.constant([target_height, target_width])
              out_op = image_ops.resize_images(
                  image, new_size,
                  image_ops.ResizeMethod.BILINEAR,
                  align_corners=align_corners)
              value[use_gpu] = out_op.eval()
          self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5) 
Example #3
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWithStride2x5NCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 1
        input_size = [1, 1, 3, 2]
        expected_size = [1, num_filters, 6, 10]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [2, 4],
            stride=[2, 5],
            padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #4
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWithStride2x4NCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 1
        input_size = [1, 1, 3, 2]
        expected_size = [1, num_filters, 6, 8]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [2, 4],
            stride=[2, 4],
            padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #5
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWithStride2x1NCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 1
        input_size = [1, 1, 3, 2]
        expected_size = [1, num_filters, 6, 5]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [2, 4],
            stride=[2, 1],
            padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #6
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWith2x2StrideTwoValidPaddingNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 1
        input_size = [1, 1, 2, 2]
        expected_size = [1, num_filters, 4, 4]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [2, 2],
            stride=[2, 2],
            padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #7
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWith2x2StrideTwoSamePaddingNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 1
        input_size = [1, 1, 2, 2]
        expected_size = [1, num_filters, 4, 4]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [2, 2],
            stride=[2, 2],
            padding='SAME',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #8
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWith1x1StrideTwoSamePaddingNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 1
        input_size = [1, 1, 1, 1]
        expected_size = [1, num_filters, 2, 2]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [2, 2],
            stride=[2, 2],
            padding='SAME',
            data_format='NCHW')
        self.assertListEqual(list(output.get_shape().as_list()), expected_size)

        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #9
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWithStrideTwoValidPaddingNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 32
        input_size = [5, 3, 9, 11]
        expected_size = [5, num_filters, 19, 23]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [3, 3],
            stride=[2, 2],
            padding='VALID',
            data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.get_shape().as_list()), expected_size)

        sess.run(variables_lib.global_variables_initializer())
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #10
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testOutputSizeWithStrideOneSamePaddingNCHW(self):
    # The `NCHW` data format is only supported on GPU devices.
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True) as sess:
        num_filters = 32
        input_size = [5, 3, 10, 12]
        expected_size = [5, num_filters, 10, 12]

        images = random_ops.random_uniform(input_size, seed=1)
        output = layers_lib.conv2d_transpose(
            images,
            num_filters, [3, 3],
            stride=1,
            padding='SAME',
            data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')

        sess.run(variables_lib.global_variables_initializer())
        self.assertListEqual(list(output.eval().shape), expected_size) 
Example #11
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      num_filters = 32
      input_size = [5, 3, 9, 11]
      expected_size = [None, num_filters, None, None]
      expected_size_dynamic = [5, num_filters, 7, 9]

      with self.session(use_gpu=True):
        images = array_ops.placeholder(np.float32,
                                       [None, input_size[1], None, None])
        output = layers_lib.convolution2d(
            images,
            num_filters, [3, 3],
            rate=1,
            padding='VALID',
            data_format='NCHW')
        variables_lib.global_variables_initializer().run()
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), expected_size)
        eval_output = output.eval({images: np.zeros(input_size, np.float32)})
        self.assertListEqual(list(eval_output.shape), expected_size_dynamic) 
Example #12
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testFusedBatchNormFloat16MatchesFloat32(self):
    if test.is_gpu_available(cuda_only=True):
      shape = [5, 4, 2, 3]
      res_32 = self._runFusedBatchNorm(shape, np.float32)
      res_16 = self._runFusedBatchNorm(shape, np.float16)
      self.assertAllClose(res_32, res_16, rtol=1e-3) 
Example #13
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def testResizeDown(self):
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [127, 127, 64, 64,
            127, 127, 64, 64,
            64, 64, 127, 127,
            64, 64, 127, 127,
            50, 50, 100, 100,
            50, 50, 100, 100]
    expected_data = [127, 64,
                     64, 127,
                     50, 100]
    target_height = 3
    target_width = 2

    # Test out 3-D and 4-D image shapes.
    img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
    target_shapes = [[1, target_height, target_width, 1],
                     [target_height, target_width, 1]]

    for target_shape, img_shape in zip(target_shapes, img_shapes):

      for nptype in self.TYPES:
        img_np = np.array(data, dtype=nptype).reshape(img_shape)

        for opt in self.OPTIONS:
          if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
            with self.test_session(use_gpu=True):
              image = constant_op.constant(img_np, shape=img_shape)
              y = image_ops.resize_images(
                  image, [target_height, target_width], opt)
              expected = np.array(expected_data).reshape(target_shape)
              resized = y.eval()
              self.assertAllClose(resized, expected, atol=1e-5) 
Example #14
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def testNoOp(self):
    img_shape = [1, 6, 4, 1]
    single_shape = [6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [127, 127, 64, 64,
            127, 127, 64, 64,
            64, 64, 127, 127,
            64, 64, 127, 127,
            50, 50, 100, 100,
            50, 50, 100, 100]
    target_height = 6
    target_width = 4

    for nptype in self.TYPES:
      img_np = np.array(data, dtype=nptype).reshape(img_shape)

      for opt in self.OPTIONS:
        if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
          with self.test_session(use_gpu=True) as sess:
            image = constant_op.constant(img_np, shape=img_shape)
            y = image_ops.resize_images(
                image, [target_height, target_width], opt)
            yshape = array_ops.shape(y)
            resized, newshape = sess.run([y, yshape])
            self.assertAllEqual(img_shape, newshape)
            self.assertAllClose(resized, img_np, atol=1e-5)

      # Resizing a single image must also leave the shape unchanged.
      with self.test_session(use_gpu=True):
        img_single = img_np.reshape(single_shape)
        image = constant_op.constant(img_single, shape=single_shape)
        y = image_ops.resize_images(image, [target_height, target_width],
                                    self.OPTIONS[0])
        yshape = array_ops.shape(y)
        newshape = yshape.eval()
        self.assertAllEqual(single_shape, newshape) 
Example #15
Source File: session_debug_test.py    From deep_image_model with Apache License 2.0
def setUp(self):
    self._dump_root = tempfile.mkdtemp()

    if test.is_gpu_available():
      self._expected_partition_graph_count = 2
      self._expected_num_devices = 2
      self._main_device = "/job:localhost/replica:0/task:0/gpu:0"
    else:
      self._expected_partition_graph_count = 1
      self._expected_num_devices = 1
      self._main_device = "/job:localhost/replica:0/task:0/cpu:0" 
Example #16
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testChannelsFirst(self):
    # `bias_add` doesn't support NCHW on CPU.
    if test.is_gpu_available(cuda_only=True):
      for ndim in [3, 4, 5]:
        x = np.random.uniform(size=(4, 3, 2, 1)[:ndim])
        y = self._runGDN(x, x.shape, False, 'channels_first')
        self.assertEqual(x.shape, y.shape)
        self.assertAllClose(y, x / np.sqrt(1 + .1 * (x**2)), rtol=0, atol=1e-6) 
Example #17
Source File: nn_tools.py    From astroNN with MIT License
def gpu_availability():
    """
    Detect GPU on the user's system

    :return: Whether at least one CUDA-compatible GPU is detected and usable
    :rtype: bool
    :History: 2018-Apr-25 - Written - Henry Leung (University of Toronto)
    """
    # assume that if TensorFlow was built with CUDA, an Nvidia GPU may be available
    if is_built_with_cuda():
        return is_gpu_available()
    # not built with CUDA, so no usable CUDA GPU
    return False
Example #18
Source File: signal_conv_test.py    From pcc_geo_cnn with MIT License
def data_formats(self):
    # On CPU, many ops don't support the channels first data format. Hence, if
    # no GPU is available, we skip these tests.
    if test.is_gpu_available(cuda_only=True):
      return ("channels_first", "channels_last")
    else:
      return ("channels_last",) 
Example #19
Source File: benchmark_cnn_test.py    From dlcookbook-dlbs with Apache License 2.0
def _check_has_gpu():
  if not test.is_gpu_available(cuda_only=True):
    raise ValueError(
        """You have asked to run part or all of this on GPU, but it appears
        that no GPU is available. If your machine has GPUs it is possible you
        do not have a version of TensorFlow with GPU support. To build with GPU
        support, add --config=cuda to the build flags.\n """) 
Example #20
Source File: session_debug_testlib.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def setUpClass(cls):
    if test.is_gpu_available():
      cls._expected_partition_graph_count = 2
      cls._expected_num_devices = 2
      gpu_name = test_util.gpu_device_name()
      cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
    else:
      cls._expected_partition_graph_count = 1
      cls._expected_num_devices = 1
      cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0" 
Example #21
Source File: spectral_ops_test_util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _use_eigen_kernels():
  use_eigen_kernels = False  # Eigen kernels are default
  if test.is_gpu_available(cuda_only=True):
    use_eigen_kernels = False
  return use_eigen_kernels 
Example #22
Source File: session_debug_testlib.py    From keras-lambda with MIT License
def setUpClass(cls):
    if test.is_gpu_available():
      cls._expected_partition_graph_count = 2
      cls._expected_num_devices = 2
      cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
    else:
      cls._expected_partition_graph_count = 1
      cls._expected_num_devices = 1
      cls._main_device = "/job:localhost/replica:0/task:0/cpu:0" 
Example #23
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testNHWCAndNCHWTrainingProduceSameOutput(self):
    if test.is_gpu_available(cuda_only=True):
      for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
        nhwc = self._runBatchNormalizationWithFormat(
            data_format='NHWC', shape=shape, is_training=True)
        nchw = self._runBatchNormalizationWithFormat(
            data_format='NCHW', shape=shape, is_training=True)
        self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4) 
Example #24
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testNHWCAndNCHWInferenceProduceSameOutput(self):
    if test.is_gpu_available(cuda_only=True):
      for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
        nhwc = self._runBatchNormalizationWithFormat(
            data_format='NHWC', shape=shape, is_training=False)
        nchw = self._runBatchNormalizationWithFormat(
            data_format='NCHW', shape=shape, is_training=False)
        self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4) 
Example #25
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testNoneUpdatesCollectionIsTrainingVariableFusedNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      with tf.Graph().as_default():
        self._testNoneUpdatesCollectionIsTrainingVariable(
            True, data_format='NCHW') 
Example #26
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testIsTrainingVariableFusedNCHWZeroDebias(self):
    if test.is_gpu_available(cuda_only=True):
      self._testIsTrainingVariable(
          True, data_format='NCHW', zero_debias_moving_mean=True) 
Example #27
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testIsTrainingVariableFusedNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      self._testIsTrainingVariable(True, data_format='NCHW') 
Example #28
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testDelayedUpdateMovingVarsFusedNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      self._testDelayedUpdateMovingVars(True, data_format='NCHW') 
Example #29
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testNoneUpdatesCollectionsFusedNCHWZeroDebias(self):
    if test.is_gpu_available(cuda_only=True):
      self._testNoneUpdatesCollections(
          True, data_format='NCHW', zero_debias_moving_mean=True) 
Example #30
Source File: segnet_vgg_test.py    From MachineLearning with Apache License 2.0
def testMaxPoolingWithArgmax(self):
    # MaxPoolWithArgMax is implemented only on CUDA.
    if not test.is_gpu_available(cuda_only=True):
        return
    # Input, reshaped to [1, 3, 3, 2]:
    # [[[[  1.   2.]
    #    [  3.   4.]
    #    [  5.   6.]]
    #   [[  7.   8.]
    #    [  9.  10.]
    #    [ 11.  12.]]
    #   [[ 13.  14.]
    #    [ 15.  16.]
    #    [ 17.  18.]]]]
    tensor_input = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
                    10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
    with self.test_session(use_gpu=True) as sess:
        t = constant_op.constant(tensor_input, shape=[1, 3, 3, 2])
        out_op, argmax_op = segnet_vgg.max_pool_with_argmax(t)
        out, argmax = sess.run([out_op, argmax_op])
        self.assertShapeEqual(out, out_op)
        self.assertShapeEqual(argmax, argmax_op)
        # Expected pooled output:
        # [[[9, 10]
        #   [11, 12]]
        #  [[15, 16]
        #   [17, 18]]]
        self.assertAllClose(out.ravel(),
                            [9., 10., 11., 12., 15., 16., 17., 18.])
        self.assertAllEqual(argmax.ravel(), [8, 9, 10, 11, 14, 15, 16, 17])