Python tensorflow.python.framework.function.Defun() Examples
The following are 28 code examples of tensorflow.python.framework.function.Defun().
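Before the examples, here is a minimal, self-contained sketch of the decorator pattern they all build on. function.Defun belongs to the TF1-era graph API (it is deprecated and absent from TF2's public API), so the sketch assumes a TensorFlow 1.x runtime; the names Square and inp are illustrative and do not come from any example below.

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import function

# The dtypes passed to Defun declare the function's signature; the Python
# body is traced once into a FunctionDef stored in the graph's library.
@function.Defun(tf.float32)
def Square(x):
  return x * x

g = tf.Graph()
with g.as_default():
  inp = tf.placeholder(tf.float32)
  out = Square(inp)  # Adds a single function-call op instead of inlining the body.

with tf.Session(graph=g) as sess:
  print(sess.run(out, {inp: np.array([1., 2., 3.], dtype=np.float32)}))  # [1. 4. 9.]

The keyword arguments exercised in the examples below (func_name, grad_func, noinline, capture_by_value) all hang off this same decorator.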
Example #1
Source File: function_test.py From deep_image_model with Apache License 2.0
def testDefineFunctionNames(self):

  @function.Defun(tf.float32, func_name="Foo")
  def Foo(a):
    return a + 1

  with tf.Graph().as_default():
    call1 = Foo([1.0])
    self.assertEqual("Foo", call1.op.name)
    call2 = Foo([1.0])
    self.assertEqual("Foo_1", call2.op.name)
    # pylint: disable=unexpected-keyword-arg
    call3 = Foo([1.0], name="mine")
    self.assertEqual("mine", call3.op.name)
    with tf.name_scope("my"):
      call4 = Foo([1.0], name="precious")
      self.assertEqual("my/precious", call4.op.name)
Example #2
Source File: function_test.py From deep_image_model with Apache License 2.0
def testCustomGradientError(self):
  dtype = tf.float32

  @function.Defun(dtype, dtype, dtype)
  def Grad(x, dy, dz):
    # Should have returned 1 result.
    return x, dy + dz

  @function.Defun(dtype, grad_func=Grad)
  def Forward(x):
    return x, x

  g = tf.Graph()
  with g.as_default():
    inp = tf.placeholder(dtype)
    out = tf.add_n(Forward(inp))
    dinp = tf.gradients(out, [inp])

  x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
  with tf.Session(graph=g) as sess:
    with self.assertRaisesRegexp(
        tf.errors.InvalidArgumentError,
        "SymGrad expects to return 1.*but get 2.*instead"):
      _ = sess.run(dinp, {inp: x})
Example #3
Source File: function_test.py From deep_image_model with Apache License 2.0
def testTanhSymGrad(self):

  @function.Defun(tf.float32)
  def Forward(x):
    return tf.reduce_sum(tf.tanh(x))

  g = tf.Graph()
  with g.as_default():
    x = tf.placeholder(tf.float32)
    y = Forward(x)
    dx = tf.gradients([y], [x])

  inp = np.array([-1, 1, 2, -2], dtype=np.float32)
  feed = {x: inp}
  cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
      optimizer_options=tf.OptimizerOptions(
          opt_level=tf.OptimizerOptions.L1, do_function_inlining=True)))
  with tf.Session(graph=g, config=cfg) as sess:
    out, = sess.run(dx, feed)
  self.assertAllClose(1 - np.square(np.tanh(inp)), out)
Example #4
Source File: function_test.py From deep_image_model with Apache License 2.0
def testGradientFunc(self):

  @function.Defun(tf.float32, func_name="XSquarePlusOneFn")
  def XSquarePlusOne(x):
    return x * x + 1.0

  @function.Defun(tf.float32, tf.float32)
  def XSquarePlusOneGrad(x, dy):
    dx = functional_ops._symbolic_gradient(
        input=[x, dy], Tout=[tf.float32], f="XSquarePlusOneFn", name="dx")
    return dx

  g = tf.Graph()
  with g.as_default():
    call_f = XSquarePlusOne([2.0])
    call_g = XSquarePlusOneGrad([2.0], [0.1])

    with tf.Session() as sess:
      self.assertAllClose([5.0], sess.run(call_f))
      self.assertAllClose([0.4], sess.run(call_g))
Example #5
Source File: function_test.py From deep_image_model with Apache License 2.0
def testSymGradAttr(self):

  @function.Defun(noinline=True)
  def Foo(x):
    return x * 2

  self.assertTrue(
      Foo.instantiate([tf.float32]).definition.attr["_noinline"].b)

  g = tf.Graph()
  with g.as_default():
    x = tf.constant(3.0)
    y = Foo(x)
    dx, = tf.gradients(y, [x])

  cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
      optimizer_options=tf.OptimizerOptions(
          opt_level=tf.OptimizerOptions.L0,
          do_common_subexpression_elimination=True,
          do_function_inlining=True,
          do_constant_folding=True)))

  with self.test_session(graph=g, config=cfg):
    self.assertAllClose(y.eval(), 6.)
    self.assertAllClose(dx.eval(), 2.)
Example #6
Source File: native_module_test.py From hub with Apache License 2.0
def testTPUModuleInitializeOnceWithDefun(self):
  spec = hub.create_module_spec(stateful_random_rv_module_fn)

  @function.Defun()
  def import_computation():
    context = TPUReplicateContext()
    context.Enter()
    m = hub.Module(spec, name="module_", trainable=True)
    return [m(), m()]

  with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
    x = import_computation()
    sess.run(tf_v1.global_variables_initializer())
    got = sess.run(x)
    # Check the values are equal. If the initializer ran on each call,
    # the values would be different.
    self.assertEqual(got[0], got[1])
Example #7
Source File: native_module_test.py From hub with Apache License 2.0
def testTPUPruneWithUnusedInput(self):
  spec = hub.create_module_spec(unused_input_module_fn)

  @function.Defun()
  def import_computation(x):
    context = TPUReplicateContext()
    context.Enter()
    m = hub.Module(spec, name="module_", trainable=True)
    return m({
        "x": tf.cast(x, dtype=tf.int64),
        "unused": tf.constant(2, dtype=tf.int64)
    })

  with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
    x = import_computation(5)
    got = sess.run(x)
    self.assertEqual(got, 25)
Example #8
Source File: native_module_test.py From hub with Apache License 2.0
def testTPUModuleDoesntPruneControlDependencies(self):
  spec = hub.create_module_spec(control_dependency_module_fn)

  @function.Defun()
  def import_computation():
    context = TPUReplicateContext()
    context.Enter()
    m = hub.Module(spec, name="module_", trainable=True)
    return m()

  with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
    x = import_computation()
    got = sess.run(x)
    self.assertEqual(got, 5.0)
    # If the op got pruned, the following get_operation_by_name should fail
    # with a dependency error.
    tf_v1.get_default_graph().get_operation_by_name("module_/dependency_op")
Example #9
Source File: control_flow_ops_py_test.py From deep_image_model with Apache License 2.0
def testWhileFuncBasic(self):

  @function.Defun(tf.float32)
  def func(x):
    return tf.square(tf.square(x))

  with self.test_session():
    x = tf.constant(2.0, tf.float32)
    r = tf.while_loop(
        lambda i, v: i < 2,
        lambda i, v: [i + 1, func(v)],
        [tf.constant(0), x],
        [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
    self.assertEqual(r[1].eval(), 65536.0)
    r = tf.gradients(r, x)[0]
    self.assertEqual(r.eval(), 524288.0)
    self.assertEqual(
        len([op for op in x.graph.get_operations() if op.type == "Stack"]),
        1)
Example #10
Source File: math_utils.py From training_results_v0.5 with Apache License 2.0
def BatchMatMul(a, b):
  use_fp32_batch_matmul = (os.environ["use_fp32_batch_matmul"] == "true")
  xla_compile = (os.environ["xla_compile"] == "true")
  if use_fp32_batch_matmul:

    def DoFn(a, b):
      dtype = a.dtype
      a = tf.to_float(a)
      b = tf.to_float(b)
      return tf.cast(tf.matmul(a, b), dtype)

    # If using xla_compile, the fwd and bwd per tower are wrapped in xla_compile
    if not xla_compile:
      DoFn = function.Defun(noinline=True)(DoFn)
      res = DoFn(a, b)
      res.set_shape((None, None, b.shape[-1].value))
    else:
      # If xla_compile, leave to xla to handle the casts.
      res = DoFn(a, b)
  else:
    res = tf.matmul(a, b)
  return res
Example #11
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_spop_placeholder_without_shape_info():
    with tf.Graph().as_default():

        @function.Defun(*[tf.int32] * 2)
        def Forward(x, y):
            print(x.name)
            print(y.name)
            b = tf.add(x, y)
            return b

        pl1 = tf.placeholder(tf.int32, name="pl1")
        pl2 = tf.placeholder(tf.int32, name="pl2")
        pl3 = tf.placeholder(tf.int32, name="pl3")
        data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
        data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        z1 = gen_functional_ops.StatefulPartitionedCall(
            args=[pl1, pl2], Tout=[tf.int32], f=Forward)
        z2 = z1 + pl3
        compare_tf_with_tvm([data, data2, data3],
                            ['pl1:0', 'pl2:0', 'pl3:0'],
                            ['StatefulPartitionedCall:0', z2.name],
                            mode='vm', init_global_variables=True)
Example #12
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_spop_function_invocation_defun():
    with tf.Graph().as_default():

        def fun1(a):
            return tf.multiply(a, a)

        def fun2(b):
            return tf.multiply(b, b)

        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z

        op = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)],
            Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation")
        compare_tf_with_tvm([], [], 'SpopFnInvocation:0',
                            mode='vm', init_global_variables=True)
Example #13
Source File: function_test.py From deep_image_model with Apache License 2.0
def testGradient(self):

  @function.Defun(func_name="Spec")
  def G(x, dy):
    return x * dy

  @function.Defun(grad_func=G)
  def F(x):
    return tf.exp(x) - tf.exp(-x)

  for dtype in [tf.float32, tf.float64]:
    g = tf.Graph()
    with g.as_default():
      x = tf.constant(0.25, dtype)
      y = F(x)
      dx, = tf.gradients(y, x)

      with self.test_session(graph=g):
        self.assertAllClose(dx.eval(), 0.25)
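A note on the example above: the assertion dx == 0.25 holds because the gradient is computed with the registered grad_func rather than the analytic derivative. With dy = 1.0 flowing in, G returns x * dy = 0.25, whereas the true derivative exp(x) + exp(-x) at x = 0.25 is roughly 2.06; the test is checking precisely that the custom gradient overrides the symbolic one.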
Example #14
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_spop_control_flow():
    with tf.Graph().as_default():

        @function.Defun(*[dtypes.float32] * 2)
        def Body1(x, y):
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
                z = math_ops.multiply(x, y)
                i = 0
                while i < 10:
                    i += 1
                    if i == 5:
                        continue
                    z = math_ops.multiply(x, y * i)
            return z

        op = gen_functional_ops.StatefulPartitionedCall(
            args=[constant_op.constant(32.), constant_op.constant(100.)],
            Tout=[dtypes.float32], f=Body1)
        compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0',
                            mode='vm', init_global_variables=True)
Example #15
Source File: function_test.py From deep_image_model with Apache License 2.0
def testCapture(self):
  g = tf.Graph()
  with g.as_default():
    w = tf.Variable(tf.constant([[1.0]]))
    b = tf.Variable(tf.constant([2.0]))

    # Foo() captures w and b.
    @function.Defun(tf.float32)
    def Foo(x):

      # Plus() captures b.
      @function.Defun(tf.float32)
      def Plus(y):
        return y + b

      return Plus(tf.matmul(w, x))

    y = Foo(tf.constant([[10.]]))

  with self.test_session(graph=g):
    tf.global_variables_initializer().run()
    self.assertAllEqual(y.eval(), [[12.0]])
Example #16
Source File: dataset_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def make_one_shot_iterator(self):
  """Creates an `Iterator` for enumerating the elements of this dataset.

  **N.B.** The returned iterator will be initialized automatically.
  A "one-shot" iterator does not currently support re-initialization.

  Returns:
    An `Iterator` over the elements of this dataset.
  """
  # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
  # a 0-argument function.
  @function.Defun(capture_by_value=True)
  def _make_dataset():
    return self._as_variant_tensor()  # pylint: disable=protected-access

  _make_dataset.add_to_graph(ops.get_default_graph())

  return iterator_ops.Iterator(
      gen_dataset_ops.one_shot_iterator(
          dataset_factory=_make_dataset,
          output_types=nest.flatten(self.output_types),
          output_shapes=nest.flatten(self.output_shapes)), None,
      self.output_types, self.output_shapes)
Example #17
Source File: function_test.py From deep_image_model with Apache License 2.0
def testFunctionDecorator(self):

  @function.Defun(tf.float32, func_name="Minus1")
  def Minus1(b):
    return b - 1.0

  with tf.Graph().as_default():
    call1 = Minus1([2.])
    self.assertTrue(isinstance(Minus1, function._DefinedFunction))
    self.assertEqual(Minus1.name, "Minus1")
    # pylint: disable=unexpected-keyword-arg
    call2 = Minus1(call1, name="next")
    # pylint: enable=unexpected-keyword-arg
    self.assertEqual("next", call2.op.name)
    with tf.Session() as sess:
      self.assertAllEqual([1], sess.run(call1))
      self.assertAllEqual([0], sess.run(call2))
Example #18
Source File: dataset_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, input_dataset, map_func, cycle_length, block_length):
  """See `Dataset.interleave()` for details."""
  super(InterleaveDataset, self).__init__()
  self._input_dataset = input_dataset

  @function.Defun(*nest.flatten(input_dataset.output_types))
  def tf_map_func(*args):
    """A wrapper for Defun that facilitates shape inference."""
    # Pass in shape information from the input_dataset.
    for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
      arg.set_shape(shape)

    nested_args = nest.pack_sequence_as(input_dataset.output_types, args)

    if _should_unpack_args(nested_args):
      dataset = map_func(*nested_args)
    else:
      dataset = map_func(nested_args)

    if not isinstance(dataset, Dataset):
      raise TypeError("`map_func` must return a `Dataset` object.")

    self._output_types = dataset.output_types
    self._output_shapes = dataset.output_shapes

    return dataset._as_variant_tensor()  # pylint: disable=protected-access

  self._map_func = tf_map_func
  self._map_func.add_to_graph(ops.get_default_graph())

  self._cycle_length = ops.convert_to_tensor(cycle_length, dtype=dtypes.int64)
  self._block_length = ops.convert_to_tensor(block_length, dtype=dtypes.int64)
Example #19
Source File: dataset_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, input_dataset, map_func):
  """See `Dataset.flat_map()` for details."""
  super(FlatMapDataset, self).__init__()
  self._input_dataset = input_dataset

  @function.Defun(*nest.flatten(input_dataset.output_types))
  def tf_map_func(*args):
    """A wrapper for Defun that facilitates shape inference."""
    # Pass in shape information from the input_dataset.
    for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
      arg.set_shape(shape)

    nested_args = nest.pack_sequence_as(input_dataset.output_types, args)

    if _should_unpack_args(nested_args):
      dataset = map_func(*nested_args)
    else:
      dataset = map_func(nested_args)

    if not isinstance(dataset, Dataset):
      raise TypeError("`map_func` must return a `Dataset` object.")

    self._output_types = dataset.output_types
    self._output_shapes = dataset.output_shapes

    return dataset._as_variant_tensor()  # pylint: disable=protected-access

  self._map_func = tf_map_func
  self._map_func.add_to_graph(ops.get_default_graph())
Example #20
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_spop_device_assignment():
    # This test checks that TVM rejects inconsistent device assignment when
    # StatefulPartitionedCall/PartitionedCall operators are used, since TVM
    # treats them as container graphs that internally execute "stateless"
    # operations.
    tf.reset_default_graph()
    with tf.Graph().as_default():

        def fun1(a):
            with ops.device("/GPU:0"):
                return tf.multiply(a, a)

        def fun2(b):
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
                return tf.multiply(b, b)

        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            with ops.device("/CPU:0"):
                x = fun2(x)
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
                y = fun1(y)
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
                z = tf.add(x, y)
                return z

        op = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)],
            Tout=[dtypes.float32], f=fun3)

        with pytest.raises(Exception) as execinfo:
            compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0',
                                mode='vm', init_global_variables=True)

        assert execinfo.value.args[0].startswith(
            "Found inconsistent Device assignment")
Example #21
Source File: tpu_estimator.py From estimator with Apache License 2.0
def _build_computation_for_inference(model_fn, labels, config, params):
  """Builds the computation that calls `model_fn` for inference."""
  capture = _CapturedObject()

  def computation(computation_input):
    """Computation to be passed to `TPUPartitionedCall()`."""
    tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
        model_fn, computation_input, labels, config, params)

    tensors_on_cpu = tf.compat.v1.tpu.rewrite(tpu_computation)
    tpu.prune_unconnected_ops_from_xla(tf.compat.v1.get_default_graph())

    (estimator_spec, export_outputs_dict, export_outputs_list,
     predictions_dict) = (
         tpu_capture.get())
    predictions_list = tensors_on_cpu[:len(predictions_dict)]
    export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]

    # Reconstruct tensors used in export_outputs, with TPU tensors replaced
    # with their CPU counterpart returned from `rewrite_for_inference()`.
    # `function.Defun()` does not like `None`s in return values, so we leave
    # `None`s out but record their positions for later reconstruction.
    export_outputs_list_without_none = []
    none_indices = []
    for i, t in enumerate(export_outputs_list):
      if t is None:
        none_indices.append(i)
      else:
        export_outputs_list_without_none.append(
            export_outputs_tpu_on_cpu_list.pop(0))

    capture.capture(
        (estimator_spec, export_outputs_dict, predictions_dict, none_indices))
    return predictions_list + export_outputs_list_without_none

  return computation, capture
Example #22
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_spop_constants():
    with tf.Graph().as_default():

        @function.Defun(*[dtypes.int32] * 2)
        def constantsFn(x, y):
            vv = tf.constant([2, 3, 4], name="vv")
            z = tf.add(vv + x, y)
            return z

        a = tf.constant(20000, name="a")
        b = tf.constant(40000, name="b")
        spopFn = gen_functional_ops.StatefulPartitionedCall(
            args=[a, b], Tout=[tf.int32], f=constantsFn)

        compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0',
                            mode='vm', init_global_variables=True)
Example #23
Source File: function_test.py From deep_image_model with Apache License 2.0
def testReduction(self):
  g = tf.Graph()

  # BN0 is computing batch normed matrix along rows.
  def BN0(x):
    mean = tf.reduce_mean(x, [0])
    var = tf.reduce_mean(tf.square(x - mean))  # biased var
    rstd = tf.rsqrt(var + 1e-8)
    return (x - mean) * rstd

  # Wraps BatchNorm in a tf function.
  @function.Defun(tf.float32)
  def BN1(x):
    return BN0(x)

  with g.as_default():
    x = tf.placeholder(tf.float32)
    y0 = BN0(x)  # A plain graph
    y1 = BN1(x)  # A tf function
    dx0, = tf.gradients([y0], [x])
    dx1, = tf.gradients([y1], [x])

  # Both should produce the same result and gradient.
  with self.test_session(graph=g) as sess:
    vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
    self.assertAllClose(vals[0], vals[1])
    self.assertAllClose(vals[2], vals[3])
Example #24
Source File: function_test.py From deep_image_model with Apache License 2.0
def testDocString(self):

  @function.Defun()
  def Foo(x):
    """Successor of x."""
    return x + 1

  g = tf.Graph()
  with g.as_default():
    _ = Foo(1)
  self.assertEqual(g.as_graph_def().library.function[0].signature.description,
                   "Successor of x.")
Example #25
Source File: function_test.py From deep_image_model with Apache License 2.0
def testStableName(self):

  @function.Defun()
  def Foo(x, y, z):
    return tf.tanh(tf.matmul(x, y) + z)

  self.assertEqual("Foo_19571794", Foo.instantiate([tf.float32] * 3).name)
Example #26
Source File: function_test.py From deep_image_model with Apache License 2.0
def testCaptureControls(self):
  g = tf.Graph()
  with g.as_default():
    x = tf.constant([10.0])
    x = tf.Print(x, [x], "outer")

    @function.Defun(tf.float32)
    def Foo(y):
      with tf.control_dependencies([x]):
        y = tf.Print(y, [y], "inner")
      return y

    with self.assertRaisesRegexp(ValueError, "not an element of this graph."):
      # NOTE: We still do not support capturing control deps.
      _ = Foo(x)
Example #27
Source File: function_test.py From deep_image_model with Apache License 2.0
def testDeclareTypeMistake(self):
  foo = function.Declare("Foo", [tf.float32], [tf.float32])

  @function.Defun(tf.float32, func_name="Foo")
  def Foo(x):
    return x * x + 1

  g = tf.Graph()
  with g.as_default():
    y = foo(2.0)
    with self.test_session(graph=g):
      with self.assertRaisesRegexp(tf.errors.NotFoundError, "not registered"):
        _ = y.eval()

  g = tf.Graph()
  with g.as_default():
    Foo.add_to_graph(g)
    y = foo(2)
    with self.test_session(graph=g):
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   "int32.*float"):
        _ = y.eval()

  g = tf.Graph()
  with g.as_default():
    Foo.add_to_graph(g)
    with self.assertRaisesRegexp(
        ValueError, "Expected number of arguments: 1, received: 2"):
      _ = foo(2.0, 2.0)

  g = tf.Graph()
  with g.as_default():
    Foo.add_to_graph(g)
    y = foo(2.0)
    with self.test_session(graph=g):
      self.assertAllEqual(y.eval(), 5.0)
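A note on the example above: function.Declare only records Foo's name and signature as a forward declaration, so a call through foo produces a graph node that resolves only once a Defun with the matching func_name has been added via add_to_graph. The NotFoundError, the int32-vs-float InvalidArgumentError, and the arity ValueError each exercise one way the call site and the declaration can disagree.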
Example #28
Source File: function_test.py From deep_image_model with Apache License 2.0
def testCallErrors(self):

  @function.Defun()
  def Const():
    return tf.constant(1)

  @function.Defun(tf.int32)
  def PlusOne(a):
    return a + 1

  @function.Defun(tf.int32, tf.int32)
  def PlusMinus(a, b):
    return a + b, b - a

  with tf.Graph().as_default():
    _ = Const()
    # pylint: disable=too-many-function-args
    # pylint: disable=unexpected-keyword-arg
    # pylint: disable=no-value-for-parameter
    with self.assertRaisesRegexp(ValueError, "arguments: 0"):
      _ = Const(1)
    with self.assertRaisesRegexp(ValueError, "arguments: 0"):
      _ = Const(1, 2)

    with self.assertRaisesRegexp(ValueError, "arguments: 1"):
      _ = PlusOne()
    _ = PlusOne(1)
    with self.assertRaisesRegexp(ValueError, "arguments: 1"):
      _ = PlusOne(1, 2)

    with self.assertRaisesRegexp(ValueError, "arguments: 2"):
      _ = PlusMinus()
    with self.assertRaisesRegexp(ValueError, "arguments: 2"):
      _ = PlusMinus(1)
    _ = PlusMinus(1, 2)

    _ = PlusOne(1, name="p1")
    with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
      _ = PlusOne(1, device="/gpu:0")