Python tensorflow.python.ops.math_ops.scalar_mul() Examples
The following are seven code examples of tensorflow.python.ops.math_ops.scalar_mul(), drawn from open-source projects.
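Before the project examples, a minimal sketch of the call itself (illustrative only, assuming the TensorFlow 1.x environment these examples come from; not taken from any of the projects below):

import tensorflow as tf
from tensorflow.python.ops import math_ops

# scalar_mul(scalar, x) multiplies a Tensor (or IndexedSlices) by a 0-D
# scalar, and raises ValueError if `scalar` is not a scalar.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = math_ops.scalar_mul(2.0, x)  # equivalent to 2.0 * x for dense tensors

with tf.Session() as sess:
  print(sess.run(y))  # [[2. 4.]
                      #  [6. 8.]]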
Example #1
Source File: math_grad.py From lambda-packs with MIT License. The same function appears verbatim in auto-alt-text-lambda-api and keras-lambda (MIT License) and in deep_image_model (Apache License 2.0).
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))

# Logical operations have no gradients.
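The comment inside the function explains why scalar_mul is used instead of a bare 2.0 * grad: for an IndexedSlices gradient, scalar_mul scales only the values field and keeps the result sparse. A short sketch of that behavior (illustrative, TF 1.x, not from the source project):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

values = constant_op.constant([[1.0, 2.0]])
indices = constant_op.constant([3])
grad = ops.IndexedSlices(values, indices)

scaled = math_ops.scalar_mul(2.0, grad)
print(isinstance(scaled, ops.IndexedSlices))  # True: the result stays sparse
# The later multiplication by a dense Tensor, `* (x - y)` above, is what
# finally converts the IndexedSlices to a dense Tensor.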
Example #2
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License. A near-identical variant of the function above; it passes the grad tensor itself, rather than grad.op, to control_dependencies.
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  with ops.control_dependencies([grad]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))

# Logical operations have no gradients.
Example #3
Source File: math_ops_test.py From deep_image_model with Apache License 2.0
def testAcceptsRefs(self):
  var = variables.Variable(10)
  result = math_ops.scalar_mul(3, var)
  init = variables.global_variables_initializer()
  with self.test_session(use_gpu=True) as sess:
    sess.run(init)
    self.assertEqual(30, result.eval())
Example #4
Source File: math_ops_test.py From deep_image_model with Apache License 2.0
def testAcceptsConstant(self):
  const = constant_op.constant(10)
  result = math_ops.scalar_mul(3, const)
  with self.test_session(use_gpu=True):
    self.assertEqual(30, result.eval())
Example #5
Source File: math_ops_test.py From deep_image_model with Apache License 2.0
def testAcceptsTensor(self):
  tensor = array_ops.ones([10, 10])
  result = math_ops.scalar_mul(3, tensor)
  expected = array_ops.ones([10, 10]) * 3
  with self.test_session(use_gpu=True):
    self.assertAllEqual(expected.eval(), result.eval())
Example #6
Source File: math_ops_test.py From deep_image_model with Apache License 2.0
def testAcceptsIndexedSlices(self):
  values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
  indices = constant_op.constant([0, 2, 5])
  x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
  with self.test_session(use_gpu=True):
    self.assertAllEqual(x.values.eval(), [[-6, -9], [-15, -21], [0, 3]])
    self.assertAllEqual(x.indices.eval(), [0, 2, 5])
Example #7
Source File: optimizers.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def clip_norm(g, c, n):
  """Clip a tensor by norm.

  Arguments:
      g: gradient tensor to clip.
      c: clipping threshold.
      n: norm of gradient tensor.

  Returns:
      Clipped gradient tensor.
  """
  if c > 0:
    condition = n >= c
    then_expression = lambda: math_ops.scalar_mul(c / n, g)
    else_expression = lambda: g

    # saving the shape to avoid converting sparse tensor to dense
    if isinstance(g, ops.Tensor):
      g_shape = copy.copy(g.get_shape())
    elif isinstance(g, ops.IndexedSlices):
      g_shape = copy.copy(g.dense_shape)
    if condition.dtype != dtypes_module.bool:
      condition = math_ops.cast(condition, 'bool')
    g = control_flow_ops.cond(condition, then_expression, else_expression)
    if isinstance(g, ops.Tensor):
      g.set_shape(g_shape)
    elif isinstance(g, ops.IndexedSlices):
      g._dense_shape = g_shape  # pylint: disable=protected-access
  return g
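For context, a sketch of how such a helper is typically driven; the wrapper name clip_gradients and the clipnorm parameter are assumed here (they mirror how Keras optimizers compute a global norm before clipping) and are not part of the source file shown:

from tensorflow.python.ops import math_ops

def clip_gradients(grads, clipnorm):
  # Hypothetical caller: global L2 norm across all gradients, then clip each.
  norm = math_ops.sqrt(
      sum(math_ops.reduce_sum(math_ops.square(g)) for g in grads))
  return [clip_norm(g, clipnorm, norm) for g in grads]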