Python tensorflow.python.ops.math_ops.reciprocal() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.reciprocal(), extracted from open-source projects. The source file and project are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.ops.math_ops.
Example #1
Source File: nn_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(
        math_ops.multiply(variance_ss, divisor),
        math_ops.square(shifted_mean),
        name="variance")
  return (mean, variance)
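The idea is to compute the reciprocal of the element count once and reuse it as a multiplier for both moments, so only a single division is paid for. Below is a minimal sketch of the same computation written against the public TensorFlow 2 API; the tensors and values are illustrative and are not part of the example above.

import tensorflow as tf

data = tf.constant([[1.0, 2.0], [3.0, 4.0]])
counts = tf.cast(tf.size(data), tf.float32)      # total number of elements
mean_ss = tf.reduce_sum(data)                    # sum of the elements
variance_ss = tf.reduce_sum(tf.square(data))     # sum of the squared elements

divisor = tf.math.reciprocal(counts)             # 1 / counts, computed once
mean = mean_ss * divisor                         # E[x]
variance = variance_ss * divisor - tf.square(mean)  # E[x^2] - E[x]^2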
Example #2
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(x)
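This is the registered gradient for the Log op: since d/dx log(x) = 1/x, the incoming gradient is simply multiplied by reciprocal(x) (with a conj() so the same expression also holds for complex inputs). A quick sanity check with the public API, assuming TensorFlow 2 eager execution:

import tensorflow as tf

x = tf.constant([0.5, 1.0, 2.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.math.log(x)
print(tape.gradient(y, x))       # [2.0, 1.0, 0.5], i.e. 1/x
print(tf.math.reciprocal(x))     # same values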
Example #3
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(1 + x)
Example #4
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _AtanhGrad(op, grad):
  """Returns grad * 1/(1 - x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    inv = math_ops.reciprocal(math_ops.subtract(one, x2))
    return grad * inv
Example #5
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _TanGrad(op, grad):
  """Returns grad * sec^2(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    secx = math_ops.reciprocal(math_ops.cos(x))
    secx2 = math_ops.square(secx)
    return grad * secx2
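TensorFlow has no sec() op, so sec(x) is built as reciprocal(cos(x)) and then squared, giving the derivative d/dx tan(x) = sec^2(x) = 1/cos^2(x). A small check with the public API (TensorFlow 2 eager execution assumed; the input values are illustrative):

import tensorflow as tf

x = tf.constant([0.0, 0.5, 1.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.math.tan(x)
grad = tape.gradient(y, x)
expected = tf.math.square(tf.math.reciprocal(tf.math.cos(x)))  # sec^2(x)
print(grad, expected)            # elementwise equal up to float error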
Example #6
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _AcosGrad(op, grad):
  """Returns grad * -1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return -grad * inv
Example #7
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _AtanGrad(op, grad):
  """Returns grad * 1/(1 + x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    inv = math_ops.reciprocal(math_ops.add(one, x2))
    return grad * inv
Example #8
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _AngleGrad(op, grad):
  """Returns -grad / (Im(x) + iRe(x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    re = math_ops.real(x)
    im = math_ops.imag(x)
    z = math_ops.reciprocal(math_ops.complex(im, re))
    zero = constant_op.constant(0, dtype=grad.dtype)
    complex_grad = math_ops.complex(grad, zero)
    return -complex_grad * z
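To unpack the one-line docstring: for x = re + i*im, angle(x) = atan2(im, re), so the partial derivatives are d(angle)/d(re) = -im/(re^2 + im^2) and d(angle)/d(im) = re/(re^2 + im^2). The expression -1/(im + i*re) packs exactly these two values into a single complex number, since it equals (-im + i*re)/(re^2 + im^2): its real part is the derivative with respect to re and its imaginary part is the derivative with respect to im. Multiplying by the (real) incoming gradient, lifted to a complex number, then yields the gradient with respect to the complex input.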
Example #9
Source File: linalg_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  compute_v = op.get_attr("compute_v")
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e, grad_v]):
    if compute_v:
      v = op.outputs[1]
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) +
              f * math_ops.matmul(v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      _, v = linalg_ops.self_adjoint_eig(op.inputs[0])
      grad_a = math_ops.matmul(v,
                               math_ops.matmul(
                                   array_ops.matrix_diag(grad_e),
                                   v,
                                   adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle.
    grad_a = array_ops.matrix_band_part(
        grad_a + math_ops.conj(array_ops.matrix_transpose(grad_a)), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
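The reciprocal call here builds the matrix of pairwise reciprocals of eigenvalue differences by broadcasting two expanded copies of the eigenvalue vector against each other; the diagonal, where the difference is zero and the reciprocal is infinite, is immediately overwritten with zeros. A minimal sketch of just that construction with the public TensorFlow API (the eigenvalues are illustrative):

import tensorflow as tf

e = tf.constant([1.0, 2.0, 4.0])                       # example eigenvalues
diff = tf.expand_dims(e, -2) - tf.expand_dims(e, -1)   # diff[i, j] = e[j] - e[i]
f = tf.linalg.set_diag(tf.math.reciprocal(diff),       # 1/(e[j] - e[i]) off-diagonal
                       tf.zeros_like(e))               # inf on the diagonal replaced by 0
print(f)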
Example #10
Source File: spectral_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _IRFFTGradHelper(rank, rfft_fn):
  """Returns a gradient function for an IRFFT of the provided rank."""
  # Can't happen because we don't register a gradient for IRFFT3D.
  assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."

  def _Grad(op, grad):
    """A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
    # Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
    # and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
    # graph we special-case the situation where the FFT length and last
    # dimension of the input are known at graph construction time.
    fft_length = op.inputs[1]
    is_odd = math_ops.mod(fft_length[-1], 2)
    input_last_dimension = array_ops.shape(op.inputs[0])[-1]
    mask = array_ops.concat(
        [[1.0], 2.0 * array_ops.ones([input_last_dimension - 2 + is_odd]),
         array_ops.ones([1 - is_odd])], 0)

    rsize = math_ops.reciprocal(math_ops.to_float(_FFTSizeForGrad(grad, rank)))

    # The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
    # factor and a mask. The mask scales the gradient for the Hermitian
    # symmetric components of the RFFT by a factor of two, since these
    # components are de-duplicated in the RFFT.
    rfft = rfft_fn(grad, fft_length)
    return rfft * math_ops.cast(rsize * mask, dtypes.complex64), None

  return _Grad
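Here reciprocal supplies the 1/fft_length scaling factor of the gradient, while the concatenated mask doubles the Hermitian-symmetric bins that the forward RFFT de-duplicates. A small sketch of what the mask and scale look like for an even FFT length of 8 (the concrete length is illustrative, not taken from the example):

import tensorflow as tf

fft_length = 8                            # even-length inverse FFT
input_last_dim = fft_length // 2 + 1      # 5 complex input bins
is_odd = fft_length % 2                   # 0 for an even length
mask = tf.concat(
    [[1.0], 2.0 * tf.ones([input_last_dim - 2 + is_odd]),
     tf.ones([1 - is_odd])], 0)           # [1., 2., 2., 2., 1.]
rsize = tf.math.reciprocal(tf.cast(fft_length, tf.float32))   # 0.125
print(mask, rsize)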
Example #11
Source File: math_grad.py From keras-lambda with MIT License
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(x)
Example #12
Source File: math_grad.py From keras-lambda with MIT License
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(1 + x)
Example #13
Source File: math_grad.py From keras-lambda with MIT License
def _TanGrad(op, grad):
  """Returns grad * sec^2(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    secx = math_ops.reciprocal(math_ops.cos(x))
    secx2 = math_ops.square(secx)
    return grad * secx2
Example #14
Source File: math_grad.py From keras-lambda with MIT License
def _AsinGrad(op, grad):
  """Returns grad * 1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return grad * inv
Example #15
Source File: math_grad.py From keras-lambda with MIT License
def _AcosGrad(op, grad):
  """Returns grad * -1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return -grad * inv
Example #16
Source File: linalg_grad.py From keras-lambda with MIT License
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
                  v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e), v, adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle.
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
Example #17
Source File: nn_impl.py From keras-lambda with MIT License
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
  return (mean, variance)
Example #18
Source File: core_test.py From keras-lambda with MIT License
def setUp(self):
  super(CoreUnaryOpsTest, self).setUp()

  self.ops = [
      ('abs', operator.abs, math_ops.abs, core.abs_function),
      ('neg', operator.neg, math_ops.negative, core.neg),
      # TODO(shoyer): add unary + to core TensorFlow
      ('pos', None, None, None),
      ('sign', None, math_ops.sign, core.sign),
      ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
      ('square', None, math_ops.square, core.square),
      ('round', None, math_ops.round, core.round_function),
      ('sqrt', None, math_ops.sqrt, core.sqrt),
      ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
      ('log', None, math_ops.log, core.log),
      ('exp', None, math_ops.exp, core.exp),
      ('log', None, math_ops.log, core.log),
      ('ceil', None, math_ops.ceil, core.ceil),
      ('floor', None, math_ops.floor, core.floor),
      ('cos', None, math_ops.cos, core.cos),
      ('sin', None, math_ops.sin, core.sin),
      ('tan', None, math_ops.tan, core.tan),
      ('acos', None, math_ops.acos, core.acos),
      ('asin', None, math_ops.asin, core.asin),
      ('atan', None, math_ops.atan, core.atan),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
      ('digamma', None, math_ops.digamma, core.digamma),
      ('erf', None, math_ops.erf, core.erf),
      ('erfc', None, math_ops.erfc, core.erfc),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  self.test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
Example #19
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(1 + x)
Example #20
Source File: math_grad.py From lambda-packs with MIT License
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(x)
Example #21
Source File: math_grad.py From lambda-packs with MIT License
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(1 + x)
Example #22
Source File: math_grad.py From lambda-packs with MIT License
def _TanGrad(op, grad):
  """Returns grad * sec^2(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    secx = math_ops.reciprocal(math_ops.cos(x))
    secx2 = math_ops.square(secx)
    return grad * secx2
Example #23
Source File: math_grad.py From lambda-packs with MIT License
def _AsinGrad(op, grad):
  """Returns grad * 1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return grad * inv
Example #24
Source File: math_grad.py From lambda-packs with MIT License
def _AtanGrad(op, grad):
  """Returns grad * 1/(1 + x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    inv = math_ops.reciprocal(math_ops.add(one, x2))
    return grad * inv
Example #25
Source File: linalg_grad.py From lambda-packs with MIT License
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
                  v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e), v, adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle.
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
Example #26
Source File: nn_impl.py From lambda-packs with MIT License
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
  return (mean, variance)
Example #27
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(x)
Example #28
Source File: spectral_grad.py From lambda-packs with MIT License
def _IRFFTGradHelper(rank, rfft_fn):
  """Returns a gradient function for an IRFFT of the provided rank."""
  # Can't happen because we don't register a gradient for IRFFT3D.
  assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."

  def _Grad(op, grad):
    """A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
    # Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
    # and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
    # graph we special-case the situation where the FFT length and last
    # dimension of the input are known at graph construction time.
    fft_length = op.inputs[1]
    is_odd = math_ops.mod(fft_length[-1], 2)
    input_last_dimension = array_ops.shape(op.inputs[0])[-1]
    mask = array_ops.concat(
        [[1.0], 2.0 * array_ops.ones([input_last_dimension - 2 + is_odd]),
         array_ops.ones([1 - is_odd])], 0)

    rsize = math_ops.reciprocal(math_ops.to_float(_FFTSizeForGrad(grad, rank)))

    # The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
    # factor and a mask. The mask scales the gradient for the Hermitian
    # symmetric components of the RFFT by a factor of two, since these
    # components are de-duplicated in the RFFT.
    rfft = rfft_fn(grad, fft_length)
    return rfft * math_ops.cast(rsize * mask, dtypes.complex64), None

  return _Grad
Example #29
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _TanGrad(op, grad):
  """Returns grad * sec^2(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    secx = math_ops.reciprocal(math_ops.cos(x))
    secx2 = math_ops.square(secx)
    return grad * secx2
Example #30
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _AsinGrad(op, grad):
  """Returns grad * 1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return grad * inv