Python tensorflow.python.ops.math_ops.complex() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.complex(), collected from open-source projects. The source file and originating project are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.ops.math_ops.
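For orientation: math_ops.complex is the internal op behind the public tf.complex. It pairs a real tensor with an imaginary tensor of the same shape and real dtype to produce a complex tensor. A minimal illustrative snippet (not taken from any of the projects below):

from tensorflow.python.ops import math_ops
import tensorflow as tf

real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
c = math_ops.complex(real, imag)  # complex64 tensor [2.25+4.75j, 3.25+5.75j]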
Example #1
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
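The two formulas this gradient implements are d(x^y)/dx = y * x^(y-1) and d(x^y)/dy = z * log(x) with z = x^y. A quick numerical check with tf.GradientTape, assuming TF 2.x; this snippet is an illustration and not part of the source file above:

import tensorflow as tf

x = tf.constant(2.0)
y = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch([x, y])
    z = tf.pow(x, y)
dz_dx, dz_dy = tape.gradient(z, [x, y])
print(dz_dx.numpy())  # y * x**(y - 1) = 3 * 2**2 = 12.0
print(dz_dy.numpy())  # z * log(x) = 8 * ln(2), about 5.545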
Example #2
Source File: math_grad.py From keras-lambda with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #3
Source File: math_grad.py From lambda-packs with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #4
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #5
Source File: EUNN.py From AmusingPythonCodes with MIT License
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or "eunn_cell"):
        state = _eunn_loop(state, self._capacity, self.diag_vec,
                           self.off_vec, self.diag, self._fft)
        input_matrix_init = init_ops.random_uniform_initializer(-0.01, 0.01)
        if self._comp:
            input_matrix_re = vs.get_variable(
                "U_re", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            input_matrix_im = vs.get_variable(
                "U_im", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            inputs_re = math_ops.matmul(inputs, input_matrix_re)
            inputs_im = math_ops.matmul(inputs, input_matrix_im)
            inputs = math_ops.complex(inputs_re, inputs_im)
        else:
            input_matrix = vs.get_variable(
                "U", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            inputs = math_ops.matmul(inputs, input_matrix)
        bias = vs.get_variable("modReLUBias", [self._hidden_size],
                               initializer=init_ops.constant_initializer())
        output = self._activation((inputs + state), bias, self._comp)
        return output, output
Example #6
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = math_ops.select(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = math_ops.select(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #7
Source File: linear_operator_test_util.py From lambda-packs with MIT License
def test_log_abs_det(self):
  self._skip_if_tests_to_skip_contains("log_abs_det")
  for use_placeholder in False, True:
    for shape in self._shapes_to_test:
      for dtype in self._dtypes_to_test:
        if dtype.is_complex:
          self.skipTest(
              "tf.matrix_determinant does not work with complex, so this "
              "test is being skipped.")
        with self.test_session(graph=ops.Graph()) as sess:
          sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
          operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
              shape, dtype, use_placeholder=use_placeholder)
          op_log_abs_det = operator.log_abs_determinant()
          mat_log_abs_det = math_ops.log(
              math_ops.abs(linalg_ops.matrix_determinant(mat)))
          if not use_placeholder:
            self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
          op_log_abs_det_v, mat_log_abs_det_v = sess.run(
              [op_log_abs_det, mat_log_abs_det], feed_dict=feed_dict)
          self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
Example #8
Source File: math_grad.py From keras-lambda with MIT License
def _FFT3DGrad(_, grad):
  size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.ifft3d(grad) * math_ops.complex(size, 0.)
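The FFT gradients in these examples all call a helper, _FFTSizeForGrad(grad, rank), that the listings do not show. Judging by how it is used, it returns the number of points the DFT runs over (the product of the innermost rank dimensions of grad); that factor compensates for the 1/N normalization that ifft applies and fft does not, which is why forward-FFT gradients multiply by size and inverse-FFT gradients by 1/size. A plausible reconstruction, offered as an assumption rather than the verbatim source:

from tensorflow.python.ops import array_ops, math_ops

def _FFTSizeForGrad(grad, rank):
  # Product of the innermost `rank` dimensions of grad, i.e. the DFT length N.
  return math_ops.reduce_prod(array_ops.shape(grad)[-rank:])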
Example #9
Source File: math_grad.py From keras-lambda with MIT License
def _IFFTGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return math_ops.fft(grad) * math_ops.complex(rsize, 0.)
Example #10
Source File: linear_operator_test_util.py From keras-lambda with MIT License
def random_sign_uniform(shape,
                        minval=None,
                        maxval=None,
                        dtype=dtypes.float32,
                        seed=None):
  """Tensor with (possibly complex) random entries from a "sign Uniform".

  Letting `Z` be a random variable equal to `-1` and `1` with equal
  probability, samples from this `Op` are distributed like

  ```
  Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
  Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype.
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_sign_uniform"):
    unsigned_samples = random_uniform(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
    if seed is not None:
      seed += 12
    signs = math_ops.sign(
        random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
    return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
Example #11
Source File: linear_operator_test_util.py From keras-lambda with MIT License
def random_uniform(shape,
                   minval=None,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None):
  """Tensor with (possibly complex) Uniform entries.

  Samples are distributed like

  ```
  Uniform[minval, maxval], if dtype is real,
  X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_uniform"):
    samples = random_ops.random_uniform(
        shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 12345
      more_samples = random_ops.random_uniform(
          shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval,
          seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
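A hypothetical call to the helper above, assuming the module's own imports (dtypes, random_ops, math_ops, ops) are in scope; with a complex dtype the real and imaginary parts are drawn independently with decorrelated seeds:

samples = random_uniform(
    shape=[2, 3], minval=-1., maxval=1., dtype=dtypes.complex64, seed=42)
# samples is a complex64 Tensor; its real and imaginary parts are each
# Uniform[-1, 1) draws.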
Example #12
Source File: linear_operator_test_util.py From keras-lambda with MIT License
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_normal"):
    samples = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = random_ops.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
Example #13
Source File: math_grad.py From keras-lambda with MIT License
def _IFFT3DGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.fft3d(grad) * math_ops.complex(rsize, 0.)
Example #14
Source File: spectral_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _FFT3DGrad(_, grad):
  size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return spectral_ops.ifft3d(grad) * math_ops.complex(size, 0.)
Example #15
Source File: math_grad.py From keras-lambda with MIT License
def _IFFT2DGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return math_ops.fft2d(grad) * math_ops.complex(rsize, 0.)
Example #16
Source File: math_grad.py From keras-lambda with MIT License
def _FFT2DGrad(_, grad):
  size = math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return math_ops.ifft2d(grad) * math_ops.complex(size, 0.)
Example #17
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _IFFTGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return math_ops.fft(grad) * math_ops.complex(rsize, 0.)
Example #18
Source File: math_grad.py From keras-lambda with MIT License
def _RealGrad(_, grad):
  """Returns 'grad' as the real part and sets the imaginary part to 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(grad, zero)
Example #19
Source File: math_grad.py From keras-lambda with MIT License
def _ImagGrad(_, grad):
  """Returns 'grad' as the imaginary part and sets the real part to 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(zero, grad)
Example #20
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _RealGrad(_, grad):
  """Returns 'grad' as the real part and sets the imaginary part to 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(grad, zero)
Example #21
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _ConjGrad(_, grad):
  """Returns the complex conjugate of grad."""
  return math_ops.conj(grad)
Example #22
Source File: spectral_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _IFFT2DGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return spectral_ops.fft2d(grad) * math_ops.complex(rsize, 0.)
Example #23
Source File: spectral_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _FFT2DGrad(_, grad):
  size = math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return spectral_ops.ifft2d(grad) * math_ops.complex(size, 0.)
Example #24
Source File: spectral_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _IFFTGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return spectral_ops.fft(grad) * math_ops.complex(rsize, 0.)
Example #25
Source File: spectral_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _FFTGrad(_, grad):
  size = math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return spectral_ops.ifft(grad) * math_ops.complex(size, 0.)
Example #26
Source File: modrelu.py From AmusingPythonCodes with MIT License
def modrelu(z, b, comp):
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) +
                               math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
        step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)
    return math_ops.multiply(step3, step2)
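modReLU gates a complex activation by its modulus: it applies ReLU to |z| + b and then restores the phase of z. A hypothetical eager-mode call (the inputs are made up for this illustration; note that bias_add expects an input of rank at least 2):

import tensorflow as tf

z = tf.complex(tf.constant([[1.0, -2.0]]), tf.constant([[0.5, 0.5]]))
b = tf.constant([0.5, -3.0])
out = modrelu(z, b, comp=True)
# Entries where |z| + b <= 0 are zeroed; the rest keep z's phase with
# modulus relu(|z| + b).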
Example #27
Source File: EUNN.py From rotational-unit-of-memory with MIT License
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or "eunn_cell"):
        state = _eunn_loop(state, self._capacity, self.diag_vec,
                           self.off_vec, self.diag, self._fft)
        input_matrix_init = init_ops.random_uniform_initializer(-0.01, 0.01)
        if self._comp:
            input_matrix_re = vs.get_variable(
                "U_re", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            input_matrix_im = vs.get_variable(
                "U_im", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            inputs_re = math_ops.matmul(inputs, input_matrix_re)
            inputs_im = math_ops.matmul(inputs, input_matrix_im)
            inputs = math_ops.complex(inputs_re, inputs_im)
        else:
            input_matrix = vs.get_variable(
                "U", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            inputs = math_ops.matmul(inputs, input_matrix)
        bias = vs.get_variable("modReLUBias", [self._hidden_size],
                               initializer=init_ops.constant_initializer())
        output = self._activation((inputs + state), bias, self._comp)
        return output, output
Example #28
Source File: modrelu.py From rotational-unit-of-memory with MIT License
def modrelu(z, b, comp):
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) +
                               math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
        step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)
    return math_ops.multiply(step3, step2)
Example #29
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _IFFT3DGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.fft3d(grad) * math_ops.complex(rsize, 0.)
Example #30
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _FFT3DGrad(_, grad):
  size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.ifft3d(grad) * math_ops.complex(size, 0.)