Python tensorflow.python.ops.math_ops.sign() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.sign(), drawn from open-source projects. The project and source file for each example are noted above it.
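As a quick orientation before the examples: math_ops is TensorFlow's internal Python module, and math_ops.sign is exposed publicly as tf.sign. A minimal sketch of its behavior, using only the public API:

import tensorflow as tf

x = tf.constant([-4.2, 0.0, 7.5])
print(tf.sign(x))  # [-1.  0.  1.]: -1 for negative, 0 at zero, +1 for positive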
Example #1
Source File: laplace.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
    # than `[-1, 1)`. In the case of `(0, 1)` we'd use
    # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
    # positive, "normal" number. However, the concept of subnormality exists
    # only at zero; here we need the smallest usable number larger than -1,
    # i.e., `-1 + eps/2`.
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log1p(-math_ops.abs(uniform_samples)))
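This is inverse-CDF sampling for the Laplace distribution: with u drawn uniformly from the open interval (-1, 1), the quantity loc - scale * sign(u) * log1p(-|u|) is Laplace(loc, scale) distributed. A minimal NumPy sketch of the same trick (the variable names here are illustrative, not from the source):

import numpy as np

rng = np.random.default_rng(0)
loc, scale = 0.0, 2.0
# Open interval (-1, 1): nextafter gives the smallest representable value above -1.
u = rng.uniform(np.nextafter(-1.0, 0.0), 1.0, size=100_000)
samples = loc - scale * np.sign(u) * np.log1p(-np.abs(u))
print(samples.var())  # should approach 2 * scale**2 = 8 for Laplace(0, 2)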
Example #2
Source File: laplace.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
    # than `[-1, 1)`. In the case of `(0, 1)` we'd use
    # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
    # positive, "normal" number. However, the concept of subnormality exists
    # only at zero; here we need the smallest usable number larger than -1,
    # i.e., `-1 + eps/2`.
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log1p(-math_ops.abs(uniform_samples)))
Example #3
Source File: laplace.py From deep_image_model with Apache License 2.0
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(0, ([n], self.batch_shape()))
    # Sample uniformly-at-random from the open-interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))
Example #4
Source File: core_test.py From keras-lambda with MIT License
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()
    self.ops = [
        ('abs', operator.abs, math_ops.abs, core.abs_function),
        ('neg', operator.neg, math_ops.negative, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, math_ops.sign, core.sign),
        ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
        ('square', None, math_ops.square, core.square),
        ('round', None, math_ops.round, core.round_function),
        ('sqrt', None, math_ops.sqrt, core.sqrt),
        ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
        ('log', None, math_ops.log, core.log),
        ('exp', None, math_ops.exp, core.exp),
        ('log', None, math_ops.log, core.log),
        ('ceil', None, math_ops.ceil, core.ceil),
        ('floor', None, math_ops.floor, core.floor),
        ('cos', None, math_ops.cos, core.cos),
        ('sin', None, math_ops.sin, core.sin),
        ('tan', None, math_ops.tan, core.tan),
        ('acos', None, math_ops.acos, core.acos),
        ('asin', None, math_ops.asin, core.asin),
        ('atan', None, math_ops.atan, core.atan),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
        ('digamma', None, math_ops.digamma, core.digamma),
        ('erf', None, math_ops.erf, core.erf),
        ('erfc', None, math_ops.erfc, core.erfc),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes)
Example #5
Source File: laplace.py From keras-lambda with MIT License
def _cdf(self, x):
    y = x - self.loc
    return (0.5 + 0.5 * math_ops.sign(y) *
            (1. - math_ops.exp(-math_ops.abs(y) / self.scale)))
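The sign trick folds the two branches of the Laplace CDF into one branch-free expression: F(x) = 0.5 * exp(y / scale) for y = x - loc < 0, and 1 - 0.5 * exp(-y / scale) otherwise. A small NumPy check of that equivalence (a sketch, not from the source):

import numpy as np

loc, scale = 1.0, 2.0
y = np.linspace(-5.0, 5.0, 11) - loc
branchless = 0.5 + 0.5 * np.sign(y) * (1.0 - np.exp(-np.abs(y) / scale))
piecewise = np.where(y < 0, 0.5 * np.exp(y / scale), 1.0 - 0.5 * np.exp(-y / scale))
assert np.allclose(branchless, piecewise)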
Example #6
Source File: laplace.py From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], self.batch_shape()), 0)
    # Sample uniformly-at-random from the open-interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))
Example #7
Source File: math_grad.py From keras-lambda with MIT License
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
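For complex inputs, TensorFlow defines sign(z) as z / |z| (and 0 at z = 0), so the expression above propagates grad * z / |z|, with the real-valued incoming gradient first cast to complex. A NumPy sketch of the same identity (illustrative only; np.sign on complex arrays historically uses a different convention, so the unit vector is computed explicitly):

import numpy as np

z = np.array([3 + 4j, -1 - 1j])
unit = z / np.abs(z)                  # what sign(z) means for complex z
incoming_grad = np.array([0.5, 2.0])  # real-valued upstream gradient
print((incoming_grad + 0j) * unit)    # gradient of |z| in the complex domain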
Example #8
Source File: math_grad.py From keras-lambda with MIT License
def _AbsGrad(op, grad):
    x = op.inputs[0]
    return grad * math_ops.sign(x)
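This registers the gradient d|x|/dx = sign(x) for the Abs op. A minimal check with the public TF 2.x API (the example itself targets TF 1.x internals):

import tensorflow as tf

x = tf.constant([-3.0, -0.5, 2.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.abs(x)
print(tape.gradient(y, x))  # [-1. -1.  1.], i.e. tf.sign(x)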
Example #9
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
Example #10
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _AbsGrad(op, grad):
    x = op.inputs[0]
    return grad * math_ops.sign(x)
Example #11
Source File: laplace.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _cdf(self, x):
    z = self._z(x)
    return (0.5 + 0.5 * math_ops.sign(z) *
            (1. - math_ops.exp(-math_ops.abs(z))))
Example #12
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def sign(x):
    """Element-wise sign.

    Arguments:
        x: Tensor or variable.

    Returns:
        A tensor.
    """
    return math_ops.sign(x)
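A hedged usage sketch through the public Keras backend alias (assuming a TensorFlow-backed Keras, where K.sign dispatches to math_ops.sign):

from tensorflow.keras import backend as K

x = K.constant([-2.0, 0.0, 3.0])
print(K.eval(K.sign(x)))  # [-1.  0.  1.]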
Example #13
Source File: sparse_optimizers.py From rigl with Apache License 2.0
def get_grow_tensor(self, weight, method):
    if method.startswith('grad_scale'):
        masked_grad = self._weight2masked_grads[weight.name]
        divisor = extract_number(method)
        grow_tensor = masked_grad / divisor
    elif method.startswith('grad_sign'):
        masked_grad_sign = math_ops.sign(self._weight2masked_grads[weight.name])
        divisor = extract_number(method)
        grow_tensor = masked_grad_sign / divisor
    else:
        grow_tensor = super(
            SparseRigLOptimizer, self).get_grow_tensor(weight, method)
    return grow_tensor
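With a 'grad_sign' method string, newly grown connections are initialized to the sign of the masked gradient, scaled down by a constant that extract_number parses out of the method name. A NumPy sketch of just that initialization rule (the method-string format and divisor value are assumptions for illustration):

import numpy as np

masked_grad = np.array([[0.4, -0.1], [0.0, -2.3]])
divisor = 100.0  # e.g. what extract_number might return for a hypothetical 'grad_sign_100'
grow_tensor = np.sign(masked_grad) / divisor
print(grow_tensor)  # small +/- seeds, and zero where the gradient is zero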
Example #14
Source File: modrelu.py From AmusingPythonCodes with MIT License
def modrelu(z, b, comp):
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) +
                               math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1),
                                 array_ops.zeros_like(z_norm))
        step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)
    return math_ops.multiply(step3, step2)
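On real inputs this computes sign(z) * relu(|z| + b): the bias shifts the magnitude, ReLU gates it, and sign restores the direction. A NumPy sketch of the real-valued branch (a sketch, with the same 1e-5 stabilizer as the source):

import numpy as np

def modrelu_real(z, b, eps=1e-5):
    z_norm = np.abs(z) + eps
    gated = np.maximum(z_norm + b, 0.0)  # relu(|z| + b)
    return np.sign(z) * gated

print(modrelu_real(np.array([-2.0, 0.5, 3.0]), b=-1.0))  # [-1.00001  0.  2.00001]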
Example #15
Source File: modrelu.py From rotational-unit-of-memory with MIT License
def modrelu(z, b, comp):
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) +
                               math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1),
                                 array_ops.zeros_like(z_norm))
        step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)
    return math_ops.multiply(step3, step2)
Example #16
Source File: laplace.py From deep_image_model with Apache License 2.0
def _cdf(self, x):
    y = x - self.loc
    return (0.5 + 0.5 * math_ops.sign(y) *
            (1. - math_ops.exp(-math_ops.abs(y) / self.scale)))
Example #17
Source File: utils.py From keras-adamw with MIT License
def _apply_weight_decays(self, var, var_t):
    l1, l2 = self.weight_decays[var.name]
    if l1 == 0 and l2 == 0:
        if self.init_verbose and not self._init_notified:
            print("Both penalties are 0 for %s, will skip" % var.name)
        return var_t

    norm = math_ops.cast(
        math_ops.sqrt(self.batch_size / self.total_iterations_wd), 'float32')
    l1_normalized = l1 * norm
    l2_normalized = l2 * norm

    if l1 != 0 and l2 != 0:
        decay = l1_normalized * math_ops.sign(var) + l2_normalized * var
    elif l1 != 0:
        decay = l1_normalized * math_ops.sign(var)
    else:
        decay = l2_normalized * var
    var_t = var_t - self.eta_t * decay

    if self.init_verbose and not self._init_notified:
        norm_print = (self.batch_size / self.total_iterations_wd) ** (1 / 2)
        l1n_print, l2n_print = l1 * norm_print, l2 * norm_print
        decays_str = "{}(L1), {}(L2)".format(l1n_print, l2n_print)
        print('{} weight decay set for {}'.format(decays_str, var.name))
    return var_t
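The decay term combines an L1 part, l1 * sign(w), and an L2 part, l2 * w, both scaled by sqrt(batch_size / total_iterations_wd), and is subtracted from the parameter update separately from the gradient step (decoupled weight decay). A NumPy sketch of just the decay arithmetic (the constants here are illustrative):

import numpy as np

w = np.array([0.5, -1.2, 0.0])
l1, l2 = 1e-4, 1e-3
norm = np.sqrt(32 / 1000)                      # sqrt(batch_size / total_iterations_wd)
decay = l1 * norm * np.sign(w) + l2 * norm * w
print(w - 0.01 * decay)                        # eta_t = 0.01 in this sketch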
Example #18
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
Example #19
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _AbsGrad(op, grad):
    x = op.inputs[0]
    return grad * math_ops.sign(x)
Example #20
Source File: core_test.py From auto-alt-text-lambda-api with MIT License
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()
    self.ops = [
        ('abs', operator.abs, math_ops.abs, core.abs_function),
        ('neg', operator.neg, math_ops.negative, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, math_ops.sign, core.sign),
        ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
        ('square', None, math_ops.square, core.square),
        ('round', None, math_ops.round, core.round_function),
        ('sqrt', None, math_ops.sqrt, core.sqrt),
        ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
        ('log', None, math_ops.log, core.log),
        ('exp', None, math_ops.exp, core.exp),
        ('log', None, math_ops.log, core.log),
        ('ceil', None, math_ops.ceil, core.ceil),
        ('floor', None, math_ops.floor, core.floor),
        ('cos', None, math_ops.cos, core.cos),
        ('sin', None, math_ops.sin, core.sin),
        ('tan', None, math_ops.tan, core.tan),
        ('acos', None, math_ops.acos, core.acos),
        ('asin', None, math_ops.asin, core.asin),
        ('atan', None, math_ops.atan, core.atan),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
        ('digamma', None, math_ops.digamma, core.digamma),
        ('erf', None, math_ops.erf, core.erf),
        ('erfc', None, math_ops.erfc, core.erfc),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes)
Example #21
Source File: laplace.py From auto-alt-text-lambda-api with MIT License
def _cdf(self, x):
    y = x - self.loc
    return (0.5 + 0.5 * math_ops.sign(y) *
            (1. - math_ops.exp(-math_ops.abs(y) / self.scale)))
Example #22
Source File: laplace.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], self.batch_shape()), 0)
    # Sample uniformly-at-random from the open-interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))
Example #23
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
Example #24
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _AbsGrad(op, grad):
    x = op.inputs[0]
    return grad * math_ops.sign(x)
Example #25
Source File: backend.py From lambda-packs with MIT License
def sign(x):
    """Element-wise sign.

    Arguments:
        x: Tensor or variable.

    Returns:
        A tensor.
    """
    return math_ops.sign(x)
Example #26
Source File: math_grad.py From lambda-packs with MIT License
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
Example #27
Source File: math_grad.py From lambda-packs with MIT License
def _AbsGrad(op, grad):
    x = op.inputs[0]
    return grad * math_ops.sign(x)
Example #28
Source File: laplace.py From lambda-packs with MIT License
def _cdf(self, x):
    z = self._z(x)
    return (0.5 + 0.5 * math_ops.sign(z) *
            (1. - math_ops.exp(-math_ops.abs(z))))
Example #29
Source File: indRNN.py From Text-Classification with Apache License 2.0
def build(self, inputs_shape):
    '''construct the IndRNN Cell'''
    if inputs_shape[1].value is None:
        raise ValueError("Expected input shape[1] is known")

    input_depth = inputs_shape[1]
    if self._input_kernel_initializer is None:
        self._input_kernel_initializer = init_ops.random_normal_initializer(
            mean=0, stddev=1e-3)
    # matrix W
    self._input_kernel = self.add_variable(
        "input_kernel",
        shape=[input_depth, self._num_units],
        initializer=self._input_kernel_initializer)

    if self._recurrent_recurrent_kernel_initializer is None:
        self._recurrent_recurrent_kernel_initializer = (
            init_ops.constant_initializer(1.))
    # matrix U
    self._recurrent_kernel = self.add_variable(
        "recurrent_kernel",
        shape=[self._num_units],
        initializer=self._recurrent_recurrent_kernel_initializer)

    # Clip the U to min - max
    if self._recurrent_min_abs:
        abs_kernel = math_ops.abs(self._recurrent_kernel)
        min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
        self._recurrent_kernel = math_ops.multiply(
            math_ops.sign(self._recurrent_kernel), min_abs_kernel)
    if self._recurrent_max_abs:
        self._recurrent_kernel = clip_ops.clip_by_value(
            self._recurrent_kernel,
            -self._recurrent_max_abs,
            self._recurrent_max_abs)

    self._bias = self.add_variable(
        "bias",
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))
    # built finished
    self.built = True
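The idiom sign(u) * maximum(|u|, min_abs) pushes each recurrent weight away from zero while preserving its sign, and clip_by_value then bounds the magnitude from above. A NumPy sketch of the two clips (values are illustrative):

import numpy as np

u = np.array([0.002, -0.5, 1.7, -1.2])
min_abs, max_abs = 0.01, 1.5
u = np.sign(u) * np.maximum(np.abs(u), min_abs)  # enforce |u| >= min_abs
u = np.clip(u, -max_abs, max_abs)                # enforce |u| <= max_abs
print(u)  # [ 0.01 -0.5  1.5 -1.2]; note exact zeros would stay zero, since sign(0) == 0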
Example #30
Source File: ind_rnn_cell.py From indrnn with Apache License 2.0
def build(self, inputs_shape):
    if inputs_shape[1].value is None:
        raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                         % inputs_shape)

    input_depth = inputs_shape[1].value
    if self._input_initializer is None:
        self._input_initializer = init_ops.random_normal_initializer(
            mean=0.0, stddev=0.001)
    self._input_kernel = self.add_variable(
        "input_kernel",
        shape=[input_depth, self._num_units],
        initializer=self._input_initializer)

    if self._recurrent_initializer is None:
        self._recurrent_initializer = init_ops.constant_initializer(1.)
    self._recurrent_kernel = self.add_variable(
        "recurrent_kernel",
        shape=[self._num_units],
        initializer=self._recurrent_initializer)

    # Clip the absolute values of the recurrent weights to the specified minimum
    if self._recurrent_min_abs:
        abs_kernel = math_ops.abs(self._recurrent_kernel)
        min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
        self._recurrent_kernel = math_ops.multiply(
            math_ops.sign(self._recurrent_kernel), min_abs_kernel)

    # Clip the absolute values of the recurrent weights to the specified maximum
    if self._recurrent_max_abs:
        self._recurrent_kernel = clip_ops.clip_by_value(
            self._recurrent_kernel,
            -self._recurrent_max_abs,
            self._recurrent_max_abs)

    self._bias = self.add_variable(
        "bias",
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))
    self.built = True