Python tensorflow.python.ops.math_ops.sin() Examples
The following are 12 code examples of tensorflow.python.ops.math_ops.sin(). You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the module tensorflow.python.ops.math_ops.
Example #1
Source File: math_grad.py From lambda-packs with MIT License
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
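A minimal numerical check of the rule above, assuming TensorFlow 2.x eager execution and the public tf.cos/tf.sin aliases rather than the internal math_ops module; the gradient of cos(x) should equal -sin(x).

import numpy as np
import tensorflow as tf

x = tf.constant([0.0, 0.5, 1.0, 2.0])
with tf.GradientTape() as tape:
    tape.watch(x)          # constants are not watched by default
    y = tf.cos(x)
grad = tape.gradient(y, x)  # uses the registered Cos gradient rule
np.testing.assert_allclose(grad.numpy(), -np.sin(x.numpy()), rtol=1e-5, atol=1e-6)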
Example #2
Source File: backend.py From lambda-packs with MIT License
def sin(x):
  """Computes sin of x element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.sin(x)
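A small usage sketch, assuming the public tf.keras.backend alias forwards to the same math_ops.sin call shown above.

import numpy as np
import tensorflow as tf

x = tf.constant([0.0, np.pi / 2.0, np.pi], dtype=tf.float32)
print(tf.keras.backend.sin(x).numpy())  # approximately [0., 1., 0.]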
Example #3
Source File: image_ops.py From lambda-packs with MIT License
def angles_to_projective_transforms(angles, image_height, image_width):
  """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of
      images) a vector with an angle to rotate each image in the batch.
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.

  Returns:
    A tensor of shape (num_images, 8). Projective transforms which can be
    given to `tf.contrib.image.transform`.
  """
  angle_or_angles = ops.convert_to_tensor(
      angles, name="angles", dtype=dtypes.float32)
  if len(angle_or_angles.get_shape()) == 0:  # pylint: disable=g-explicit-length-test
    angles = angle_or_angles[None]
  elif len(angle_or_angles.get_shape()) == 1:
    angles = angle_or_angles
  else:
    raise TypeError("Angles should have rank 0 or 1.")
  x_offset = ((image_width - 1) -
              (math_ops.cos(angles) * (image_width - 1) -
               math_ops.sin(angles) * (image_height - 1))) / 2.0
  y_offset = ((image_height - 1) -
              (math_ops.sin(angles) * (image_width - 1) +
               math_ops.cos(angles) * (image_height - 1))) / 2.0
  num_angles = array_ops.shape(angles)[0]
  return array_ops.concat(
      values=[
          math_ops.cos(angles)[:, None],
          -math_ops.sin(angles)[:, None],
          x_offset[:, None],
          math_ops.sin(angles)[:, None],
          math_ops.cos(angles)[:, None],
          y_offset[:, None],
          array_ops.zeros((num_angles, 2), dtypes.float32),
      ],
      axis=1)
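A pure-NumPy sketch of the same construction, assuming the contrib.image convention that the 8 parameters [a0, a1, a2, b0, b1, b2, c0, c1] map an output pixel (x, y) to the input pixel ((a0*x + a1*y + a2) / k, (b0*x + b1*y + b2) / k) with k = c0*x + c1*y + 1. The check below confirms that the image center is a fixed point of the rotation, which is what the x_offset/y_offset terms arrange.

import numpy as np

def np_angle_to_transform(angle, image_height, image_width):
    # mirrors the offsets computed above; rotation about the image center
    cx, cy = (image_width - 1) / 2.0, (image_height - 1) / 2.0
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    x_offset = cx - (cos_a * cx - sin_a * cy)
    y_offset = cy - (sin_a * cx + cos_a * cy)
    return [cos_a, -sin_a, x_offset, sin_a, cos_a, y_offset, 0.0, 0.0]

a0, a1, a2, b0, b1, b2, _, _ = np_angle_to_transform(np.pi / 6.0, 32, 48)
cx, cy = (48 - 1) / 2.0, (32 - 1) / 2.0
assert np.allclose([a0 * cx + a1 * cy + a2, b0 * cx + b1 * cy + b2], [cx, cy])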
Example #4
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
Example #5
Source File: core_test.py From auto-alt-text-lambda-api with MIT License
def setUp(self):
  super(CoreUnaryOpsTest, self).setUp()
  self.ops = [
      ('abs', operator.abs, math_ops.abs, core.abs_function),
      ('neg', operator.neg, math_ops.negative, core.neg),
      # TODO(shoyer): add unary + to core TensorFlow
      ('pos', None, None, None),
      ('sign', None, math_ops.sign, core.sign),
      ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
      ('square', None, math_ops.square, core.square),
      ('round', None, math_ops.round, core.round_function),
      ('sqrt', None, math_ops.sqrt, core.sqrt),
      ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
      ('log', None, math_ops.log, core.log),
      ('exp', None, math_ops.exp, core.exp),
      ('log', None, math_ops.log, core.log),
      ('ceil', None, math_ops.ceil, core.ceil),
      ('floor', None, math_ops.floor, core.floor),
      ('cos', None, math_ops.cos, core.cos),
      ('sin', None, math_ops.sin, core.sin),
      ('tan', None, math_ops.tan, core.tan),
      ('acos', None, math_ops.acos, core.acos),
      ('asin', None, math_ops.asin, core.asin),
      ('atan', None, math_ops.atan, core.atan),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
      ('digamma', None, math_ops.digamma, core.digamma),
      ('erf', None, math_ops.erf, core.erf),
      ('erfc', None, math_ops.erfc, core.erfc),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  self.test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
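A simplified, self-contained analogue of the table-driven pattern above: pair each TensorFlow unary op with its NumPy counterpart and check them on shared test data. The real test additionally exercises the LabeledTensor wrappers from the hypothetical-here `core` module, which is not reproduced in this sketch.

import numpy as np
import tensorflow as tf

unary_ops = [("sin", tf.sin, np.sin), ("cos", tf.cos, np.cos), ("exp", tf.exp, np.exp)]
x = np.linspace(-1.0, 1.0, 7).astype(np.float32)
for name, tf_op, np_op in unary_ops:
    # each TF result should agree with the NumPy reference
    np.testing.assert_allclose(tf_op(x).numpy(), np_op(x), rtol=1e-5, err_msg=name)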
Example #6
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
Example #7
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_sin(data):
  """ One iteration of sin """
  return _test_unary_elemwise(math_ops.sin, data)


#######################################################################
# Cos
# ---
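A hypothetical sketch of only the TensorFlow reference side of such a test: build a TF1-style graph around sin (here via tf.compat.v1 and the public tf.sin alias) and run it to produce the values a converted TVM module would be compared against. The real _test_unary_elemwise helper also performs the TVM compilation and comparison, which is omitted here.

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
data = np.random.uniform(-1.0, 1.0, size=(1, 8)).astype(np.float32)
inp = tf.compat.v1.placeholder(tf.float32, shape=data.shape, name="input")
out = tf.sin(inp)
with tf.compat.v1.Session() as sess:
    reference = sess.run(out, feed_dict={inp: data})  # ground-truth outputs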
Example #8
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def sin(x):
  """Computes sin of x element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.sin(x)
Example #9
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _CosGrad(op, grad):
  """Returns grad * -sin(x).""" 
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
Example #10
Source File: math_grad.py From keras-lambda with MIT License
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
Example #11
Source File: core_test.py From keras-lambda with MIT License
def setUp(self):
  super(CoreUnaryOpsTest, self).setUp()
  self.ops = [
      ('abs', operator.abs, math_ops.abs, core.abs_function),
      ('neg', operator.neg, math_ops.negative, core.neg),
      # TODO(shoyer): add unary + to core TensorFlow
      ('pos', None, None, None),
      ('sign', None, math_ops.sign, core.sign),
      ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
      ('square', None, math_ops.square, core.square),
      ('round', None, math_ops.round, core.round_function),
      ('sqrt', None, math_ops.sqrt, core.sqrt),
      ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
      ('log', None, math_ops.log, core.log),
      ('exp', None, math_ops.exp, core.exp),
      ('log', None, math_ops.log, core.log),
      ('ceil', None, math_ops.ceil, core.ceil),
      ('floor', None, math_ops.floor, core.floor),
      ('cos', None, math_ops.cos, core.cos),
      ('sin', None, math_ops.sin, core.sin),
      ('tan', None, math_ops.tan, core.tan),
      ('acos', None, math_ops.acos, core.acos),
      ('asin', None, math_ops.asin, core.asin),
      ('atan', None, math_ops.atan, core.atan),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
      ('digamma', None, math_ops.digamma, core.digamma),
      ('erf', None, math_ops.erf, core.erf),
      ('erfc', None, math_ops.erfc, core.erfc),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  self.test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
Example #12
Source File: embedding.py From NJUNMT-tf with Apache License 2.0
def _add_sinusoids_signal(x, time, min_timescale=1.0, max_timescale=1.0e4):
  """Adds a bunch of sinusoids of different frequencies to a Tensor.

  Each channel of the input Tensor is incremented by a sinusoid of a
  different frequency and phase.

  This allows attention to learn to use absolute and relative positions.
  Timing signals should be added to some precursors of both the query and
  the memory inputs to attention.

  The use of relative position is possible because sin(x+y) and cos(x+y)
  can be expressed in terms of y, sin(x) and cos(x).

  In particular, we use a geometric sequence of timescales starting with
  min_timescale and ending with max_timescale. The number of different
  timescales is equal to channels / 2. For each timescale, we generate the
  two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale).
  All of these sinusoids are concatenated in the channels dimension.

  Args:
    x: a Tensor with shape [batch, length, channels]
    time: a scalar time step, used when x has rank 2 (a single step)
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor the same shape as x.
  """
  channels = x.get_shape().as_list()[-1]
  if x.get_shape().ndims == 3:  # [batch_size, timesteps, dim]
    length = array_ops.shape(x)[1]
    position = math_ops.to_float(math_ops.range(length))
  elif x.get_shape().ndims == 2:  # [batch_size, dim]
    length = 1
    position = math_ops.to_float(math_ops.range(time, time + 1))
  else:
    raise ValueError("need a Tensor with rank 2 or 3")
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (math_ops.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * math_ops.exp(
      math_ops.to_float(math_ops.range(num_timescales)) *
      -log_timescale_increment)
  scaled_time = (array_ops.expand_dims(position, 1) *
                 array_ops.expand_dims(inv_timescales, 0))
  signal = array_ops.concat(
      [math_ops.sin(scaled_time), math_ops.cos(scaled_time)], axis=1)
  signal = array_ops.pad(signal, [[0, 0], [0, math_ops.mod(channels, 2)]])
  if x.get_shape().ndims == 3:
    signal = array_ops.reshape(signal, [1, length, channels])
  else:
    signal = array_ops.reshape(signal, [1, channels])
  return x + signal
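A NumPy analogue of the signal construction above (the rank-3 branch), useful for inspecting the positional encoding that gets added to the embeddings; the function name sinusoid_signal is introduced here for illustration only.

import numpy as np

def sinusoid_signal(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    # one sin/cos pair per timescale, concatenated along the channel axis
    num_timescales = channels // 2
    log_increment = np.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
    scaled_time = np.arange(length)[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    # pad one zero channel when the channel count is odd, as the TF code does
    return np.pad(signal, [[0, 0], [0, channels % 2]])

print(sinusoid_signal(length=50, channels=8).shape)  # (50, 8)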