Python tensorflow.cos() Examples

The following are 30 code examples of tensorflow.cos(), taken from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
Example #1
Source File: helper.py    From pointnet-registration-framework with MIT License
def rotate_point_cloud_by_angle_z(batch_data, rotation_angle):
	""" Rotate the point cloud about the z-axis (up direction) by a given angle.
		Input:
		  BxNx3 array, original batch of point clouds
		Return:
		  BxNx3 array, rotated batch of point clouds
	"""
	rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
	for k in range(batch_data.shape[0]):
		#rotation_angle = np.random.uniform() * 2 * np.pi
		cosval = np.cos(rotation_angle)
		sinval = np.sin(rotation_angle)
		rotation_matrix = np.array([[cosval, -sinval, 0],
									[sinval, cosval, 0],
									[0, 0, 1]])
		shape_pc = batch_data[k, ...]
		# rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
		rotated_data[k, ...] = np.dot(rotation_matrix, shape_pc.reshape((-1, 3)).T).T 		# Pre-Multiplication (changes done)
	return rotated_data
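As a quick sanity check, here is a minimal usage sketch (not part of the original project): a 90-degree rotation about the z-axis should map a point at (1, 0, 0) to roughly (0, 1, 0).

import numpy as np

batch = np.array([[[1.0, 0.0, 0.0]]], dtype=np.float32)   # hypothetical 1x1x3 batch
print(rotate_point_cloud_by_angle_z(batch, np.pi / 2))    # approximately [[[0, 1, 0]]]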

Example #2
Source File: label_network_utils.py    From BERT with Apache License 2.0
def _position_encoding(position_size, dim, 
                    min_timescale=1.0,
                    max_timescale=1.0e4):
    position = tf.to_float(tf.range(position_size))
    num_timescales = dim // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = tf.expand_dims(position, 1) \
        * tf.expand_dims(inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    signal = tf.pad(signal, [[0, 0], [0, tf.mod(dim, 2)]])
    signal = tf.reshape(signal, [1, position_size, dim])

    return signal 
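For reference, an equivalent NumPy-only sketch of the same sinusoidal table (an illustration of the math, not code from the BERT repository) makes the geometric sequence of timescales explicit:

import math
import numpy as np

def position_encoding_np(position_size, dim, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(position_size, dtype=np.float32)
    num_timescales = dim // 2
    log_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales, dtype=np.float32) * -log_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    signal = np.pad(signal, [(0, 0), (0, dim % 2)])   # pad one zero column if dim is odd
    return signal.reshape(1, position_size, dim)

print(position_encoding_np(50, 8).shape)  # (1, 50, 8)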
Example #3
Source File: re_augument_utils.py    From BERT with Apache License 2.0
def _position_encoding(position_size, dim, 
                    min_timescale=1.0,
                    max_timescale=1.0e4):
    position = tf.to_float(tf.range(position_size))
    num_timescales = dim // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = tf.expand_dims(position, 1) \
        * tf.expand_dims(inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    signal = tf.pad(signal, [[0, 0], [0, tf.mod(dim, 2)]])
    signal = tf.reshape(signal, [1, position_size, dim])

    return signal 
Example #4
Source File: layers.py    From deepchem with MIT License
def radial_cutoff(self, R, rc):
    """Calculates radial cutoff matrix.

    B = batch_size, N = max_num_atoms, M = max_num_neighbors

    Parameters
    ----------
      R [B, N, M]: tf.Tensor
        Distance matrix.
      rc: tf.Variable
        Interaction cutoff [Angstrom].

    Returns
    -------
    FC [B, N, M]: tf.Tensor
      Radial cutoff matrix.
    """
    T = 0.5 * (tf.cos(np.pi * R / (rc)) + 1)
    E = tf.zeros_like(T)
    cond = tf.less_equal(R, rc)
    FC = tf.where(cond, T, E)
    return FC 
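The cutoff is the smooth cosine form FC(R) = 0.5 * (cos(pi * R / rc) + 1) for R <= rc and 0 beyond the cutoff. A standalone NumPy sketch of the same function (illustrative, not the DeepChem layer itself):

import numpy as np

def radial_cutoff_np(R, rc):
    # decays smoothly from 1 at R = 0 to 0 at R = rc, and is exactly 0 past rc
    T = 0.5 * (np.cos(np.pi * R / rc) + 1.0)
    return np.where(R <= rc, T, 0.0)

print(radial_cutoff_np(np.array([0.0, 2.0, 4.0, 8.0]), rc=4.0))  # [1.0, 0.5, 0.0, 0.0]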
Example #5
Source File: test_neuralODE.py    From astroNN with MIT License
def test_ODEbadprecision(self):  # make sure float32 is not enough for very precise integration
        t = tf.constant(np.linspace(0, 10, 1000), dtype=tf.float32)
        # initial condition
        true_y0 = tf.constant([0., 5.], dtype=tf.float32)

        true_func = lambda y, t: np.sin(5*t)
        ode_func = lambda y, t: tf.cast(tf.stack([5*tf.cos(5*t), -25*tf.sin(5*t)]), tf.float32)
        true_y = odeint(ode_func, true_y0, t, method='dop853', precision=tf.float32)
        self.assertRaises(AssertionError, npt.assert_array_almost_equal, true_y.numpy()[:, 0], true_func(true_y0, t))

        true_y0_pretend_multidims = [[0., 5.]]  # to introduce a mix of list, np array, tensor to make sure no issue
        true_y_pretend_multidims = odeint(ode_func, true_y0_pretend_multidims, t, method='dop853', precision=tf.float32)

        # assert results are equal whether or not the input pretends to be multidimensional
        np.testing.assert_array_almost_equal(true_y_pretend_multidims[0], true_y)

        true_y0_multidims = tf.constant([[1., 2.], [0., 5.]], dtype=tf.float32)
        t = np.linspace(0, 10, 1000)
        true_y_multidims = odeint(ode_func, true_y0_multidims, t, method='dop853', precision=tf.float32)

        # assert equal in multidim or not
        np.testing.assert_array_almost_equal(true_y_multidims[1], true_y) 
Example #6
Source File: position_embedders.py    From Counterfactual-StoryRW with MIT License
def __init__(self, position_size, hparams=None):
        EmbedderBase.__init__(self, hparams=hparams)

        dim = self._hparams.dim
        num_timescales = dim // 2
        min_timescale = self._hparams.min_timescale
        max_timescale = self._hparams.max_timescale

        positions = tf.to_float(tf.range(position_size, dtype=tf.int32))
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (tf.to_float(num_timescales) - 1))
        inv_timescales = min_timescale * tf.exp(
            tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
        scaled_time = tf.expand_dims(positions, 1) \
            * tf.expand_dims(inv_timescales, 0)
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, [[0, 0], [0, tf.mod(dim, 2)]])
        self.signal = signal 
Example #7
Source File: common_layers.py    From BERT with Apache License 2.0
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Example #8
Source File: transformer_layers.py    From nematus with BSD 3-Clause "New" or "Revised" License
def get_positional_signal(time_steps, depth, float_dtype, min_timescale=1, max_timescale=10000):
    """ Generates a series of sinusoid functions capable of expressing the relative and absolute position
    of a token within a longer sequence. """
    # Convert to floats
    min_timescale = tf.cast(min_timescale, float_dtype)
    max_timescale = tf.cast(max_timescale, float_dtype)
    # Obtain timing signal via sinusoids
    num_timescales = tf.cast(depth // 2, float_dtype)
    log_timescale_increment = tf.math.log(max_timescale / min_timescale) / (num_timescales - tf.cast(1.0, float_dtype))
    # Introduce an offset between individual timescales to obtain different frequencies
    incremented_timescales = \
        min_timescale * tf.exp(tf.range(num_timescales, dtype=float_dtype) * -log_timescale_increment)
    # Assign the designated number of time-scales per token position
    positions = tf.cast(tf.range(time_steps), float_dtype)
    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(incremented_timescales, 0)
    positional_signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)

    # Pad the signal tensor, if needed
    pad_size = depth % 2
    if pad_size != 0:
        positional_signal = tf.pad(tensor=positional_signal, paddings=[[0, 0], [0, pad_size]])
    # Reshape the signal to make it compatible with the target tensor
    positional_signal = tf.reshape(positional_signal, [1, time_steps, depth])
    return positional_signal 
Example #9
Source File: ops.py    From mac-network with Apache License 2.0
def locationPE(h, w, dim, outDim = -1, addBias = True):    
    x = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, w)), axis = -1)
    y = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, h)), axis = -1)
    i = tf.expand_dims(tf.to_float(tf.range(dim)), axis = 0)

    peSinX = tf.sin(x / (tf.pow(10000.0, i / dim)))
    peCosX = tf.cos(x / (tf.pow(10000.0, i / dim)))
    peSinY = tf.sin(y / (tf.pow(10000.0, i / dim)))
    peCosY = tf.cos(y / (tf.pow(10000.0, i / dim)))

    peSinX = tf.tile(tf.expand_dims(peSinX, axis = 0), [h, 1, 1])
    peCosX = tf.tile(tf.expand_dims(peCosX, axis = 0), [h, 1, 1])
    peSinY = tf.tile(tf.expand_dims(peSinY, axis = 1), [1, w, 1])
    peCosY = tf.tile(tf.expand_dims(peCosY, axis = 1), [1, w, 1]) 

    grid = tf.concat([peSinX, peCosX, peSinY, peCosY], axis = -1)
    dim *= 4
    
    if outDim > 0:
        grid = linear(grid, dim, outDim, addBias = addBias, name = "locationPE")
        dim = outDim

    return grid, dim 
Example #10
Source File: coarse_grain_model_v2.py    From gap with MIT License
def add_timing_signal(x, scope='', min_timescale=1.0, max_timescale=1.0e4):
        with tf.name_scope(scope, values=[x]):
            length = tf.shape(x)[1]
            channels = tf.shape(x)[2]
            position = tf.to_float(tf.range(length))
            num_timescales = channels // 2

            log_timescale_increment = (
                math.log(float(max_timescale) / float(min_timescale)) /
                (tf.to_float(num_timescales) - 1)
            )
            inv_timescales = min_timescale * tf.exp(
                tf.to_float(tf.range(num_timescales)) * -log_timescale_increment
            )

            scaled_time = (tf.expand_dims(position, 1) *
                           tf.expand_dims(inv_timescales, 0))
            signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
            signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
            signal = tf.reshape(signal, [1, length, channels])

            return x + signal 
Example #11
Source File: helper.py    From pointnet-registration-framework with MIT License
def rotate_point_cloud_by_angle_x(batch_data, rotation_angle):
	""" Rotate the point cloud about the x-axis by a given angle.
		Input:
		  BxNx3 array, original batch of point clouds
		Return:
		  BxNx3 array, rotated batch of point clouds
	"""
	rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
	for k in range(batch_data.shape[0]):
		#rotation_angle = np.random.uniform() * 2 * np.pi
		cosval = np.cos(rotation_angle)
		sinval = np.sin(rotation_angle)
		rotation_matrix = np.array([[1, 0, 0],
									[0, cosval, -sinval],
									[0, sinval, cosval]])
		shape_pc = batch_data[k, ...]
		# rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
		rotated_data[k, ...] = np.dot(rotation_matrix, shape_pc.reshape((-1, 3)).T).T 		# Pre-Multiplication (changes done)
	return rotated_data 
Example #12
Source File: helper.py    From pointnet-registration-framework with MIT License
def rotate_point_cloud_by_angle_y(batch_data, rotation_angle):
	""" Rotate the point cloud about the y-axis by a given angle.
		Input:
		  BxNx3 array, original batch of point clouds
		Return:
		  BxNx3 array, rotated batch of point clouds
	"""
	rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
	for k in range(batch_data.shape[0]):
		#rotation_angle = np.random.uniform() * 2 * np.pi
		cosval = np.cos(rotation_angle)
		sinval = np.sin(rotation_angle)
		rotation_matrix = np.array([[cosval, 0, sinval],
									[0, 1, 0],
									[-sinval, 0, cosval]])
		shape_pc = batch_data[k, ...]
		# rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
		rotated_data[k, ...] = np.dot(rotation_matrix, shape_pc.reshape((-1, 3)).T).T 		# Pre-Multiplication (changes done)
	return rotated_data 
Example #13
Source File: image_ops.py    From TensorflowFramework with BSD 3-Clause "New" or "Revised" License
def transform(image, landmark, translation=[0, 0], rotation=0, scale=1):
  """Apply an affine transformation to the image."""
  image = tf.convert_to_tensor(image)
  landmark = tf.convert_to_tensor(landmark, dtype=tf.float32)
  translation = tf.convert_to_tensor(translation, dtype=tf.float32)
  rotation = tf.convert_to_tensor(rotation, dtype=tf.float32)
  scale = tf.convert_to_tensor(scale, dtype=tf.float32)
  # Generate a transformation matrix
  h, w = image.shape.as_list()[-3:-1]
  tx, ty = tf.unstack(translation, axis=-1)
  sc = tf.cos(rotation) / scale
  ss = tf.sin(rotation) / scale
  cx = (sc - 1) * w * 0.5 + ss * h * 0.5
  cy = -ss * w * 0.5 + (sc - 1) * h * 0.5
  ze = tf.zeros_like(scale)
  # Apply transformation to image
  p = tf.transpose([sc, ss, -cx - tx, -ss, sc, -cy - ty, ze, ze])
  image_shape = image.shape
  image = tf.contrib.image.transform(image, p, interpolation="BILINEAR")
  image.set_shape(image_shape)
  # Apply transformation to landmarks
  a_r = tf.linalg.inv(tf.transpose([[sc, -ss], [ss, sc]]))
  a_t = tf.expand_dims(tf.transpose([cx + tx, cy + ty]), -2)
  landmark = tf.matmul(landmark + a_t, a_r, transpose_b=True)
  return image, landmark 
Example #14
Source File: initializer_ops.py    From TensorflowFramework with BSD 3-Clause "New" or "Revised" License
def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    h_shape = shape[:]
    del h_shape[self.axis]
    angle = tf.random_uniform(h_shape, 0, 2 * np.pi, dtype)
    squared_radius = tf.random_uniform(
      h_shape, 0, tf.square(self.radius), dtype)
    radius = tf.sqrt(squared_radius)

    x = tf.sin(angle) * radius
    y = tf.cos(angle) * radius
    output = tf.stack([x, y], axis=self.axis)
    if self.mean:
      output += self.mean
    return output 
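Drawing the radius as the square root of a uniform sample over [0, radius^2] is what makes the generated points uniform over the disk's area rather than clustered near the center. A small NumPy sketch of the same sampling idea (illustrative only; the names are hypothetical):

import numpy as np

def sample_disk(n, radius=1.0):
    angle = np.random.uniform(0, 2 * np.pi, n)
    r = np.sqrt(np.random.uniform(0, radius ** 2, n))       # uniform over area
    return np.stack([np.sin(angle) * r, np.cos(angle) * r], axis=-1)

points = sample_disk(1000, radius=2.0)
print(points.shape, np.linalg.norm(points, axis=-1).max() <= 2.0)  # (1000, 2) True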
Example #15
Source File: uniform_distribution.py    From HyperGAN with MIT License
def gaussian(config, gan, net):
    z_dim = net.get_shape().as_list()[-1]
    net = (net + 1) / 2

    if len(gan.ops.shape(net)) == 4:
        za = tf.slice(net, [0,0,0,0], [gan.batch_size(), -1, -1, z_dim//2])
        zb = tf.slice(net, [0,0,0,z_dim//2], [gan.batch_size(), -1, -1, z_dim//2])
    else:
        za = tf.slice(net, [0,0], [gan.batch_size(), z_dim//2])
        zb = tf.slice(net, [0,z_dim//2], [gan.batch_size(), z_dim//2])

    pi = np.pi
    ra = tf.sqrt(-2 * tf.log(za+TINY))*tf.cos(2*pi*zb)
    rb = tf.sqrt(-2 * tf.log(za+TINY))*tf.sin(2*pi*zb)

    return tf.reshape(tf.concat(axis=len(net.get_shape())-1, values=[ra, rb]), net.get_shape()) 
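This is the Box-Muller transform: two independent uniforms on (0, 1) are mapped to two independent standard normal samples. A NumPy sketch of the underlying identity (illustrating the math, not HyperGAN code; TINY mirrors the constant used above):

import numpy as np

TINY = 1e-12  # guards against log(0)
za = np.random.uniform(size=100000)
zb = np.random.uniform(size=100000)
ra = np.sqrt(-2 * np.log(za + TINY)) * np.cos(2 * np.pi * zb)
rb = np.sqrt(-2 * np.log(za + TINY)) * np.sin(2 * np.pi * zb)
print(round(ra.mean(), 2), round(ra.std(), 2))  # approximately 0.0 and 1.0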
Example #16
Source File: layers.py    From PADME with MIT License
def radial_cutoff(self, R, rc):
    """Calculates radial cutoff matrix.

    B = batch_size, N = max_num_atoms, M = max_num_neighbors

    Parameters
    ----------
      R [B, N, M]: tf.Tensor
        Distance matrix.
      rc: tf.Variable
        Interaction cutoff [Angstrom].

    Returns
    -------
      FC [B, N, M]: tf.Tensor
        Radial cutoff matrix.

    """

    T = 0.5 * (tf.cos(np.pi * R / (rc)) + 1)
    E = tf.zeros_like(T)
    cond = tf.less_equal(R, rc)
    FC = tf.where(cond, T, E)
    return FC 
Example #17
Source File: common_layers.py    From fine-lm with MIT License
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Example #18
Source File: batch_lbs.py    From tf_smpl with MIT License
def batch_rodrigues(theta, name=None):
    """
    Theta is N x 3
    """
    with tf.variable_scope(name, "batch_rodrigues", [theta]):
        batch_size = tf.shape(theta)[0]

        angle = tf.expand_dims(tf.norm(theta + 1e-8, axis=1), -1)
        r = tf.expand_dims(tf.div(theta, angle), -1)

        angle = tf.expand_dims(angle, -1)
        cos = tf.cos(angle)
        sin = tf.sin(angle)

        outer = tf.matmul(r, r, transpose_b=True, name="outer")

        eyes = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])
        R = cos * eyes + (1 - cos) * outer + sin * batch_skew(
            r, batch_size=batch_size)
        return R 
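The returned matrix is the Rodrigues rotation formula R = cos(a) * I + (1 - cos(a)) * r r^T + sin(a) * [r]_x, where a is the angle, r the unit axis, and [r]_x the skew-symmetric cross-product matrix (the role played by batch_skew above). A single-vector NumPy sketch (illustrative, not the batched tf_smpl code):

import numpy as np

def rodrigues_np(theta):
    # theta: axis-angle vector of shape (3,); its norm is the rotation angle
    angle = np.linalg.norm(theta) + 1e-8
    r = theta / angle
    K = np.array([[0, -r[2], r[1]],
                  [r[2], 0, -r[0]],
                  [-r[1], r[0], 0]])
    return np.cos(angle) * np.eye(3) + (1 - np.cos(angle)) * np.outer(r, r) + np.sin(angle) * K

# a 90-degree rotation about z sends the x-axis to the y-axis
print(rodrigues_np(np.array([0.0, 0.0, np.pi / 2])) @ np.array([1.0, 0.0, 0.0]))  # ~[0, 1, 0]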
Example #19
Source File: train_siamese_model.py    From SiamFC-TensorFlow with MIT License
def _configure_learning_rate(train_config, global_step):
  lr_config = train_config['lr_config']

  num_batches_per_epoch = \
    int(train_config['train_data_config']['num_examples_per_epoch'] / train_config['train_data_config']['batch_size'])

  lr_policy = lr_config['policy']
  if lr_policy == 'piecewise_constant':
    lr_boundaries = [int(e * num_batches_per_epoch) for e in lr_config['lr_boundaries']]
    return tf.train.piecewise_constant(global_step,
                                       lr_boundaries,
                                       lr_config['lr_values'])
  elif lr_policy == 'exponential':
    decay_steps = int(num_batches_per_epoch) * lr_config['num_epochs_per_decay']
    return tf.train.exponential_decay(lr_config['initial_lr'],
                                      global_step,
                                      decay_steps=decay_steps,
                                      decay_rate=lr_config['lr_decay_factor'],
                                      staircase=lr_config['staircase'])
  elif lr_policy == 'cosine':
    T_total = train_config['train_data_config']['epoch'] * num_batches_per_epoch
    return 0.5 * lr_config['initial_lr'] * (1 + tf.cos(np.pi * tf.to_float(global_step) / T_total))
  else:
    raise ValueError('Learning rate policy [%s] was not recognized' % lr_policy)
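The 'cosine' branch above is the usual cosine-annealing schedule lr(step) = 0.5 * initial_lr * (1 + cos(pi * step / T_total)), which decays from initial_lr to roughly zero over T_total steps. A plain-Python sketch with made-up values (not taken from the SiamFC configuration):

import numpy as np

def cosine_lr(step, initial_lr, total_steps):
    return 0.5 * initial_lr * (1 + np.cos(np.pi * step / total_steps))

print(cosine_lr(0, 0.01, 1000))     # 0.01 at the start
print(cosine_lr(500, 0.01, 1000))   # 0.005 halfway through
print(cosine_lr(1000, 0.01, 1000))  # ~0.0 at the end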
Example #20
Source File: quaternion.py    From graphics with Apache License 2.0
def from_axis_angle(axis, angle, name=None):
  """Converts an axis-angle representation to a quaternion.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents a normalized axis.
    angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
      represents an angle.
    name: A name for this op that defaults to "quaternion_from_axis_angle".

  Returns:
    A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
    a normalized quaternion.

  Raises:
    ValueError: If the shape of `axis` or `angle` is not supported.
  """
  with tf.compat.v1.name_scope(name, "quaternion_from_axis_angle",
                               [axis, angle]):
    axis = tf.convert_to_tensor(value=axis)
    angle = tf.convert_to_tensor(value=angle)

    shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
    shape.check_static(
        tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
    shape.compare_batch_dimensions(
        tensors=(axis, angle), last_axes=-2, broadcast_compatible=True)
    axis = asserts.assert_normalized(axis)

    half_angle = 0.5 * angle
    w = tf.cos(half_angle)
    xyz = tf.sin(half_angle) * axis
    return tf.concat((xyz, w), axis=-1) 
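The conversion is q = (sin(a / 2) * axis, cos(a / 2)), with the scalar part stored last, as in the concat above. A minimal NumPy sketch for a single axis-angle pair (illustration only):

import numpy as np

def quaternion_from_axis_angle_np(axis, angle):
    # axis: unit 3-vector; angle in radians; scalar component w is stored last
    half = 0.5 * angle
    return np.concatenate([np.sin(half) * np.asarray(axis), [np.cos(half)]])

# a 180-degree rotation about z gives approximately (0, 0, 1, 0)
print(quaternion_from_axis_angle_np([0.0, 0.0, 1.0], np.pi))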
Example #21
Source File: spherical_harmonics.py    From graphics with Apache License 2.0
def _evaluate_spherical_harmonics_branch(degree,
                                         order,
                                         theta,
                                         phi,
                                         sign_order,
                                         var_type=tf.float64):
  sqrt_2 = tf.constant(1.41421356237, dtype=var_type)
  order_float = tf.cast(order, dtype=var_type)
  tmp = sqrt_2 * _spherical_harmonics_normalization(
      degree, order, var_type) * evaluate_legendre_polynomial(
          degree, order, tf.cos(theta))
  positive = tmp * tf.cos(order_float * phi)
  negative = tmp * tf.sin(order_float * phi)
  return tf.compat.v1.where(tf.greater(sign_order, 0), positive, negative)
  # pylint: enable=missing-docstring 
Example #22
Source File: math_helpers.py    From graphics with Apache License 2.0
def spherical_to_cartesian_coordinates(point_spherical, name=None):
  """Function to transform spherical coordinates to Cartesian coordinates.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    point_spherical: A tensor of shape `[A1, ..., An, 3]`. The last dimension
      contains r, theta, and phi that respectively correspond to the radius,
      polar angle and azimuthal angle; r must be non-negative.
    name: A name for this op. Defaults to 'spherical_to_cartesian_coordinates'.

  Raises:
    tf.errors.InvalidArgumentError: If r, theta or phi contains out of range
    data.

  Returns:
    A tensor of shape `[A1, ..., An, 3]`, where the last dimension contains the
    cartesian coordinates in x,y,z order.
  """
  with tf.compat.v1.name_scope(name, "spherical_to_cartesian_coordinates",
                               [point_spherical]):
    point_spherical = tf.convert_to_tensor(value=point_spherical)

    shape.check_static(
        tensor=point_spherical,
        tensor_name="point_spherical",
        has_dim_equals=(-1, 3))

    r, theta, phi = tf.unstack(point_spherical, axis=-1)
    r = asserts.assert_all_above(r, 0)
    tmp = r * tf.sin(theta)
    x = tmp * tf.cos(phi)
    y = tmp * tf.sin(phi)
    z = r * tf.cos(theta)
    return tf.stack((x, y, z), axis=-1) 
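The mapping follows the usual physics convention, x = r * sin(theta) * cos(phi), y = r * sin(theta) * sin(phi), z = r * cos(theta), with theta the polar angle measured from the z-axis. A NumPy sketch for a single point (illustrative):

import numpy as np

def spherical_to_cartesian_np(r, theta, phi):
    return np.array([r * np.sin(theta) * np.cos(phi),
                     r * np.sin(theta) * np.sin(phi),
                     r * np.cos(theta)])

# a point on the equator (theta = pi/2) at phi = 0 lies on the positive x-axis
print(spherical_to_cartesian_np(1.0, np.pi / 2, 0.0))  # ~[1, 0, 0]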
Example #23
Source File: qanet_layers.py    From BERT with Apache License 2.0
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    """Gets a bunch of sinusoids of different frequencies.
    Each channel of the input Tensor is incremented by a sinusoid of a different
    frequency and phase.
    This allows attention to learn to use absolute and relative positions.
    Timing signals should be added to some precursors of both the query and the
    memory inputs to attention.
    The use of relative position is possible because sin(x+y) and cos(x+y) can be
    expressed in terms of y, sin(x) and cos(x).
    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale.  The number of different
    timescales is equal to channels / 2. For each timescale, we
    generate the two sinusoidal signals sin(timestep/timescale) and
    cos(timestep/timescale).  All of these sinusoids are concatenated in
    the channels dimension.
    Args:
    length: scalar, length of timing signal sequence.
    channels: scalar, size of timing embeddings to create. The number of
        different timescales is equal to channels / 2.
    min_timescale: a float
    max_timescale: a float
    Returns:
    a Tensor of timing signals [1, length, channels]
    """
    position = tf.to_float(tf.range(length))
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
            (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
    signal = tf.reshape(signal, [1, length, channels])
    return signal 
Example #24
Source File: attention.py    From THUMT with BSD 3-Clause "New" or "Revised" License
def add_timing_signal(x, min_timescale=1.0, max_timescale=1.0e4, name=None):
    """
    This function adds a bunch of sinusoids of different frequencies to a
    Tensor. See paper: `Attention is all you need'

    :param x: A tensor with shape [batch, length, channels]
    :param min_timescale: A floating point number
    :param max_timescale: A floating point number
    :param name: An optional string

    :returns: a Tensor the same shape as x.
    """

    with tf.name_scope(name, default_name="add_timing_signal", values=[x]):
        length = tf.shape(x)[1]
        channels = tf.shape(x)[2]
        position = tf.to_float(tf.range(length))
        num_timescales = channels // 2

        log_timescale_increment = (
                math.log(float(max_timescale) / float(min_timescale)) /
                (tf.to_float(num_timescales) - 1)
        )
        inv_timescales = min_timescale * tf.exp(
            tf.to_float(tf.range(num_timescales)) * -log_timescale_increment
        )

        scaled_time = (tf.expand_dims(position, 1) *
                       tf.expand_dims(inv_timescales, 0))
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
        signal = tf.reshape(signal, [1, length, channels])

        return x + tf.cast(signal, x.dtype) 
Example #25
Source File: qanet_layers.py    From BERT with Apache License 2.0
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    """Adds a bunch of sinusoids of different frequencies to a Tensor.
    Each channel of the input Tensor is incremented by a sinusoid of a different
    frequency and phase.
    This allows attention to learn to use absolute and relative positions.
    Timing signals should be added to some precursors of both the query and the
    memory inputs to attention.
    The use of relative position is possible because sin(x+y) and cos(x+y) can be
    expressed in terms of y, sin(x) and cos(x).
    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale.  The number of different
    timescales is equal to channels / 2. For each timescale, we
    generate the two sinusoidal signals sin(timestep/timescale) and
    cos(timestep/timescale).  All of these sinusoids are concatenated in
    the channels dimension.
    Args:
    x: a Tensor with shape [batch, length, channels]
    min_timescale: a float
    max_timescale: a float
    Returns:
    a Tensor the same shape as x.
    """
    length = tf.shape(x)[1]
    channels = tf.shape(x)[2]
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal 
Example #26
Source File: modeling.py    From XLnet-gen with MIT License
def positional_embedding(pos_seq, inv_freq, bsz=None):
  sinusoid_inp = tf.einsum('i,d->id', pos_seq, inv_freq)
  pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
  pos_emb = pos_emb[:, None, :]

  if bsz is not None:
    pos_emb = tf.tile(pos_emb, [1, bsz, 1])

  return pos_emb 
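Here pos_seq and inv_freq are supplied by the caller; in Transformer-XL-style models inv_freq is typically 1 / 10000^(2i/d) over the channel index and pos_seq runs over relative positions. A NumPy sketch of one plausible way the inputs are formed (an assumption about typical usage, not code from XLnet-gen):

import numpy as np

d_model = 16
inv_freq = 1.0 / (10000 ** (np.arange(0, d_model, 2.0) / d_model))  # one frequency per sin/cos pair
pos_seq = np.arange(7, -1, -1.0)                                    # relative positions, counted backwards
sinusoid_inp = np.einsum('i,d->id', pos_seq, inv_freq)
pos_emb = np.concatenate([np.sin(sinusoid_inp), np.cos(sinusoid_inp)], axis=-1)
print(pos_emb.shape)  # (8, 16)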
Example #27
Source File: common.py    From KPConv with MIT License
def random_rotation_tensor(self, config):

        if config.augment_rotation == 'none':
            return np.eye(3, dtype=np.float32)

        if config.augment_rotation == 'vertical':

            # random angle
            theta = np.random.rand() * 2 * np.pi

            # Rotation matrix
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)

            # Rotate points
            return R

        if config.augment_rotation == 'axes':

            # random angle
            theta = np.random.rand() * 2 * np.pi
            axis = np.random.randint(3)

            # Rotation matrix
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s, -s], [s, c, -s], [s, s, c]], dtype=np.float32)
            R[:, axis] = 0
            R[axis, :] = 0
            R[axis, axis] = 1

            # Rotate points
            return R 
Example #28
Source File: smpl_tf.py    From SMPL with MIT License
def rodrigues(r):
  """
  Rodrigues' rotation formula that turns axis-angle tensor into rotation
  matrix in a batch-ed manner.

  Parameter:
  ----------
  r: Axis-angle rotation tensor of shape [batch_size, 1, 3].

  Return:
  -------
  Rotation matrix of shape [batch_size, 3, 3].

  """
  theta = tf.norm(r + tf.random_normal(r.shape, 0, 1e-8, dtype=tf.float64), axis=(1, 2), keepdims=True)
  # avoid divide by zero
  r_hat = r / theta
  cos = tf.cos(theta)
  z_stick = tf.zeros(theta.get_shape().as_list()[0], dtype=tf.float64)
  m = tf.stack(
    (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
     -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), axis=1)
  m = tf.reshape(m, (-1, 3, 3))
  i_cube = tf.expand_dims(tf.eye(3, dtype=tf.float64), axis=0) + tf.zeros(
    (theta.get_shape().as_list()[0], 3, 3), dtype=tf.float64)
  A = tf.transpose(r_hat, (0, 2, 1))
  B = r_hat
  dot = tf.matmul(A, B)
  R = cos * i_cube + (1 - cos) * dot + tf.sin(theta) * m
  return R 
Example #29
Source File: transformers.py    From PADME with MIT License
def distance_cutoff(self, d, cutoff, flags):
    """ Generate distance matrix with trainable cutoff """
    # Cutoff with threshold Rc
    d_flag = flags * tf.sign(cutoff - d)
    d_flag = tf.nn.relu(d_flag)
    d_flag = d_flag * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
    d = 0.5 * (tf.cos(np.pi * d / cutoff) + 1)
    return d * d_flag 
Example #30
Source File: layers.py    From PADME with MIT License
def distance_cutoff(self, d, cutoff, flags):
    """ Generate distance matrix with trainable cutoff """
    # Cutoff with threshold Rc
    d_flag = flags * tf.sign(cutoff - d)
    d_flag = tf.nn.relu(d_flag)
    d_flag = d_flag * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
    d = 0.5 * (tf.cos(np.pi * d / cutoff) + 1)
    return d * d_flag
    # return d