Python tensorflow.compat.v2.square() Examples

The following are 9 code examples of tensorflow.compat.v2.square(). Each is taken from an open-source project; the source file and license are noted above each example. You may also want to check out all available functions and classes of the tensorflow.compat.v2 module.
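tf.square computes the element-wise square of its argument. A minimal sketch of the call itself:

import tensorflow.compat.v2 as tf

x = tf.constant([1.0, -2.0, 3.0])
print(tf.square(x))  # tf.Tensor([1. 4. 9.], shape=(3,), dtype=float32)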
Example #1
Source File: extensions_test.py    From trax with Apache License 2.0
def loss_fn(params, inputs, targets):
  # Mean squared error of the linear model `params[0] * inputs + params[1]`.
  predicted = params[0] * inputs + params[1]
  loss = tf.reduce_mean(input_tensor=tf.square(predicted - targets))
  # Return the loss as a trax tf_numpy ndarray.
  return tf_np.asarray(loss)
Example #2
Source File: pixelcnn.py    From alibi-detect with Apache License 2.0
def _init_norm(self):
    """Set the norm of the weight vector."""
    kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes))
    self.g.assign(kernel_norm)
Example #3
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def _rosenbrock(x):
  """See https://en.wikipedia.org/wiki/Rosenbrock_function."""
  term1 = 100 * tf.reduce_sum(tf.square(x[1:] - tf.square(x[:-1])))
  term2 = tf.reduce_sum(tf.square(1 - x[:-1]))
  return term1 + term2 
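
As a quick sanity check (not part of the original test), the Rosenbrock function attains its global minimum of 0 at x = (1, ..., 1):

x_min = tf.ones([5], dtype=tf.float64)
print(_rosenbrock(x_min).numpy())  # 0.0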
Example #4
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def _mc_cormick(coord):
  """See https://www.sfu.ca/~ssurjano/mccorm.html."""
  x = coord[0]
  y = coord[1]
  return tf.sin(x + y) + tf.square(x - y) - 1.5 * x + 2.5 * y + 1 
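
The page linked in the docstring lists the global minimum f(-0.54719, -1.54719) ≈ -1.9133, which gives another quick check (not part of the original test):

coord = tf.constant([-0.54719, -1.54719], dtype=tf.float64)
print(_mc_cormick(coord).numpy())  # approximately -1.9133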
Example #5
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_multiple_functions(self):
    # Define 3 independent quadratic functions, each with its own minimum.
    minima = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    func = lambda x: tf.reduce_sum(tf.square(x - minima), axis=1)
    self._check_algorithm(
        func=func, start_point=np.zeros_like(minima), expected_argmin=minima) 
Example #6
Source File: loss_fns.py    From valan with Apache License 2.0
def _compute_baseline_loss(advantages, step):
  # Loss for the baseline, summed over the time dimension. Multiply by 0.5 to
  # match the standard update rule:
  #   d(loss) / d(baseline) = advantage
  baseline_cost = .5 * tf.square(advantages)
  tf.summary.scalar(
      'loss/baseline_cost', tf.reduce_mean(baseline_cost), step=step)
  return baseline_cost 
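
The 0.5 factor makes the gradient of the cost with respect to advantages equal to advantages itself, which is the update rule the comment refers to (up to the sign introduced by advantage = return - baseline). A small verification sketch (not from the original file):

advantages = tf.constant([0.5, -1.0, 2.0])
with tf.GradientTape() as tape:
  tape.watch(advantages)
  cost = .5 * tf.square(advantages)
print(tape.gradient(cost, advantages).numpy())  # [ 0.5 -1.   2. ]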
Example #7
Source File: parabolic_equation_stepper_test.py    From tf-quant-finance with Apache License 2.0
def testEuropeanCallDynamicVol(self):
    """Price for the European Call option with time-dependent volatility."""
    num_equations = 1  # Number of PDEs
    num_grid_points = 1024  # Number of grid points
    dtype = np.float64
    # Build a log-uniform grid
    s_max = 300.
    grid = grids.log_uniform_grid(minimums=[0.01], maximums=[s_max],
                                  sizes=[num_grid_points],
                                  dtype=dtype)
    # Specify the option expiry and strike
    expiry = 1.0
    strike = 50.0

    # Volatility is of the form  `sigma**2(t) = 1 / 6 + 1 / 2 * t**2`.
    def second_order_coeff_fn(t, location_grid):
      return [[(1. / 6 + t**2 / 2) * tf.square(location_grid[0]) / 2]]

    @dirichlet
    def lower_boundary_fn(t, location_grid):
      del t, location_grid
      return 0

    @dirichlet
    def upper_boundary_fn(t, location_grid):
      del t
      return location_grid[0][-1] - strike

    final_values = tf.nn.relu(grid[0] - strike)
    # Broadcast to the shape of the value dimension, if necessary.
    final_values += tf.zeros([num_equations, num_grid_points],
                             dtype=dtype)
    # Estimate European call option price
    estimate = fd_solvers.solve_backward(
        start_time=expiry,
        end_time=0,
        coord_grid=grid,
        values_grid=final_values,
        num_steps=None,
        start_step_count=0,
        time_step=tf.constant(0.01, dtype=dtype),
        one_step_fn=crank_nicolson_step(),
        boundary_conditions=[(lower_boundary_fn, upper_boundary_fn)],
        values_transform_fn=None,
        second_order_coeff_fn=second_order_coeff_fn,
        dtype=dtype)[0]

    value_grid = self.evaluate(estimate)[0, :]
    # Grid location 849 corresponds to a spot price of 51.9537332.
    loc_1 = 849
    # True call option price (obtained using the black_scholes_price function).
    call_price = 12.582092
    self.assertAllClose(call_price, value_grid[loc_1], rtol=1e-02, atol=1e-02) 
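
The quoted price can be cross-checked in closed form. Assuming zero rates (the test sets no first-order coefficient), the total variance accumulated over [0, 1] is the integral of 1/6 + t**2/2, which equals 1/3, so a standard Black-Scholes call with sigma = sqrt(1/3), spot 51.9537332, strike 50, and expiry 1 reproduces the reference value (a sketch for intuition, not part of the test):

from math import erf, log, sqrt

def norm_cdf(x):
  return 0.5 * (1.0 + erf(x / sqrt(2.0)))

s, k, t = 51.9537332, 50.0, 1.0
sigma = sqrt(1.0 / 3.0)  # sqrt of the time-averaged variance
d1 = (log(s / k) + 0.5 * sigma**2 * t) / (sigma * sqrt(t))
d2 = d1 - sigma * sqrt(t)
print(s * norm_cdf(d1) - k * norm_cdf(d2))  # approximately 12.582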
Example #8
Source File: parabolic_equation_stepper_test.py    From tf-quant-finance with Apache License 2.0
def testCompareExpandedAndNotExpandedPdes(self):
    """Tests comparing PDEs with expanded derivatives and without.

    Take equation `u_{t} - [x^2 u]_{xx} + [x u]_{x} = 0`.
    Expanding the derivatives yields `u_{t} - x^2 u_{xx} - 3x u_{x} - u = 0`.
    Solve both equations and expect the results to be equal.
    """
    grid = grids.uniform_grid(
        minimums=[0], maximums=[1], sizes=[501], dtype=tf.float32)
    xs = grid[0]

    final_t = 0.1
    time_step = 0.001

    initial = _reference_pde_initial_cond(xs)  # arbitrary

    def inner_second_order_coeff_fn(t, coord_grid):
      del t
      x = coord_grid[0]
      return [[-tf.square(x)]]

    def inner_first_order_coeff_fn(t, coord_grid):
      del t
      x = coord_grid[0]
      return [x]

    result_not_expanded = fd_solvers.solve_forward(
        start_time=0,
        end_time=final_t,
        coord_grid=grid,
        values_grid=initial,
        time_step=time_step,
        inner_second_order_coeff_fn=inner_second_order_coeff_fn,
        inner_first_order_coeff_fn=inner_first_order_coeff_fn)[0]

    def second_order_coeff_fn(t, coord_grid):
      del t
      x = coord_grid[0]
      return [[-tf.square(x)]]

    def first_order_coeff_fn(t, coord_grid):
      del t
      x = coord_grid[0]
      return [-3 * x]

    def zeroth_order_coeff_fn(t, coord_grid):
      del t, coord_grid
      return -1

    result_expanded = fd_solvers.solve_forward(
        start_time=0,
        end_time=final_t,
        coord_grid=grid,
        values_grid=initial,
        time_step=time_step,
        second_order_coeff_fn=second_order_coeff_fn,
        first_order_coeff_fn=first_order_coeff_fn,
        zeroth_order_coeff_fn=zeroth_order_coeff_fn)[0]

    self.assertAllClose(
        result_not_expanded, result_expanded, atol=1e-3, rtol=1e-3) 
Example #9
Source File: helpers.py    From compression with Apache License 2.0
def estimate_tails(func, target, shape, dtype):
  """Estimates approximate tail quantiles.

  This runs a simple Adam iteration to determine tail quantiles. The
  objective is to find an `x` such that:
  ```
  func(x) == target
  ```
  For instance, if `func` is a CDF and the target is a quantile value, this
  would find the approximate location of that quantile. Note that `func` is
  assumed to be monotonic. When each tail estimate has passed the optimal value
  of `x`, the algorithm does 10 additional iterations and then stops.

  This operation is vectorized. The tensor shape of `x` is given by `shape`, and
  `target` must have a shape that is broadcastable to the output of `func(x)`.

  Arguments:
    func: A callable that computes a cumulative distribution function,
      survival function, or similar.
    target: The desired target value.
    shape: The shape of the `tf.Tensor` representing `x`.
    dtype: The `tf.dtypes.Dtype` of the computation (and the return value).

  Returns:
    A `tf.Tensor` representing the solution (`x`).
  """
  with tf.name_scope("estimate_tails"):
    dtype = tf.as_dtype(dtype)
    shape = tf.convert_to_tensor(shape, tf.int32)
    target = tf.convert_to_tensor(target, dtype)

    def loop_cond(tails, m, v, count):
      del tails, m, v  # unused
      return tf.reduce_min(count) < 10

    def loop_body(tails, m, v, count):
      with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(tails)
        loss = abs(func(tails) - target)
      grad = tape.gradient(loss, tails)
      m = .5 * m + .5 * grad  # Adam mean estimate.
      v = .9 * v + .1 * tf.square(grad)  # Adam variance estimate.
      tails -= .5 * m / (tf.sqrt(v) + 1e-7)
      # Start counting when the gradient flips sign (note that this assumes
      # `tails` is initialized to zero).
      count = tf.where(
          tf.math.logical_or(count > 0, tails * grad > 0),
          count + 1, count)
      return tails, m, v, count

    init_tails = tf.zeros(shape, dtype=dtype)
    init_m = tf.zeros(shape, dtype=dtype)
    init_v = tf.ones(shape, dtype=dtype)
    init_count = tf.zeros(shape, dtype=tf.int32)
    return tf.while_loop(
        loop_cond, loop_body, (init_tails, init_m, init_v, init_count),
        back_prop=False)[0]
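
As a usage sketch (not from the original file), estimate_tails can recover the 0.999 quantile of a standard normal distribution by writing its CDF with tf.math.erf; the exact quantile is about 3.09:

def normal_cdf(x):
  return 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))

tail = estimate_tails(normal_cdf, target=0.999, shape=[1], dtype=tf.float32)
print(tail.numpy())  # approximately 3.09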