Python tensorflow.compat.v2.size() Examples

The following are 18 code examples of tensorflow.compat.v2.size(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v2, or try the search function.
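As a quick reference, tf.size() returns the total number of elements in a tensor as a scalar Tensor. A minimal sketch, assuming a TF2 runtime where ops execute eagerly:

import tensorflow.compat.v2 as tf

x = tf.zeros([3, 4])
print(tf.size(x))  # tf.Tensor(12, shape=(), dtype=int32)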
Example #1
Source File: euler_sampling.py    From tf-quant-finance with Apache License 2.0
def _while_loop(*, dim, steps_num, current_state,
                drift_fn, volatility_fn, wiener_mean,
                num_samples, times, dt, sqrt_dt, time_step, num_requested_times,
                keep_mask, swap_memory, random_type, seed, normal_draws):
  """Smaple paths using tf.while_loop."""
  cond_fn = lambda i, *args: i < steps_num
  def step_fn(i, written_count, current_state, result):
    return _euler_step(
        i=i,
        written_count=written_count,
        current_state=current_state,
        result=result,
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        keep_mask=keep_mask,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws)
  maximum_iterations = (tf.cast(1. / time_step, dtype=tf.int32)
                        + tf.size(times))
  result = tf.zeros((num_samples, num_requested_times, dim),
                    dtype=current_state.dtype)
  _, _, _, result = tf.while_loop(
      cond_fn, step_fn, (0, 0, current_state, result),
      maximum_iterations=maximum_iterations,
      swap_memory=swap_memory)
  return result 
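The maximum_iterations bound above mirrors how the time grid is built: roughly 1 / time_step uniform points plus one point per requested time. The same bound in isolation, with illustrative values:

import tensorflow.compat.v2 as tf

time_step = 0.01
times = tf.constant([0.5, 1.0])
maximum_iterations = tf.cast(1. / time_step, dtype=tf.int32) + tf.size(times)
print(int(maximum_iterations))  # 102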
Example #2
Source File: input_pipeline.py    From models with Apache License 2.0
def get_input_dataset(input_file_pattern,
                      batch_size,
                      params,
                      is_training,
                      strategy=None):
  """Returns input dataset from input file string."""

  # When using TPU pods, we need to clone the dataset across workers and pass
  # in a function that returns the dataset, rather than passing the dataset
  # instance itself.
  use_dataset_fn = isinstance(strategy, tf.distribute.experimental.TPUStrategy)
  if use_dataset_fn:
    if batch_size % strategy.num_replicas_in_sync != 0:
      raise ValueError(
          "Batch size must be divisible by the number of replicas: {}".format(
              strategy.num_replicas_in_sync))

    # Auto rebatching is not supported by the
    # `experimental_distribute_datasets_from_function()` API, which is
    # required when cloning the dataset to multiple workers in eager mode,
    # so we use the per-replica batch size here.
    batch_size = int(batch_size / strategy.num_replicas_in_sync)

  def _dataset_fn(ctx=None):
    """Returns tf.data.Dataset for distributed BERT pretraining."""
    input_files = []
    for input_pattern in input_file_pattern.split(","):
      input_files.extend(tf.io.gfile.glob(input_pattern))

    return create_dataset(
        input_files,
        batch_size,
        params,
        is_training=is_training,
        input_pipeline_context=ctx)

  if use_dataset_fn:
    return strategy.experimental_distribute_datasets_from_function(_dataset_fn)
  else:
    return strategy.experimental_distribute_dataset(_dataset_fn()) 
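A sketch of the per-replica batch arithmetic above; the strategy and batch size are illustrative:

import tensorflow.compat.v2 as tf

strategy = tf.distribute.MirroredStrategy()  # e.g. one replica on CPU
global_batch_size = 64
per_replica_batch_size = global_batch_size // strategy.num_replicas_in_sync
print(per_replica_batch_size)  # 64 with a single replica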
Example #3
Source File: input_pipeline.py    From models with Apache License 2.0
def _filter_max_length(example, max_title_length=256):
  """Indicates whether the example's length is lower than the maximum length."""
  return tf.size(example["targets"]) <= max_title_length 
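A predicate like this is typically passed to tf.data.Dataset.filter. A toy sketch with a made-up dataset and length cap (output_signature requires TF >= 2.4):

import tensorflow.compat.v2 as tf

def gen():
  yield {"targets": [1, 2]}
  yield {"targets": [1, 2, 3, 4, 5]}

ds = tf.data.Dataset.from_generator(
    gen, output_signature={"targets": tf.TensorSpec([None], tf.int32)})
# Keep only examples whose "targets" has at most 4 elements.
ds = ds.filter(lambda ex: _filter_max_length(ex, max_title_length=4))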
Example #4
Source File: utils.py    From valan with Apache License 2.0
def parallel_conv2d(inputs, filters, strides, padding):
  """Applies each filter in the batch of filters to each input.

  tf.nn.conv2d only supports applying the same filter on a batch of inputs.
  This function provides a similar interface, but allowing a batch of filters,
  a different one for each input.

  In the below definitions, B is the batch size, H and W are spatial input or
  output dimensions (overloaded between input and output), C1 is the input
  number of channels, C2 is output number of channels, KHxKW is the
  convolutional kernel spatial size.

  Args:
    inputs: BxHxWxC1 tensor - batch of input "images"
    filters: BxKHxKWxC1xC2 tensor - batch of convolutional kernels
    strides: See tf.nn.conv2d arg: strides
    padding: See tf.nn.conv2d arg: padding

  Returns:
    Tensor of shape BxHxWxC2
  """
  batch_size = inputs.shape[0]

  output_slices = [tf.nn.conv2d(inputs[i:i+1], filters[i], strides, padding)
                   for i in range(batch_size)]
  output = tf.stack(output_slices, axis=0)
  # Each output slice has a batch dimension of size 1. Get rid of it.
  assert output.shape[1] == 1, 'Each slice should have batch size of 1'
  output = output[:, 0, :, :, :]
  # Output should have same batch size and spatial dimensions as input, but
  # the number of channels is determined by the convolution filter
  assert_shape((batch_size, inputs.shape[1], inputs.shape[2], filters.shape[4]),
               output.shape)
  return output 
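A hypothetical call to make the shapes concrete; the values are random and the module's assert_shape helper is assumed to be in scope:

import tensorflow.compat.v2 as tf

inputs = tf.random.normal([2, 8, 8, 3])       # B=2, H=W=8, C1=3
filters = tf.random.normal([2, 3, 3, 3, 16])  # one 3x3 kernel stack per input, C2=16
out = parallel_conv2d(inputs, filters, strides=1, padding='SAME')
print(out.shape)  # (2, 8, 8, 16)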
Example #5
Source File: utils.py    From valan with Apache License 2.0
def circular_pad(input_tensor, axis, padding):
  """Pads tensor circularly.

  More specifically, pad on the right with tensor values taken from the left
  edge of the tensor, as if the tensor had been concatenated with itself, and
  vice versa on the left.

  Args:
    input_tensor: typically a batch of input "images"
    axis: the axis along which to perform the circular padding
    padding: a pair `[pad_left, pad_right]` of non-negative ints, the number
      of positions to wrap around on each side along `axis`

  Returns:
    The input tensor, circularly padded along `axis`.
  """
  assert 0 <= axis < len(input_tensor.shape), 'Axis out of bounds'
  multiples = [1] * len(input_tensor.shape)
  multiples[axis] = 3
  tiled_input = tf.tile(input_tensor, multiples)
  left = input_tensor.shape[axis] - padding[0]
  right = 2 * input_tensor.shape[axis] + padding[1]

  begin = [0] * len(input_tensor.shape)
  end = list(input_tensor.shape)
  begin[axis] = left
  end[axis] = right
  size = [a - b for a, b in zip(end, begin)]

  output_tensor = tf.slice(tiled_input, begin, size)
  # A shape assertion could be added here, as in parallel_conv2d above.
  return output_tensor 
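A small worked example with a made-up input: padding [1, 1] along axis 1 wraps one value from each end.

import tensorflow.compat.v2 as tf

x = tf.constant([[1, 2, 3]])
print(circular_pad(x, axis=1, padding=[1, 1]))  # [[3 1 2 3 1]]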
Example #6
Source File: utils.py    From valan with Apache License 2.0
def get_first_true_column(x):
  """Transforms `x` into a tensor which has all elements set to False except the first True in the column.

  If x is [[True, False, False],
           [True, False, False],
           [False, True, False],
           [False, True, True]]
  the output should be
          [[True, False, False],
           [False, False, False],
           [False, True, False],
           [False, False, True]
          ]

  Args:
    x: A bool tensor with shape [num_steps, batch_size]

  Returns:
    A bool tensor with the same shape.
  """
  x = tf.transpose(x, perm=[1, 0])
  # Get indices
  y = tf.where(tf.equal(x, True))
  # Find first column in every row which is True
  first_true_cols = tf.cast(
      tf.math.segment_min(data=y[:, 1], segment_ids=y[:, 0]), tf.int32)
  # Convert back to indices
  first_true_indices = tf.stack(
      [tf.range(tf.size(first_true_cols)), first_true_cols], axis=1)
  # Now create the mask
  first_true_mask_sparse = tf.SparseTensor(
      indices=tf.cast(first_true_indices, tf.int64),
      values=tf.ones([tf.size(first_true_cols)], dtype=tf.bool),
      dense_shape=x.shape)
  first_true_mask = tf.sparse.to_dense(
      first_true_mask_sparse, default_value=False)
  return tf.transpose(first_true_mask, perm=[1, 0]) 
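Running the function on the docstring's own input reproduces the documented output:

import tensorflow.compat.v2 as tf

x = tf.constant([[True, False, False],
                 [True, False, False],
                 [False, True, False],
                 [False, True, True]])
print(get_first_true_column(x))
# [[ True False False]
#  [False False False]
#  [False  True False]
#  [False False  True]]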
Example #7
Source File: utils.py    From valan with Apache License 2.0
def gather_from_dict(tensor_dict, choice):
  """Chooses tensor values along first dimension using given choice.

  If `tensor_dict` = {
    0: zeros(shape=(6)),
    1: ones(shape=(6)),
    2: twos(shape=(6)),
    3: threes(shape=(6))
  }
  and choice = [0, 0, 2, 2, 1, 0]
  then the returned tensor is [0., 0., 2., 2., 1., 0.]

  Args:
    tensor_dict: A dict with int keys and tensor values. All tensor values must
      be of same type and shape.
    choice: A 1-d int tensor with number of elements equal to first dimension of
      tensors in `tensor_dict`. The values in the tensor must be valid keys in
      `tensor_dict`.

  Returns:
    A tensor of same type and shape as tensors in `tensor_dict`.
  """
  one_tensor = next(iter(tensor_dict.values()))

  # Check number of elements in `choice`.
  tf.debugging.assert_rank(choice, rank=1)
  tf.debugging.assert_equal(tf.size(choice), tf.shape(one_tensor)[0])

  zeros_tensor = tf.zeros_like(one_tensor)
  final_tensor = zeros_tensor
  for c, t in tensor_dict.items():
    # Check shapes and type
    tf.debugging.assert_equal(tf.shape(t), tf.shape(one_tensor))
    tf.debugging.assert_type(t, tf_type=one_tensor.dtype)
    final_tensor += tf.compat.v1.where(tf.equal(choice, c), t, zeros_tensor)
  return final_tensor 
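The docstring example as runnable code, with the tensor values spelled out:

import tensorflow.compat.v2 as tf

tensor_dict = {
    0: tf.zeros([6]),
    1: tf.ones([6]),
    2: 2 * tf.ones([6]),
    3: 3 * tf.ones([6]),
}
choice = tf.constant([0, 0, 2, 2, 1, 0])
print(gather_from_dict(tensor_dict, choice))  # [0. 0. 2. 2. 1. 0.]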
Example #8
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _prepare_grid(times, *params):
  """Prepares grid of times for path generation.

  Args:
    times:  Rank 1 `Tensor` of increasing positive real values. The times at
      which the path points are to be evaluated.
    *params: Parameters of the Hull-White model. Either scalar `Tensor`s of the
      same `dtype` or instances of `PiecewiseConstantFunc`.

  Returns:
    Tuple `(all_times, mask)`.
    `all_times` is a 1-D real `Tensor` containing all points from `times`, a
    zero, and the jump locations of any piecewise constant parameters. The
    `Tensor` is sorted in ascending order and may contain duplicates.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that `all_times[0] = 0` and `mask[0] = False`.
  """
  additional_times = []
  for param in params:
    if hasattr(param, 'is_piecewise_constant'):
      if param.is_piecewise_constant:
        # Flatten all jump locations
        additional_times.append(tf.reshape(param.jump_locations(), [-1]))
  zeros = tf.constant([0], dtype=times.dtype)
  all_times = tf.concat([zeros] + [times] + additional_times, axis=0)
  additional_times_mask = [
      tf.zeros_like(times, dtype=tf.bool) for times in additional_times]
  mask = tf.concat([
      tf.cast(zeros, dtype=tf.bool),
      tf.ones_like(times, dtype=tf.bool)
  ] + additional_times_mask, axis=0)
  perm = tf.argsort(all_times, stable=True)
  all_times = tf.gather(all_times, perm)
  mask = tf.gather(mask, perm)
  return all_times, mask 
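With scalar parameters only (no PiecewiseConstantFunc instances, hence no jump locations), the grid is just a zero prepended to times. An illustrative run, where kappa stands in for a model parameter:

import tensorflow.compat.v2 as tf

times = tf.constant([0.5, 1.0], dtype=tf.float64)
kappa = tf.constant(0.1, dtype=tf.float64)
all_times, mask = _prepare_grid(times, kappa)
print(all_times)  # [0.  0.5 1. ]
print(mask)       # [False  True  True]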
Example #9
Source File: euler_sampling.py    From tf-quant-finance with Apache License 2.0
def _prepare_grid(*, times, time_step, dtype):
  """Prepares grid of times for path generation.

  Args:
    times:  Rank 1 `Tensor` of increasing positive real values. The times at
      which the path points are to be evaluated.
    time_step: Rank 0 real `Tensor`. Maximal distance between points in
      resulting grid.
    dtype: `tf.Dtype` of the input and output `Tensor`s.

  Returns:
    Tuple `(all_times, mask, time_indices)`.
    `all_times` is a 1-D real `Tensor` containing all points from `times` and
    the uniform grid of points between `[0, times[-1]]` with grid size equal to
    `time_step`. The `Tensor` is sorted in ascending order and deduplicated.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that `all_times[0] = 0` and `mask[0] = False`.
    `time_indices` is an integer `Tensor` of the same shape as `times`,
    indicating the indices of `times` in `all_times`.
  """
  grid = tf.range(0.0, times[-1], time_step, dtype=dtype)
  all_times = tf.concat([grid, times], axis=0)
  mask = tf.concat([
      tf.zeros_like(grid, dtype=tf.bool),
      tf.ones_like(times, dtype=tf.bool)
  ],
                   axis=0)
  perm = tf.argsort(all_times, stable=True)
  all_times = tf.gather(all_times, perm)
  # Remove duplicate points
  all_times = tf.unique(all_times).y
  time_indices = tf.searchsorted(all_times, times)
  mask = tf.gather(mask, perm)
  return all_times, mask, time_indices 
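An illustrative run with made-up values: the uniform grid [0.0, 0.2, 0.4] produced by time_step=0.2 is merged with the requested times:

import tensorflow.compat.v2 as tf

all_times, mask, time_indices = _prepare_grid(
    times=tf.constant([0.25, 0.5]), time_step=0.2, dtype=tf.float32)
print(all_times)     # [0.   0.2  0.25 0.4  0.5 ]
print(mask)          # [False False  True False  True]
print(time_indices)  # [2 4]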
Example #10
Source File: array_ops.py    From trax with Apache License 2.0
def diag(v, k=0):  # pylint: disable=missing-docstring
  """Raises an error if input is not 1- or 2-d."""
  v = asarray(v).data
  v_rank = tf.rank(v)

  v.shape.with_rank_at_most(2)

  # TODO(nareshmodi): Consider a utils.Assert version that will fail during
  # tracing time if the shape is known.
  tf.debugging.Assert(
      utils.logical_or(tf.equal(v_rank, 1), tf.equal(v_rank, 2)), [v_rank])

  def _diag(v, k):
    return utils.cond(
        tf.equal(tf.size(v), 0),
        lambda: tf.zeros([abs(k), abs(k)], dtype=v.dtype),
        lambda: tf.linalg.diag(v, k=k))

  def _diag_part(v, k):
    v_shape = tf.shape(v)
    v, k = utils.cond(
        utils.logical_or(
            utils.less_equal(k, -1 * utils.getitem(v_shape, 0)),
            utils.greater_equal(k, utils.getitem(v_shape, 1)),
        ), lambda: (tf.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))
    result = tf.linalg.diag_part(v, k=k)
    return result

  result = utils.cond(
      tf.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))
  return utils.tensor_to_ndarray(result) 
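The two branches wrap tf.linalg.diag (vector to matrix) and tf.linalg.diag_part (matrix to vector). The underlying ops directly:

import tensorflow.compat.v2 as tf

print(tf.linalg.diag(tf.constant([1, 2, 3])))
# [[1 0 0]
#  [0 2 0]
#  [0 0 3]]
m = tf.reshape(tf.range(9), [3, 3])
print(tf.linalg.diag_part(m, k=1))  # [1 5]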
Example #11
Source File: resnet50_ctl_tf2.py    From tpu_models with Apache License 2.0
def safe_mean(losses):
  total = tf.reduce_sum(losses)
  num_elements = tf.dtypes.cast(tf.size(losses), dtype=losses.dtype)
  return tf.math.divide_no_nan(total, num_elements) 
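tf.math.divide_no_nan makes the mean safe on an empty batch of losses. A quick check:

import tensorflow.compat.v2 as tf

print(safe_mean(tf.constant([1.0, 2.0, 3.0])))  # 2.0
print(safe_mean(tf.zeros([0])))                 # 0.0 rather than NaN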
Example #12
Source File: array_ops.py    From trax with Apache License 2.0
def atleast_3d(*arys):  # pylint: disable=missing-docstring

  def new_shape(_, old_shape):
    # pylint: disable=g-long-lambda
    ndim_ = tf.size(old_shape)
    return utils.cond(
        ndim_ == 0, lambda: tf.constant([1, 1, 1], dtype=tf.int32),
        lambda: utils.cond(
            ndim_ == 1, lambda: tf.pad(old_shape, [[1, 1]], constant_values=1),
            lambda: tf.pad(old_shape, [[0, 1]], constant_values=1)))

  return _atleast_nd(3, new_shape, *arys) 
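new_shape implements NumPy's atleast_3d shape convention: a scalar shape becomes [1, 1, 1], a rank-1 shape gains a leading and a trailing 1, and a rank-2 shape gains a trailing 1. The rank-1 case in isolation:

import tensorflow.compat.v2 as tf

old_shape = tf.constant([5], dtype=tf.int32)  # shape of a 1-d array
print(tf.pad(old_shape, [[1, 1]], constant_values=1))  # [1 5 1]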
Example #13
Source File: array_ops.py    From trax with Apache License 2.0
def _pad_left_to(n, old_shape):
  old_shape = asarray(old_shape, dtype=np.int32).data
  new_shape = tf.pad(
      old_shape, [[tf.math.maximum(n - tf.size(old_shape), 0), 0]],
      constant_values=1)
  return asarray(new_shape) 
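The same padding logic without the module's asarray wrapper, on made-up inputs:

import tensorflow.compat.v2 as tf

n = 4
old_shape = tf.constant([3, 2], dtype=tf.int32)
new_shape = tf.pad(
    old_shape, [[tf.math.maximum(n - tf.size(old_shape), 0), 0]],
    constant_values=1)
print(new_shape)  # [1 1 3 2]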
Example #14
Source File: array_ops.py    From trax with Apache License 2.0
def _boundaries_to_sizes(a, boundaries, axis):
  """Converting boundaries of splits to sizes of splits.

  Args:
    a: the array to be split.
    boundaries: the boundaries, as in np.split.
    axis: the axis along which to split.

  Returns:
    A list of sizes of the splits, as in tf.split.
  """
  if axis >= len(a.shape):
    raise ValueError('axis %s is out of bounds for shape %s' % (axis, a.shape))
  total_size = a.shape[axis]
  sizes = []
  sizes_sum = 0
  prev = 0
  for i, b in enumerate(boundaries):
    size = b - prev
    if size < 0:
      raise ValueError('The %s-th boundary %s is smaller than the previous '
                       'boundary %s' % (i, b, prev))
    size = min(size, max(0, total_size - sizes_sum))
    sizes.append(size)
    sizes_sum += size
    prev = b
  sizes.append(max(0, total_size - sizes_sum))
  return sizes 
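A quick check against np.split semantics, with made-up inputs:

import numpy as np

a = np.zeros((10,))
print(_boundaries_to_sizes(a, [3, 7], axis=0))  # [3, 4, 3]
print([s.size for s in np.split(a, [3, 7])])    # [3, 4, 3]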
Example #15
Source File: array_ops.py    From trax with Apache License 2.0
def repeat(a, repeats, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a).data
  original_shape = a._shape_as_list()  # pylint: disable=protected-access
  # Best effort recovery of the shape.
  if original_shape is not None and None not in original_shape:
    if not original_shape:
      original_shape = (repeats,)
    else:
      repeats_np = np.ravel(np.array(repeats))
      if repeats_np.size == 1:
        repeats_np = repeats_np.item()
        if axis is None:
          original_shape = (repeats_np * np.prod(original_shape),)
        else:
          original_shape[axis] = repeats_np * original_shape[axis]
      else:
        if axis is None:
          original_shape = (repeats_np.sum(),)
        else:
          original_shape[axis] = repeats_np.sum()

  repeats = asarray(repeats).data
  result = tf.repeat(a, repeats, axis)
  result.set_shape(original_shape)

  return utils.tensor_to_ndarray(result) 
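For reference, the wrapped op itself; the function above only adds best-effort static shape recovery on top of it:

import tensorflow.compat.v2 as tf

print(tf.repeat(tf.constant([1, 2]), repeats=[2, 3]))  # [1 1 2 2 2]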
Example #16
Source File: ito_process.py    From tf-quant-finance with Apache License 2.0
def _sample_paths(self, times, grid_step, keep_mask, num_requested_times,
                    num_samples, initial_state, random_type, seed, swap_memory):
    """Returns a sample of paths from the process."""
    dt = times[1:] - times[:-1]
    sqrt_dt = tf.sqrt(dt)
    current_state = initial_state + tf.zeros(
        [num_samples, self.dim()], dtype=initial_state.dtype)
    steps_num = tf.shape(dt)[-1]
    wiener_mean = tf.zeros((self.dim(), 1), dtype=self._dtype)

    cond_fn = lambda i, *args: i < steps_num

    def step_fn(i, written_count, current_state, result):
      """Performs one step of Euler scheme."""
      current_time = times[i + 1]
      dw = random_ops.mv_normal_sample((num_samples,),
                                       mean=wiener_mean,
                                       random_type=random_type,
                                       seed=seed)
      dw = dw * sqrt_dt[i]
      dt_inc = dt[i] * self.drift_fn()(current_time, current_state)  # pylint: disable=not-callable
      dw_inc = tf.squeeze(
          tf.matmul(self.volatility_fn()(current_time, current_state), dw), -1)  # pylint: disable=not-callable
      next_state = current_state + dt_inc + dw_inc

      def write_next_state_to_result():
        # Replace result[:, written_count, :] with next_state.
        one_hot = tf.one_hot(written_count, depth=num_requested_times)
        mask = tf.expand_dims(one_hot > 0, axis=-1)
        return tf.where(mask, tf.expand_dims(next_state, axis=1), result)

      # Keep only states for times requested by user.
      result = tf.cond(keep_mask[i + 1],
                       write_next_state_to_result,
                       lambda: result)
      written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
      return i + 1, written_count, next_state, result

    # The maximum number of iterations is passed to the while loop below. It
    # improves performance of the while loop on a GPU and is needed for
    # XLA-compilation compatibility.
    maximum_iterations = (
        tf.cast(1. / grid_step, dtype=tf.int32) + tf.size(times))
    result = tf.zeros((num_samples, num_requested_times, self.dim()))
    _, _, _, result = tf.compat.v1.while_loop(
        cond_fn,
        step_fn, (0, 0, current_state, result),
        maximum_iterations=maximum_iterations,
        swap_memory=swap_memory)

    return result 
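The one-hot masked write in write_next_state_to_result is a tf.where-based substitute for item assignment, which keeps the loop XLA-compatible. The same trick in isolation, with made-up shapes:

import tensorflow.compat.v2 as tf

result = tf.zeros([2, 4, 3])      # [num_samples, num_requested_times, dim]
next_state = tf.ones([2, 3])
written_count = 1
one_hot = tf.one_hot(written_count, depth=4)  # [0. 1. 0. 0.]
mask = tf.expand_dims(one_hot > 0, axis=-1)   # shape [4, 1]
result = tf.where(mask, tf.expand_dims(next_state, axis=1), result)
# result[:, 1, :] now equals next_state; all other rows remain zero.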
Example #17
Source File: ito_process.py    From tf-quant-finance with Apache License 2.0
def sample_paths(self,
                   times,
                   num_samples=1,
                   initial_state=None,
                   random_type=None,
                   seed=None,
                   swap_memory=True,
                   name=None,
                   **kwargs):
    """Returns a sample of paths from the process.

    The default implementation uses the Euler scheme. However, for particular
    types of Ito processes more efficient schemes can be used.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      num_samples: Positive scalar `int`. The number of paths to draw.
      initial_state: `Tensor` of shape `[dim]`. The initial state of the
        process.
        Default value: None which maps to a zero initial state.
      random_type: Enum value of `RandomType`. The type of (quasi)-random number
        generator to use to generate the paths.
        Default value: None which maps to the standard pseudo-random numbers.
      seed: Python `int`. The random seed to use. If not supplied, no seed is
        set.
      swap_memory: Whether GPU-CPU memory swap is enabled for this op. See
        equivalent flag in `tf.while_loop` documentation for more details.
        Useful when computing a gradient of the op since `tf.while_loop` is used
        to propagate the stochastic process in time.
      name: str. The name to give this op. If not supplied, default name of
        `sample_paths` is used.
      **kwargs: Parameters specific to the Euler scheme: `grid_step` is a rank 0
        real `Tensor`, the maximal distance between points in the time grid.

    Returns:
     A real `Tensor` of shape [num_samples, k, n] where `k` is the size of
        `times` and `n` is the dimension of the process.
    """
    if self.drift_fn() is None or self.volatility_fn() is None:
      raise NotImplementedError(
          'In order to use Euler scheme, both drift_fn and volatility_fn '
          'should be provided.')
    default_name = self.name() + '_sample_paths'
    with tf.compat.v1.name_scope(
        name, default_name=default_name, values=[times, initial_state]):
      if initial_state is None:
        initial_state = tf.zeros(self._dim, dtype=self._dtype)
      times = tf.convert_to_tensor(times, dtype=self._dtype)
      initial_state = tf.convert_to_tensor(
          initial_state, dtype=self._dtype, name='initial_state')
      num_requested_times = tf.shape(times)[-1]
      grid_step = kwargs['grid_step']
      times, keep_mask = self._prepare_grid(times, grid_step)
      return self._sample_paths(times, grid_step, keep_mask,
                                num_requested_times, num_samples, initial_state,
                                random_type, seed, swap_memory) 
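A hypothetical call, assuming process is a concrete ItoProcess subclass with drift_fn and volatility_fn defined:

paths = process.sample_paths(
    times=[0.1, 0.5, 1.0], num_samples=100, grid_step=0.01)
# paths has shape [100, 3, dim]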
Example #18
Source File: math_ops.py    From trax with Apache License 2.0
def average(a, axis=None, weights=None, returned=False):  # pylint: disable=missing-docstring
  if axis is not None and not isinstance(axis, six.integer_types):
    # TODO(wangpeng): Support tuple of ints as `axis`
    raise ValueError('`axis` must be an integer. Tuple of ints is not '
                     'supported yet. Got type: %s' % type(axis))
  a = array_ops.array(a)
  if weights is None:  # Treat all weights as 1
    if not np.issubdtype(a.dtype, np.inexact):
      a = a.astype(utils.result_type(a.dtype, dtypes.default_float_type()))
    avg = tf.reduce_mean(a.data, axis=axis)
    if returned:
      if axis is None:
        weights_sum = tf.size(a.data)
      else:
        weights_sum = tf.shape(a.data)[axis]
      weights_sum = tf.cast(weights_sum, a.data.dtype)
  else:
    if np.issubdtype(a.dtype, np.inexact):
      out_dtype = utils.result_type(a.dtype, weights)
    else:
      out_dtype = utils.result_type(a.dtype, weights,
                                    dtypes.default_float_type())
    a = array_ops.array(a, out_dtype).data
    weights = array_ops.array(weights, out_dtype).data

    def rank_equal_case():
      tf.debugging.Assert(tf.reduce_all(tf.shape(a) == tf.shape(weights)),
                          [tf.shape(a), tf.shape(weights)])
      weights_sum = tf.reduce_sum(weights, axis=axis)
      avg = tf.reduce_sum(a * weights, axis=axis) / weights_sum
      return avg, weights_sum
    if axis is None:
      avg, weights_sum = rank_equal_case()
    else:
      def rank_not_equal_case():
        tf.debugging.Assert(tf.rank(weights) == 1, [tf.rank(weights)])
        weights_sum = tf.reduce_sum(weights)
        axes = tf.convert_to_tensor([[axis], [0]])
        avg = tf.tensordot(a, weights, axes) / weights_sum
        return avg, weights_sum
      # We condition on rank rather than shape equality, because if we do the
      # latter, when the shapes are partially unknown but the ranks are known
      # and different, utils.cond will run shape checking on the true branch,
      # which will raise a shape-checking error.
      avg, weights_sum = utils.cond(tf.rank(a) == tf.rank(weights),
                                    rank_equal_case, rank_not_equal_case)

  avg = array_ops.array(avg)
  if returned:
    weights_sum = array_ops.broadcast_to(weights_sum, tf.shape(avg.data))
    return avg, weights_sum
  return avg
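The rank_not_equal_case branch is plain weighted averaging via tensordot. The same computation with raw TF ops, on made-up values:

import tensorflow.compat.v2 as tf

a = tf.constant([[1., 2.], [3., 4.]])
w = tf.constant([0.25, 0.75])
axes = tf.convert_to_tensor([[1], [0]])  # contract a's axis 1 with w's axis 0
print(tf.tensordot(a, w, axes) / tf.reduce_sum(w))  # [1.75 3.75]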