Python tensorflow.compat.v2.shape() Examples

The following are 30 code examples of tensorflow.compat.v2.shape(), collected from open-source projects. Each example is annotated with its source file, project, and license. You may also want to check out the other available functions and classes of the tensorflow.compat.v2 module.
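
Before the examples, a minimal self-contained sketch of what tf.compat.v2.shape() returns may help: a 1-D int32 Tensor holding the runtime shape, which is what the examples below rely on when static dimensions are unknown (e.g. a None batch dimension).

import tensorflow.compat.v2 as tf

x = tf.zeros([2, 3, 5])
print(tf.shape(x))                     # tf.Tensor([2 3 5], shape=(3,), dtype=int32)
print(x.shape)                         # static TensorShape (2, 3, 5); can contain None inside tf.function
print(tf.shape(x, out_type=tf.int64))  # an output dtype can be requested, as some examples below do
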
Example #1
Source File: array_ops.py    From trax with Apache License 2.0
def real(val):
  """Returns real parts of all elements in `a`.

  Uses `tf.math.real`.

  Args:
    val: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.

  Returns:
    An ndarray with the same shape as `val`.
  """
  val = asarray(val)
  # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
  # return an ndarray.
  return utils.tensor_to_ndarray(tf.math.real(val.data)) 
Example #2
Source File: multidim_parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _slice(tensor, dim, start, end):
  """Slices the tensor along given dimension."""
  # Performs a slice along the dimension dim. E.g. for a tensor t of rank 3,
  # _slice(t, 1, 3, 5) is the same as t[:, 3:5].
  # For a slice unbounded to the right, set end=0: _slice(t, 1, -3, 0) is the
  # same as t[:, -3:].
  rank = len(tensor.shape.as_list())
  if start < 0:
    start += tf.compat.dimension_value(tensor.shape.as_list()[dim])
  if end <= 0:
    end += tf.compat.dimension_value(tensor.shape.as_list()[dim])
  slice_begin = np.zeros(rank, dtype=np.int32)
  slice_begin[dim] = start
  slice_size = -np.ones(rank, dtype=np.int32)
  slice_size[dim] = end - start
  return tf.slice(tensor, slice_begin, slice_size) 
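
A short usage sketch for the helper above, assuming `_slice` is in scope; the tensor and indices are illustrative only:

import tensorflow.compat.v2 as tf

t = tf.reshape(tf.range(24), [2, 3, 4])
sliced = _slice(t, 1, 1, 3)   # equivalent to t[:, 1:3, :]
# sliced.shape == (2, 2, 4)
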
Example #3
Source File: array_ops.py    From trax with Apache License 2.0
def triu(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()

  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')

  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')

  z = tf.constant(0, m.dtype)

  mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), z, m)) 
Example #4
Source File: array_ops.py    From trax with Apache License 2.0
def tril(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()

  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')

  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')

  z = tf.constant(0, m.dtype)

  mask = tri(*m_shape[-2:], k=k, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), m, z)) 
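
For comparison, the same upper/lower-triangle masking can be sketched with stock TensorFlow ops; this is an illustration, not the trax implementation above:

import tensorflow.compat.v2 as tf

m = tf.reshape(tf.range(1.0, 10.0), [3, 3])
upper = tf.linalg.band_part(m, 0, -1)   # keeps the upper triangle, like triu(m)
lower = tf.linalg.band_part(m, -1, 0)   # keeps the lower triangle, like tril(m)
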
Example #5
Source File: multidim_parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _trim_boundaries(tensor, from_dim, shifts=None):
  """Trims tensor boundaries starting from given dimension."""
  # For example, if tensor has shape (a, b, c, d) and from_dim=1, then the
  # output tensor has shape (a, b-2, c-2, d-2).
  # For example, _trim_boundaries(t, 1) with a rank-4
  # tensor t yields t[:, 1:-1, 1:-1, 1:-1].
  #
  # If shifts is specified, the slices applied are shifted. shifts is an array
  # of length rank(tensor) - from_dim, with values -1, 0, or 1, meaning slices
  # [:-2], [1:-1], and [2:], respectively.
  # For example, _trim_boundaries(t, 1, shifts=(1, 0, -1)) with a rank-4
  # tensor t yields t[:, 2:, 1:-1, :-2].
  rank = len(tensor.shape.as_list())
  slice_begin = np.zeros(rank, dtype=np.int32)
  slice_size = np.zeros(rank, dtype=np.int32)
  for i in range(from_dim):
    slice_size[i] = tf.compat.dimension_value(tensor.shape.as_list()[i])
  for i in range(from_dim, rank):
    slice_begin[i] = 1
    slice_size[i] = tf.compat.dimension_value(tensor.shape.as_list()[i]) - 2
    if shifts is not None:
      slice_begin[i] += shifts[i - from_dim]
  return tf.slice(tensor, slice_begin, slice_size) 
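
A usage sketch, assuming `_trim_boundaries` above is in scope; the shape is illustrative:

import tensorflow.compat.v2 as tf

t = tf.ones([2, 4, 4, 4])
trimmed = _trim_boundaries(t, from_dim=1)   # same as t[:, 1:-1, 1:-1, 1:-1]
# trimmed.shape == (2, 2, 2, 2)
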
Example #6
Source File: math_ops.py    From trax with Apache License 2.0
def diff(a, n=1, axis=-1):
  def f(a):
    nd = a.shape.rank
    if (axis + nd if axis < 0 else axis) >= nd:
      raise ValueError("axis %s is out of bounds for array of dimension %s" %
                       (axis, nd))
    if n < 0:
      raise ValueError("order must be non-negative but got %s" % n)
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    op = tf.not_equal if a.dtype == tf.bool else tf.subtract
    for _ in range(n):
      a = op(a[slice1], a[slice2])
    return a
  return _scalar(f, a) 
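
The slicing trick used by `diff` can be shown with plain TensorFlow, independent of the trax ndarray wrappers; a minimal sketch:

import tensorflow.compat.v2 as tf

a = tf.constant([1, 2, 4, 7, 11])
first_diff = a[1:] - a[:-1]                      # [1, 2, 3, 4]
second_diff = first_diff[1:] - first_diff[:-1]   # [1, 1, 1]
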
Example #7
Source File: network.py    From ranking with Apache License 2.0
def compute_logits(self,
                     context_features=None,
                     example_features=None,
                     training=None,
                     mask=None):
    """Scores context and examples to return a score per document.

    Args:
      context_features: (dict) context feature names to 2D tensors of shape
        [batch_size, feature_dims].
      example_features: (dict) example feature names to 3D tensors of shape
        [batch_size, list_size, feature_dims].
      training: (bool) whether in train or inference mode.
      mask: (tf.Tensor) Mask is a tensor of shape [batch_size, list_size], which
        is True for a valid example and False for an invalid one. If mask is None,
        all entries are valid.

    Returns:
      (tf.Tensor) A score tensor of shape [batch_size, list_size].
    """
    raise NotImplementedError('Calling an abstract method, '
                              'tfr.keras.RankingModel.compute_logits().') 
Example #8
Source File: network.py    From ranking with Apache License 2.0
def transform(self, features=None, training=None, mask=None):
    """Transforms the features into dense context features and example features.

    Users can override this function to implement custom transformations.
    Mask is provided as an argument so that inherited models can have access
    to it for custom feature transformations, without modifying
    `call` explicitly.

    Args:
      features: (dict) with a mix of context (2D) and example features (3D).
      training: (bool) whether in train or inference mode.
      mask: (tf.Tensor) Mask is a tensor of shape [batch_size, list_size], which
        is True for a valid example and False for an invalid one.

    Returns:
      context_features: (dict) context feature names to dense 2D tensors of
        shape [batch_size, feature_dims].
      example_features: (dict) example feature names to dense 3D tensors of
        shape [batch_size, list_size, feature_dims].
    """
    del mask
    context_features, example_features = self._listwise_dense_layer(
        inputs=features, training=training)
    return context_features, example_features 
Example #9
Source File: array_ops.py    From trax with Apache License 2.0
def take(a, indices, axis=None, out=None, mode='clip'):
  """out argument is not supported, and default mode is clip."""
  if out is not None:
    raise ValueError('out argument is not supported in take.')

  if mode not in {'raise', 'clip', 'wrap'}:
    raise ValueError("Invalid mode '{}' for take".format(mode))

  a = asarray(a).data
  indices = asarray(indices).data

  if axis is None:
    a = tf.reshape(a, [-1])
    axis = 0

  axis_size = tf.shape(a, indices.dtype)[axis]
  if mode == 'clip':
    indices = tf.clip_by_value(indices, 0, axis_size-1)
  elif mode == 'wrap':
    indices = tf.math.floormod(indices, axis_size)
  else:
    raise ValueError("The 'raise' mode to take is not supported.")

  return utils.tensor_to_ndarray(tf.gather(a, indices, axis=axis)) 
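
The 'clip' and 'wrap' index handling can be reproduced with stock TensorFlow ops; a minimal sketch with made-up inputs:

import tensorflow.compat.v2 as tf

a = tf.constant([10, 20, 30, 40])
indices = tf.constant([0, 2, 7, -1])
axis_size = tf.shape(a, indices.dtype)[0]
clipped = tf.gather(a, tf.clip_by_value(indices, 0, axis_size - 1))   # mode='clip'
wrapped = tf.gather(a, tf.math.floormod(indices, axis_size))          # mode='wrap'
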
Example #10
Source File: feature.py    From ranking with Apache License 2.0
def call(self, inputs):
    """Generates mask (whether example is valid) from features.

    Args:
      inputs: (dict) Features with a mix of context (2D) and example features
        (3D).

    Returns:
      mask: (tf.Tensor) Mask is a tensor of shape [batch_size, list_size], which
        is True for a valid example and False for an invalid one.
    """
    example_feature = inputs[next(six.iterkeys(self._example_feature_columns))]
    list_size = tf.shape(example_feature)[1]
    sizes = inputs[self._size_feature_name]
    mask = tf.sequence_mask(sizes, maxlen=list_size)
    return mask 
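
The masking step on its own, as a small sketch with hypothetical sizes:

import tensorflow.compat.v2 as tf

sizes = tf.constant([2, 3])      # number of valid examples per list
list_size = 4                    # padded list size, i.e. tf.shape(example_feature)[1]
mask = tf.sequence_mask(sizes, maxlen=list_size)
# [[ True,  True, False, False],
#  [ True,  True,  True, False]]
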
Example #11
Source File: tf.py    From trax with Apache License 2.0
def tf_randint(key, shape, minval, maxval, dtype=np.int32):
  """Sample uniform random values in [minval, maxval) with given shape/dtype.

  Args:
    key: a PRNGKey used as the random key.
    shape: a tuple of nonnegative integers representing the shape.
    minval: int or array of ints broadcast-compatible with ``shape``, a minimum
      (inclusive) value for the range.
    maxval: int or array of ints broadcast-compatible with ``shape``, a maximum
      (exclusive) value for the range.
    dtype: optional, an int dtype for the returned values (default int32).

  Returns:
    A random array with the specified shape and dtype.
  """
  return tf_np_extensions.uniform(key, shape, minval=minval, maxval=maxval,
                                  dtype=dtype) 
Example #12
Source File: utils.py    From trax with Apache License 2.0
def tf_broadcast(*args):
  """Broadcast tensors.

  Args:
    *args: a list of tensors whose shapes are broadcastable against each other.

  Returns:
    Tensors broadcasted to the common shape.
  """
  if len(args) <= 1:
    return args
  sh = tf.shape(args[0])
  for arg in args[1:]:
    sh = tf.broadcast_dynamic_shape(sh, tf.shape(arg))
  return [tf.broadcast_to(arg, sh) for arg in args]


# TODO(wangpeng): Move the following functions to a separate file and check for
#   float dtypes in each of them. 
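
A usage sketch, assuming `tf_broadcast` above is in scope:

import tensorflow.compat.v2 as tf

x = tf.zeros([3, 1])
y = tf.ones([1, 4])
bx, by = tf_broadcast(x, y)
# bx.shape == by.shape == (3, 4)
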
Example #13
Source File: array_ops.py    From trax with Apache License 2.0
def imag(a):
  """Returns imaginary parts of all elements in `a`.

  Uses `tf.math.imag`.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.

  Returns:
    An ndarray with the same shape as `a`.
  """
  a = asarray(a)
  # TODO(srbs): np.imag returns a scalar if a is a scalar, whereas we always
  # return an ndarray.
  return utils.tensor_to_ndarray(tf.math.imag(a.data)) 
Example #14
Source File: math_ops.py    From trax with Apache License 2.0
def _scalar(tf_fn, x, promote_to_float=False):
  """Computes the tf_fn(x) for each element in `x`.

  Args:
    tf_fn: function that takes a single Tensor argument.
    x: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    promote_to_float: whether to cast the argument to a float dtype
      (`dtypes.default_float_type`) if it is not already.

  Returns:
    An ndarray with the same shape as `x`. The default output dtype is
    determined by `dtypes.default_float_type`, unless x is an ndarray with a
    floating point type, in which case the output type is same as x.dtype.
  """
  x = array_ops.asarray(x)
  if promote_to_float and not np.issubdtype(x.dtype, np.inexact):
    x = x.astype(dtypes.default_float_type())
  return utils.tensor_to_ndarray(tf_fn(x.data)) 
Example #15
Source File: utils.py    From tf-quant-finance with Apache License 2.0
def broadcast_common_batch_shape(x, y):
  """Broadcasts batch shapes of `x` and `y`."""
  rank = max(x.shape.rank, y.shape.rank)
  x = expand_to_rank(x, rank)
  y = expand_to_rank(y, rank)
  if x.shape.as_list()[:-1] != y.shape.as_list()[:-1]:
    try:
      x = broadcast_batch_shape(x, y.shape[:-1])
    except (tf.errors.InvalidArgumentError, ValueError):
      try:
        y = broadcast_batch_shape(y, x.shape[:-1])
      except (tf.errors.InvalidArgumentError, ValueError):
        raise ValueError(
            "Can not broadcast batch shapes {0} and {1}".format(
                x.shape.as_list()[:-1], y.shape.as_list()[:-1]))
  return x, y 
Example #16
Source File: array_ops.py    From trax with Apache License 2.0
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):  # pylint: disable=missing-docstring,redefined-outer-name
  """order, subok and shape arguments mustn't be changed."""
  if order != 'K':
    raise ValueError('Non-standard orders are not supported.')
  if not subok:
    raise ValueError('subok being False is not supported.')
  if shape:
    raise ValueError('Overriding the shape is not supported.')

  a = asarray(a).data
  dtype = dtype or utils.result_type(a)
  fill_value = asarray(fill_value, dtype=dtype)
  return arrays_lib.tensor_to_ndarray(
      tf.broadcast_to(fill_value.data, tf.shape(a)))


# TODO(wangpeng): investigate whether we can make `copy` default to False.
# TODO(wangpeng): utils.np_doc can't handle np.array because np.array is a
#   builtin function. Make utils.np_doc support builtin functions. 
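
The core of `full_like` can be sketched with stock TensorFlow ops, leaving out the trax ndarray wrappers:

import tensorflow.compat.v2 as tf

a = tf.zeros([2, 3], dtype=tf.float32)
fill_value = tf.constant(7.0, dtype=a.dtype)
filled = tf.broadcast_to(fill_value, tf.shape(a))   # a 2x3 tensor of 7.0
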
Example #17
Source File: brownian_motion.py    From tf-quant-finance with Apache License 2.0
def total_covariance_fn(self):
    """The total covariance of the process between two times.

    Returns:
      A Python callable returning the integrated covariances between two times.
      The callable accepts two real `Tensor` arguments. The first argument
      is the left end point and the second is the right end point of the time
      interval for which the total covariance is needed.

      The shape of the two input arguments and their dtypes must match.
      The output of the callable is a `Tensor` of shape
      `times_shape + [dim, dim]` containing the integrated covariance matrix
      between the start times and end times.
    """
    return self._total_covariance_fn

  # Override 
Example #18
Source File: brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _exact_sampling(self, end_times, start_times, num_samples, initial_state,
                      random_type, seed):
    """Returns a sample of paths from the process."""
    non_decreasing = tf.debugging.assert_greater_equal(
        end_times, start_times, message='Sampling times must be non-decreasing')
    starts_non_negative = tf.debugging.assert_greater_equal(
        start_times,
        tf.zeros_like(start_times),
        message='Sampling times must not be < 0.')
    with tf.compat.v1.control_dependencies(
        [starts_non_negative, non_decreasing]):
      drifts = self._total_drift_fn(start_times, end_times)
      covars = self._total_covariance_fn(start_times, end_times)
      # path_deltas are of shape [num_samples, size(times), dim].
      path_deltas = mvn.multivariate_normal((num_samples,),
                                            mean=drifts,
                                            covariance_matrix=covars,
                                            random_type=random_type,
                                            seed=seed)
      paths = tf.cumsum(path_deltas, axis=1)
    return paths

  # Override 
Example #19
Source File: array_ops.py    From trax with Apache License 2.0
def ones(shape, dtype=float):  # pylint: disable=redefined-outer-name
  """Returns an ndarray with the given shape and type filled with ones.

  Args:
    shape: A fully defined shape. Could be a NumPy array or a Python scalar,
      list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
      with rank <= 1.
    dtype: Optional, defaults to float. The type of the resulting ndarray. Could
      be a python type, a NumPy type or a TensorFlow `DType`.

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = utils.result_type(dtype)
  if isinstance(shape, arrays_lib.ndarray):
    shape = shape.data
  return arrays_lib.tensor_to_ndarray(tf.ones(shape, dtype=dtype)) 
Example #20
Source File: array_ops.py    From trax with Apache License 2.0
def zeros_like(a, dtype=None):
  """Returns an array of zeros with the shape and type of the input array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the input array. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.
  """
  if isinstance(a, arrays_lib.ndarray):
    a = a.data
  if dtype is None:
    # We need to let utils.result_type decide the dtype, not tf.zeros_like
    dtype = utils.result_type(a)
  else:
    # TF and NumPy have different interpretations of Python types such as
    # `float`, so we let `utils.result_type` decide.
    dtype = utils.result_type(dtype)
  dtype = tf.as_dtype(dtype)  # Work around b/149877262
  return arrays_lib.tensor_to_ndarray(tf.zeros_like(a, dtype)) 
Example #21
Source File: array_ops.py    From trax with Apache License 2.0
def zeros(shape, dtype=float):  # pylint: disable=redefined-outer-name
  """Returns an ndarray with the given shape and type filled with zeros.

  Args:
    shape: A fully defined shape. Could be a NumPy array or a Python scalar,
      list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
      with rank <= 1.
    dtype: Optional, defaults to float. The type of the resulting ndarray. Could
      be a python type, a NumPy type or a TensorFlow `DType`.

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = utils.result_type(dtype)
  if isinstance(shape, arrays_lib.ndarray):
    shape = shape.data
  return arrays_lib.tensor_to_ndarray(tf.zeros(shape, dtype=dtype)) 
Example #22
Source File: network_test.py    From ranking with Apache License 2.0
def compute_logits(self,
                     context_features=None,
                     example_features=None,
                     training=True,
                     mask=None):
    batch_size, list_size, _ = example_features["utility"].get_shape().as_list()
    return tf.ones(shape=(batch_size, list_size)) 
Example #23
Source File: network.py    From ranking with Apache License 2.0
def score(self, context_features=None, example_features=None, training=None):
    """Univariate scoring of context and one example to generate a score.

    Args:
      context_features: (dict) context feature names to 2D tensors of shape
        [batch_size, ...].
      example_features: (dict) example feature names to 2D tensors of shape
        [batch_size, ...].
      training: (bool) whether in training or inference mode.

    Returns:
      (tf.Tensor) A score tensor of shape [batch_size, 1].
    """
    raise NotImplementedError('Calling an abstract method, '
                              'tfr.keras.UnivariateRankingModel.score().') 
Example #24
Source File: census_example_v2.py    From transform with Apache License 2.0
def export_serving_model(tf_transform_output, model, output_dir):
  """Exports a keras model for serving.

  Args:
    tf_transform_output: Wrapper around output of tf.Transform.
    model: A keras model to export for serving.
    output_dir: A directory where the model will be exported to.
  """
  # The layer has to be saved to the model for Keras tracking purposes.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Serving tf.function model wrapper."""
    feature_spec = RAW_DATA_FEATURE_SPEC.copy()
    feature_spec.pop(LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    outputs = model(transformed_features)
    classes_names = tf.constant([['0', '1']])
    classes = tf.tile(classes_names, [tf.shape(outputs)[0], 1])
    return {'classes': classes, 'scores': outputs}

  concrete_serving_fn = serve_tf_examples_fn.get_concrete_function(
      tf.TensorSpec(shape=[None], dtype=tf.string, name='inputs'))
  signatures = {'serving_default': concrete_serving_fn}

  # This is required in order to make this model servable with model_server.
  versioned_output_dir = os.path.join(output_dir, '1')
  model.save(versioned_output_dir, save_format='tf', signatures=signatures) 
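
A usage sketch for loading the exported model back and looking up its serving signature; `output_dir` is the directory passed above, and the commented call only indicates the expected input format:

import os
import tensorflow.compat.v2 as tf

loaded = tf.saved_model.load(os.path.join(output_dir, '1'))
serving_fn = loaded.signatures['serving_default']
# The signature expects a batch of serialized tf.Example protos, e.g.:
# result = serving_fn(inputs=tf.constant([serialized_example_bytes]))
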
Example #25
Source File: piecewise.py    From tf-quant-finance with Apache License 2.0
def _try_broadcast_to(x, batch_shape, name):
  """Broadcasts batch shape of `x` to a `batch_shape` if possible."""
  batch_shape_x = x.shape.as_list()[:-1]
  if batch_shape_x != batch_shape:
    try:
      np.broadcast_to(np.zeros(batch_shape_x), batch_shape)
    except ValueError:
      raise ValueError('Batch shape of `{2}` should be broadcastable with {0} '
                       'but it is {1} instead'.format(
                           batch_shape, batch_shape_x, name))
    return tf.broadcast_to(x, batch_shape + x.shape[-1:])
  return x 
Example #26
Source File: model_test.py    From ranking with Apache License 2.0
def _context_feature_columns():
  return {
      "query_length":
          tf.feature_column.numeric_column(
              "query_length", shape=(1,), default_value=0, dtype=tf.int64)
  } 
Example #27
Source File: network_test.py    From ranking with Apache License 2.0
def _get_feature_columns():

  def _normalizer_fn(t):
    return tf.math.log1p(t * tf.sign(t)) * tf.sign(t)

  context_feature_columns = {
      "query_length":
          tf.feature_column.numeric_column(
              "query_length", shape=(1,), default_value=0, dtype=tf.int64)
  }
  example_feature_columns = {
      "utility":
          tf.feature_column.numeric_column(
              "utility",
              shape=(1,),
              default_value=0.0,
              dtype=tf.float32,
              normalizer_fn=_normalizer_fn),
      "unigrams":
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_vocabulary_list(
                  "unigrams",
                  vocabulary_list=[
                      "ranking", "regression", "classification", "ordinal"
                  ]),
              dimension=10)
  }
  custom_objects = {"_normalizer_fn": _normalizer_fn}
  return context_feature_columns, example_feature_columns, custom_objects 
Example #28
Source File: model_test.py    From ranking with Apache License 2.0
def score(self, context_features=None, example_features=None, training=True):
    large_batch_size = tf.shape(example_features["utility"])[0]
    return tf.ones(shape=(large_batch_size, 1)) 
Example #29
Source File: model_test.py    From ranking with Apache License 2.0
def _example_feature_columns():
  return {
      "utility":
          tf.feature_column.numeric_column(
              "utility", shape=(1,), default_value=0.0, dtype=tf.float32),
      "unigrams":
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_vocabulary_list(
                  "unigrams",
                  vocabulary_list=[
                      "ranking", "regression", "classification", "ordinal"
                  ]),
              dimension=10)
  } 
Example #30
Source File: quantizers.py    From qkeras with Apache License 2.0
def stochastic_round_po2(x):
  """Performs stochastic rounding for the power of two."""
  # TODO(hzhuang): test stochastic_round_po2 and constraint.
  # because quantizer is applied after constraint.
  y = tf.abs(x)
  eps = tf.keras.backend.epsilon()
  log2 = tf.keras.backend.log(2.0)

  x_log2 = tf.round(tf.keras.backend.log(y + eps) / log2)
  po2 = tf.cast(pow(2.0, tf.cast(x_log2, dtype="float32")), dtype="float32")
  left_val = tf.where(po2 > y, x_log2 - 1, x_log2)
  right_val = tf.where(po2 > y, x_log2, x_log2 + 1)
  # sampling in [2**left_val, 2**right_val].
  minval = 2 ** left_val
  maxval = 2 ** right_val
  val = tf.random.uniform(tf.shape(y), minval=minval, maxval=maxval)
  # Use y as a threshold to split the probability between [2**left_val, y] and
  # [y, 2**right_val], so that the mean value of the sample is y.
  x_po2 = tf.where(y < val, left_val, right_val)
  """
  x_log2 = stochastic_round(tf.keras.backend.log(y + eps) / log2)
  sign = tf.sign(x)
  po2 = (
      tf.sign(x) *
      tf.cast(pow(2.0, tf.cast(x_log2, dtype="float32")), dtype="float32")
  )
  """
  return x_po2
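
A quick usage sketch, assuming `stochastic_round_po2` above is in scope; note that it returns the stochastically rounded base-2 exponents, not the reconstructed powers of two:

import tensorflow.compat.v2 as tf

x = tf.constant([0.3, 1.7, -2.5])
exponents = stochastic_round_po2(x)              # e.g. [-2., 0., 1.]; varies per call
reconstructed = tf.sign(x) * tf.pow(2.0, exponents)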