Python tensorflow.python.ops.math_ops.logical_or() Examples

The following are 30 code examples of tensorflow.python.ops.math_ops.logical_or(), collected from open source projects. The source file, project, and license for each example are noted in the header above it.
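For quick reference, logical_or computes an element-wise boolean OR and broadcasts its operands like other binary TensorFlow ops. A minimal sketch, assuming a standard TensorFlow installation:

import tensorflow as tf
from tensorflow.python.ops import math_ops

a = tf.constant([True, False, True, False])
b = tf.constant([True, True, False, False])
print(math_ops.logical_or(a, b))                  # [ True  True  True False]
print(math_ops.logical_or(a, tf.constant(True)))  # broadcasts: [ True  True  True  True]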
Example #1
Source File: feedback.py    From sequencing with MIT License
def next_inputs(self, time, sample_ids=None, prev_finished=None):
    if sample_ids is None or self.teacher_rate > 0.:
        finished = tf.greater_equal(time + 1, self.sequence_length)
    else:
        finished = math_ops.logical_or(
            tf.greater_equal(time + 1, self.max_step),
            tf.equal(self.eos_id, sample_ids))

    if self.teacher_rate == 1. or (sample_ids is None):
        next_input_ids = self._input_tas.read(time)
        return finished, self.lookup(next_input_ids)

    if self.teacher_rate > 0.:
        # scheduled sampling: keep the ground-truth token with probability
        # `teacher_rate`, otherwise feed back the sampled id
        teacher_rates = tf.less_equal(
            tf.random_uniform(tf.shape(sample_ids), minval=0., maxval=1.),
            self.teacher_rate)
        teacher_rates = tf.to_int32(teacher_rates)

        next_input_ids = (teacher_rates * self._input_tas.read(time)
                          + (1 - teacher_rates) * sample_ids)
    else:
        next_input_ids = sample_ids

    return finished, self.lookup(next_input_ids)
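The scheduled-sampling mix above can be reproduced standalone; a sketch using TF2 eager APIs in place of the TF1 calls (ids and rate are made up):

import tensorflow as tf

teacher_rate = 0.7
sample_ids = tf.constant([4, 9, 6])                # ids sampled by the decoder
teacher_ids = tf.constant([4, 5, 6])               # ground-truth ids for this step
keep_teacher = tf.cast(
    tf.random.uniform(tf.shape(sample_ids)) <= teacher_rate, tf.int32)
next_input_ids = keep_teacher * teacher_ids + (1 - keep_teacher) * sample_ids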
Example #2
Source File: sparse_optimizers.py    From rigl with Apache License 2.0
def is_mask_update_iter(self, global_step, last_update_step):
    """Function for checking if the current step is a mask update step.

    It also creates the drop_fraction op and assigns it to the self object.

    Args:
      global_step: tf.Variable(int), current training step.
      last_update_step: tf.Variable(int), holding the last iteration the mask
        is updated. Used to determine whether current iteration is a mask
        update step.

    Returns:
      bool, whether the current iteration is a mask_update step.
    """
    gs_dtype = global_step.dtype
    self._begin_step = math_ops.cast(self._begin_step, gs_dtype)
    self._end_step = math_ops.cast(self._end_step, gs_dtype)
    self._frequency = math_ops.cast(self._frequency, gs_dtype)
    is_step_within_update_range = math_ops.logical_and(
        math_ops.greater_equal(global_step, self._begin_step),
        math_ops.logical_or(
            math_ops.less_equal(global_step, self._end_step),
            # If _end_step is negative, we never stop updating the mask.
            # In other words we update the mask with given frequency until the
            # training ends.
            math_ops.less(self._end_step, 0)))
    is_update_step = math_ops.less_equal(
        math_ops.add(last_update_step, self._frequency), global_step)
    is_mask_update_iter_op = math_ops.logical_and(
        is_step_within_update_range, is_update_step)
    self.drop_fraction = self.get_drop_fraction(global_step,
                                                is_mask_update_iter_op)
    return is_mask_update_iter_op 
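The update-window predicate above can be checked eagerly with concrete numbers; a small sketch (constants are invented, with end_step < 0 meaning "never stop updating"):

import tensorflow as tf

begin_step, end_step, frequency = 100, -1, 50
global_step = tf.constant(200, tf.int64)
last_update_step = tf.constant(150, tf.int64)
in_range = tf.logical_and(
    global_step >= begin_step,
    tf.logical_or(global_step <= end_step, end_step < 0))
is_update_step = (last_update_step + frequency) <= global_step
print(tf.logical_and(in_range, is_update_step))    # True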
Example #3
Source File: boolean_mask.py    From deep_image_model with Apache License 2.0
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain) 
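The foldl over logical_or above collapses a stack of per-row match masks into one; tf.reduce_any is the n-ary form of the same OR. A self-contained sketch with made-up indices:

import tensorflow as tf

first_indices = tf.constant([0, 0, 1, 2, 2])   # row index of each sparse entry
mask_rows = tf.constant([[0], [2]])            # rows selected by the boolean mask
matches = tf.equal(first_indices, mask_rows)   # shape [2, 5] via broadcasting
to_retain = tf.reduce_any(matches, axis=0)     # OR-fold over the selected rows
print(to_retain)                               # [ True  True False  True  True]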
Example #4
Source File: uniform.py    From deep_image_model with Apache License 2.0
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return math_ops.select(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        math_ops.select(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x))) 
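The piecewise density above is 1/(b - a) inside [a, b], 0 outside, with NaNs passed through. A TF2 sketch of the same logic (tf.where stands in for the older math_ops.select):

import tensorflow as tf

a, b = 0.0, 2.0
x = tf.constant([float('nan'), -1.0, 1.0, 3.0])
pdf = tf.where(tf.math.is_nan(x), x,
               tf.where(tf.logical_or(x < a, x > b),
                        tf.zeros_like(x),
                        tf.ones_like(x) / (b - a)))
print(pdf)  # [nan 0. 0.5 0.]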
Example #5
Source File: feedback.py    From sequencing with MIT License
def next_inputs(self, time, sample_ids, prev_finished):
    finished = math_ops.logical_or(
        tf.greater_equal(time + 1, tf.maximum(self.max_step,
                                              self.max_sequence_length)),
        tf.equal(self.eos_id, sample_ids))
    next_finished = math_ops.logical_or(finished, prev_finished)
    return next_finished, self.lookup(sample_ids)
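OR-ing in prev_finished makes the finished flag sticky: once a sequence emits EOS it stays finished for the rest of decoding. A concrete eager sketch (ids and lengths are made up):

import tensorflow as tf

time, max_step, eos_id = tf.constant(3), tf.constant(10), 2
sample_ids = tf.constant([2, 5, 7])
prev_finished = tf.constant([False, True, False])
finished = tf.logical_or(time + 1 >= max_step, tf.equal(eos_id, sample_ids))
next_finished = tf.logical_or(finished, prev_finished)
print(next_finished)  # [ True  True False]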
Example #6
Source File: feedback.py    From sequencing with MIT License
def next_inputs(self, time, sample_ids):
    finished = math_ops.logical_or(
        tf.greater_equal(time + 1, self.max_step),
        tf.equal(self.eos_id, sample_ids))
    return finished, self.lookup(sample_ids)
Example #7
Source File: tpu_estimator.py    From embedding-as-service with MIT License
def slice_tensor_or_dict(tensor_or_dict, signals):
    """Slice the real Tensors according to padding mask in signals."""

    padding_mask = signals['padding_mask']
    batch_size = array_ops.shape(padding_mask)[0]

    def verify_batch_size(tensor):
      check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
      with ops.control_dependencies([check_batch_size]):
        return array_ops.identity(tensor)

    def slice_single_tensor(tensor):
      rank = len(tensor.shape)
      assert rank > 0
      real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
      return verify_batch_size(tensor)[0:real_batch_size]

    # As we split the Tensors to all TPU cores and concat them back, it is
    # important to ensure the real data is placed before padded ones, i.e.,
    # order is preserved. By that, the sliced padding mask should have all 0's.
    # If this assertion fails, the slice logic here would not hold.
    sliced_padding_mask = slice_single_tensor(padding_mask)
    assert_padding_mask = math_ops.equal(
        math_ops.reduce_sum(sliced_padding_mask), 0)

    with ops.control_dependencies([assert_padding_mask]):
      should_stop = _StopSignals.should_stop(
          _StopSignals.as_scalar_stopping_signal(signals))

    is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)

    def slice_fn(tensor):
      # If the current batch is full batch or part of stopping signals, we do
      # not need to slice to save performance.
      return control_flow_ops.cond(
          math_ops.logical_or(should_stop, is_full_batch),
          (lambda: verify_batch_size(tensor)),
          (lambda: slice_single_tensor(tensor)))

    return nest.map_structure(slice_fn, tensor_or_dict) 
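The cond above skips the slice entirely when the batch has no padding or the stop signal is set. An eager sketch of the same decision with illustrative data:

import tensorflow as tf

padding_mask = tf.constant([0, 0, 1, 1])           # last two rows are padding
tensor = tf.constant([[1.], [2.], [3.], [4.]])
should_stop = tf.constant(False)
is_full_batch = tf.equal(tf.reduce_sum(padding_mask), 0)
real_batch_size = tf.shape(padding_mask)[0] - tf.reduce_sum(padding_mask)
sliced = tf.cond(tf.logical_or(should_stop, is_full_batch),
                 lambda: tensor,
                 lambda: tensor[0:real_batch_size])
print(sliced)  # [[1.] [2.]]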
Example #8
Source File: tpu_estimator.py    From transformer-xl with Apache License 2.0
def slice_tensor_or_dict(tensor_or_dict, signals):
    """Slice the real Tensors according to padding mask in signals."""

    padding_mask = signals['padding_mask']
    batch_size = array_ops.shape(padding_mask)[0]

    def verify_batch_size(tensor):
      check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
      with ops.control_dependencies([check_batch_size]):
        return array_ops.identity(tensor)

    def slice_single_tensor(tensor):
      rank = len(tensor.shape)
      assert rank > 0
      real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
      return verify_batch_size(tensor)[0:real_batch_size]

    # As we split the Tensors to all TPU cores and concat them back, it is
    # important to ensure the real data is placed before padded ones, i.e.,
    # order is preserved. By that, the sliced padding mask should have all 0's.
    # If this assertion fails, the slice logic here would not hold.
    sliced_padding_mask = slice_single_tensor(padding_mask)
    assert_padding_mask = math_ops.equal(
        math_ops.reduce_sum(sliced_padding_mask), 0)

    with ops.control_dependencies([assert_padding_mask]):
      should_stop = _StopSignals.should_stop(
          _StopSignals.as_scalar_stopping_signal(signals))

    is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)

    def slice_fn(tensor):
      # If the current batch is full batch or part of stopping signals, we do
      # not need to slice to save performance.
      return control_flow_ops.cond(
          math_ops.logical_or(should_stop, is_full_batch),
          (lambda: verify_batch_size(tensor)),
          (lambda: slice_single_tensor(tensor)))

    return nest.map_structure(slice_fn, tensor_or_dict) 
Example #9
Source File: math_ops.py    From pcc_geo_cnn with MIT License
def _upper_bound_grad(op, grad):
  """Gradient for `upper_bound` if `gradient == 'identity_if_towards'`.

  Args:
    op: The op for which to calculate a gradient.
    grad: Gradient with respect to the output of the op.

  Returns:
    Gradient with respect to the inputs of the op.
  """
  inputs, bound = op.inputs
  pass_through_if = math_ops.logical_or(inputs <= bound, grad > 0)
  return [math_ops.cast(pass_through_if, grad.dtype) * grad, None] 
Example #10
Source File: math_ops.py    From pcc_geo_cnn with MIT License
def _lower_bound_grad(op, grad):
  """Gradient for `lower_bound` if `gradient == 'identity_if_towards'`.

  Args:
    op: The op for which to calculate a gradient.
    grad: Gradient with respect to the output of the op.

  Returns:
    Gradient with respect to the inputs of the op.
  """
  inputs, bound = op.inputs
  pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
  return [math_ops.cast(pass_through_if, grad.dtype) * grad, None] 
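The same "identity_if_towards" rule can be expressed with tf.custom_gradient in TF2. A minimal sketch, not the gradient the library registers, with bound simplified to a Python float:

import tensorflow as tf

@tf.custom_gradient
def lower_bound(inputs, bound=0.0):
    def grad(dy):
        # Pass the gradient iff inputs is already feasible, or the update
        # would move it toward the bound (dy < 0 means descent increases inputs).
        pass_through = tf.logical_or(inputs >= bound, dy < 0)
        return tf.cast(pass_through, dy.dtype) * dy
    return tf.maximum(inputs, bound), grad

x = tf.constant([-1.0, 2.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = lower_bound(x)
print(tape.gradient(y, x))  # [0. 1.]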
Example #11
Source File: core_test.py    From keras-lambda with MIT License
def setUp(self):
    super(LogicalBinaryOpsTest, self).setUp()

    self.ops = [
        ('logical_and', operator.and_, math_ops.logical_and, core.logical_and),
        ('logical_or', operator.or_, math_ops.logical_or, core.logical_or),
        ('logical_xor', operator.xor, math_ops.logical_xor, core.logical_xor),
    ]
    self.test_lt_1 = self.original_lt < 10
    self.test_lt_2 = self.original_lt < 5
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes 
Example #12
Source File: tpu_estimator.py    From xlnet with Apache License 2.0
def slice_tensor_or_dict(tensor_or_dict, signals):
    """Slice the real Tensors according to padding mask in signals."""

    padding_mask = signals['padding_mask']
    batch_size = array_ops.shape(padding_mask)[0]

    def verify_batch_size(tensor):
      check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
      with ops.control_dependencies([check_batch_size]):
        return array_ops.identity(tensor)

    def slice_single_tensor(tensor):
      rank = len(tensor.shape)
      assert rank > 0
      real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
      return verify_batch_size(tensor)[0:real_batch_size]

    # As we split the Tensors to all TPU cores and concat them back, it is
    # important to ensure the real data is placed before padded ones, i.e.,
    # order is preserved. By that, the sliced padding mask should have all 0's.
    # If this assertion fails, the slice logic here would not hold.
    sliced_padding_mask = slice_single_tensor(padding_mask)
    assert_padding_mask = math_ops.equal(
        math_ops.reduce_sum(sliced_padding_mask), 0)

    with ops.control_dependencies([assert_padding_mask]):
      should_stop = _StopSignals.should_stop(
          _StopSignals.as_scalar_stopping_signal(signals))

    is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)

    def slice_fn(tensor):
      # If the current batch is full batch or part of stopping signals, we do
      # not need to slice to save performance.
      return control_flow_ops.cond(
          math_ops.logical_or(should_stop, is_full_batch),
          (lambda: verify_batch_size(tensor)),
          (lambda: slice_single_tensor(tensor)))

    return nest.map_structure(slice_fn, tensor_or_dict) 
Example #13
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_logical_binary(logical_bin_op, data):
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype='bool', name='in_0'),
                   array_ops.placeholder(shape=data[1].shape, dtype='bool', name='in_1')]
        if logical_bin_op == math_ops.logical_not:
            # logical_not is unary, so first combine the two inputs with
            # logical_or, then negate the result
            out = math_ops.logical_or(in_data[0], in_data[1], name='out1')
            out = logical_bin_op(out, name='out')
        else:
            out = logical_bin_op(in_data[0], in_data[1], name='out')

        compare_tflite_with_tvm(data, ['in_0:0', 'in_1:0'], in_data, [out])
Example #14
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_forward_logical_or(data):
    """ One iteration of logical or """
    return _test_logical_binary(math_ops.logical_or, data) 
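A hypothetical invocation, mirroring how the other logical-op tests in this file feed pairs of random boolean arrays (the shape is an assumption):

import numpy as np

data = [np.random.choice([True, False], size=(2, 3)),
        np.random.choice([True, False], size=(2, 3))]
_test_forward_logical_or(data)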
Example #15
Source File: check_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _dynamic_rank_in(actual_rank, given_ranks):
  if len(given_ranks) < 1:
    return ops.convert_to_tensor(False)
  result = math_ops.equal(given_ranks[0], actual_rank)
  for given_rank in given_ranks[1:]:
    result = math_ops.logical_or(
        result, math_ops.equal(given_rank, actual_rank))
  return result 
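The loop above ORs together one equality test per allowed rank, yielding a scalar boolean. An eager sketch:

import tensorflow as tf

actual_rank = tf.rank(tf.zeros([2, 3]))            # 2
given_ranks = (1, 2, 4)
result = tf.equal(given_ranks[0], actual_rank)
for given_rank in given_ranks[1:]:
    result = tf.logical_or(result, tf.equal(given_rank, actual_rank))
print(result)  # True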
Example #16
Source File: uniform.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape_tensor())
    return array_ops.where(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        array_ops.where(
            math_ops.logical_or(broadcasted_x < self.low,
                                broadcasted_x >= self.high),
            array_ops.zeros_like(broadcasted_x),
            array_ops.ones_like(broadcasted_x) / self.range())) 
Example #17
Source File: feature_column.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))

    values = math_ops.to_int64(input_tensor.values, name='values')
    num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
    zero = math_ops.to_int64(0, name='zero')
    if self.default_value is None:
      # Fail if values are out-of-range.
      assert_less = check_ops.assert_less(
          values, num_buckets, data=(values, num_buckets),
          name='assert_less_than_num_buckets')
      assert_greater = check_ops.assert_greater_equal(
          values, zero, data=(values,),
          name='assert_greater_or_equal_0')
      with ops.control_dependencies((assert_less, assert_greater)):
        values = array_ops.identity(values)
    else:
      # Assign default for out-of-range values.
      values = array_ops.where(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.to_int64(self.default_value),
              name='default_values'),
          values)

    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape) 
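The out-of-range branch above can be reduced to a few lines; a standalone sketch with invented values (TF2 APIs in place of the deprecated to_int64):

import tensorflow as tf

values = tf.constant([3, -1, 7, 2], dtype=tf.int64)
num_buckets = tf.constant(5, tf.int64)
default_value = tf.constant(0, tf.int64)
out_of_range = tf.logical_or(values < 0, values >= num_buckets)
cleaned = tf.where(out_of_range,
                   tf.fill(tf.shape(values), default_value),
                   values)
print(cleaned)  # [3 0 0 2]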
Example #18
Source File: check_ops.py    From keras-lambda with MIT License
def _dynamic_rank_in(actual_rank, given_ranks):
  if len(given_ranks) < 1:
    return ops.convert_to_tensor(False)
  result = math_ops.equal(given_ranks[0], actual_rank)
  for given_rank in given_ranks[1:]:
    result = math_ops.logical_or(
        result, math_ops.equal(given_rank, actual_rank))
  return result 
Example #19
Source File: boolean_mask.py    From keras-lambda with MIT License
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain) 
Example #20
Source File: uniform.py    From keras-lambda with MIT License
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return array_ops.where(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        array_ops.where(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x))) 
Example #21
Source File: layers.py    From tf-slim with Apache License 2.0
def _lower_bound_grad(op, grad):
    """Gradient for `_lower_bound`.

    Args:
      op: the tensorflow op for which to calculate a gradient
      grad: gradient with respect to the output of the op

    Returns:
      gradients with respect to the inputs of the op
    """
    inputs = op.inputs[0]
    bound = op.inputs[1]
    pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
    return [math_ops.cast(pass_through_if, grad.dtype) * grad, None] 
Example #22
Source File: tf_helpers.py    From Counterfactual-StoryRW with MIT License
def next_inputs(self, time, outputs, state, sample_ids, name=None,
                reach_max_time=None):
    """Gets the inputs for next step."""
    finished = math_ops.equal(sample_ids, self._end_token)
    all_finished = math_ops.reduce_all(finished)
    if reach_max_time is not None:
        all_finished = tf.logical_or(all_finished, reach_max_time)

    if self._embedding_args_cnt == 1:
        del time, outputs  # unused by next_inputs_fn
        next_inputs = control_flow_ops.cond(
            all_finished,
            # If we're finished, the next_inputs value doesn't matter
            lambda: self._start_inputs,
            lambda: self._embedding_fn(sample_ids))
    elif self._embedding_args_cnt == 2:
        del outputs
        # Prepare the position embedding of the next step
        times = tf.ones(self._batch_size, dtype=tf.int32) * (time + 1)
        next_inputs = control_flow_ops.cond(
            all_finished,
            # If we're finished, the next_inputs value doesn't matter
            lambda: self._start_inputs,
            lambda: self._embedding_fn(sample_ids, times))

    return finished, next_inputs, state
Example #23
Source File: layers.py    From tensornets with MIT License
def _lower_bound_grad(op, grad):
    """Gradient for `_lower_bound`.

    Args:
      op: the tensorflow op for which to calculate a gradient
      grad: gradient with respect to the output of the op

    Returns:
      gradients with respect to the inputs of the op
    """
    inputs = op.inputs[0]
    bound = op.inputs[1]
    pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
    return [math_ops.cast(pass_through_if, grad.dtype) * grad, None] 
Example #24
Source File: check_ops.py    From lambda-packs with MIT License
def _dynamic_rank_in(actual_rank, given_ranks):
  if len(given_ranks) < 1:
    return ops.convert_to_tensor(False)
  result = math_ops.equal(given_ranks[0], actual_rank)
  for given_rank in given_ranks[1:]:
    result = math_ops.logical_or(
        result, math_ops.equal(given_rank, actual_rank))
  return result 
Example #25
Source File: uniform.py    From lambda-packs with MIT License
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape_tensor())
    return array_ops.where(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        array_ops.where(
            math_ops.logical_or(broadcasted_x < self.low,
                                broadcasted_x >= self.high),
            array_ops.zeros_like(broadcasted_x),
            array_ops.ones_like(broadcasted_x) / self.range())) 
Example #26
Source File: feature_column.py    From lambda-packs with MIT License
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))

    values = math_ops.to_int64(input_tensor.values, name='values')
    num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
    zero = math_ops.to_int64(0, name='zero')
    if self.default_value is None:
      # Fail if values are out-of-range.
      assert_less = check_ops.assert_less(
          values, num_buckets, data=(values, num_buckets),
          name='assert_less_than_num_buckets')
      assert_greater = check_ops.assert_greater_equal(
          values, zero, data=(values,),
          name='assert_greater_or_equal_0')
      with ops.control_dependencies((assert_less, assert_greater)):
        values = array_ops.identity(values)
    else:
      # Assign default for out-of-range values.
      values = array_ops.where(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.to_int64(self.default_value),
              name='default_values'),
          values)

    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape) 
Example #27
Source File: tfexample_decoder.py    From lambda-packs with MIT License
def _decode(self, image_buffer, image_format):
    """Decodes the image buffer.

    Args:
      image_buffer: The tensor representing the encoded image tensor.
      image_format: The image format for the image in `image_buffer`. If image
        format is `raw`, all images are expected to be in this format, otherwise
        this op can decode a mix of `jpg` and `png` formats.

    Returns:
      A tensor that represents decoded image of self._shape, or
      (?, ?, self._channels) if self._shape is not specified.
    """
    def decode_image():
      """Decodes a png or jpg based on the headers."""
      return image_ops.decode_image(image_buffer, self._channels)

    def decode_raw():
      """Decodes a raw image."""
      return parsing_ops.decode_raw(image_buffer, out_type=self._dtype)

    pred_fn_pairs = {
        math_ops.logical_or(
            math_ops.equal(image_format, 'raw'),
            math_ops.equal(image_format, 'RAW')): decode_raw,
    }
    image = control_flow_ops.case(
        pred_fn_pairs, default=decode_image, exclusive=True)

    image.set_shape([None, None, self._channels])
    if self._shape is not None:
      image = array_ops.reshape(image, self._shape)

    return image 
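The case predicate above routes 'raw'/'RAW' buffers to the raw decoder and everything else to the generic image decoder. A reduced sketch of the dispatch using tf.cond (the string constant is made up):

import tensorflow as tf

image_format = tf.constant('RAW')
is_raw = tf.logical_or(tf.equal(image_format, 'raw'),
                       tf.equal(image_format, 'RAW'))
branch = tf.cond(is_raw,
                 lambda: tf.constant('decode_raw'),
                 lambda: tf.constant('decode_image'))
print(branch)  # b'decode_raw'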
Example #28
Source File: boolean_mask.py    From lambda-packs with MIT License
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain) 
Example #29
Source File: core.py    From lambda-packs with MIT License
def __or__(self, other):
    return logical_or(self, other) 
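tf.Tensor defines the same overload, so `|` on boolean tensors dispatches to logical_or:

import tensorflow as tf

a = tf.constant([True, False])
b = tf.constant([False, False])
print(a | b)                # [ True False]
print(tf.logical_or(a, b))  # identical result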
Example #30
Source File: check_ops.py    From auto-alt-text-lambda-api with MIT License
def _dynamic_rank_in(actual_rank, given_ranks):
  if len(given_ranks) < 1:
    return ops.convert_to_tensor(False)
  result = math_ops.equal(given_ranks[0], actual_rank)
  for given_rank in given_ranks[1:]:
    result = math_ops.logical_or(
        result, math_ops.equal(given_rank, actual_rank))
  return result