Python tensorflow.compat.v1.equal() Examples

The following are 30 code examples of tensorflow.compat.v1.equal(), drawn from open-source projects; the source file and project for each example are noted above it. You may also want to check out all available functions/classes of the module tensorflow.compat.v1.
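For orientation, here is a minimal, self-contained sketch of what tf.compat.v1.equal() does (the tensor values below are illustrative): it compares two tensors element-wise, broadcasting where necessary, and returns a boolean tensor.

import tensorflow.compat.v1 as tf

a = tf.constant([1, 2, 3, 2])
b = tf.constant(2)
mask = tf.equal(a, b)                           # [False, True, False, True]
count = tf.reduce_sum(tf.cast(mask, tf.int32))  # 2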
Example #1
Source File: lstm_models.py    From magenta with Apache License 2.0
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    b_enc, b_dec = tf.split(
        flat_rnn_output,
        [self._nade.num_hidden, self._output_depth], axis=1)
    ll, cond_probs = self._nade.log_prob(
        flat_x_target, b_enc=b_enc, b_dec=b_dec)
    r_loss = -ll
    flat_truth = tf.cast(flat_x_target, tf.bool)
    flat_predictions = tf.greater_equal(cond_probs, 0.5)

    metric_map = {
        'metrics/accuracy':
            tf.metrics.mean(
                tf.reduce_all(tf.equal(flat_truth, flat_predictions), axis=-1)),
        'metrics/recall':
            tf.metrics.recall(flat_truth, flat_predictions),
        'metrics/precision':
            tf.metrics.precision(flat_truth, flat_predictions),
    }

    return r_loss, metric_map 
Example #2
Source File: multistep_with_adamoptimizer.py    From tensor2tensor with Apache License 2.0
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(
            tf.zeros_like(grad_acc), use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Example #3
Source File: multistep_optimizer.py    From tensor2tensor with Apache License 2.0
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                              use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Example #4
Source File: multistep_optimizer.py    From tensor2tensor with Apache License 2.0
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t,
              use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t,
              use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)
        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                     use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Example #5
Source File: beam_search.py    From tensor2tensor with Apache License 2.0
def top_k_with_unique(inputs, k):
  """Finds the values and indices of the k largests entries.

  Instead of doing sort like tf.nn.top_k, this function finds the max value
  k times. The running time is proportional to k, which is be faster when k
  is small. The current implementation supports only inputs of rank 2.
  In addition, iota is used to replace the lower bits of each element, this
  makes the selection more stable when there are equal elements. The
  overhead is that output values are approximated.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    top_values: A tensor, the k largest elements in sorted order.
      [batch_size, k].
    indices: A tensor, indices of the top_values. [batch_size, k].
  """
  unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))
  top_values, indices = _create_topk_unique(unique_inputs, k)
  top_values = tf.cast(top_values, inputs.dtype)
  return top_values, indices 
Example #6
Source File: metrics.py    From tensor2tensor with Apache License 2.0
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Example #7
Source File: utils.py    From mesh with Apache License 2.0
def clean_decodes(ids, eos_id=1, pad_id=0, length_axis=-1):
  """Replaces everything after EOS with PAD (along last axis).

  Args:
    ids: a Tensor of type int.
    eos_id: int, EOS id.
    pad_id: int, PAD id.
    length_axis: an integer.

  Returns:
    a Tensor of type int of ids.
  """
  eos_and_after = tf.cumsum(tf.cast(tf.equal(ids, eos_id), tf.int32),
                            exclusive=True, axis=length_axis)
  valid_ids = tf.equal(eos_and_after, 0)
  return tf.where_v2(valid_ids, ids, pad_id) 
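A rough illustration of the masking above, with toy ids (not part of the original file; it assumes the same tf alias as the snippet):

ids = tf.constant([[3, 5, 1, 7, 7],
                   [4, 1, 0, 0, 0]])
# The exclusive cumsum of tf.equal(ids, eos_id) stays 0 up to and including
# the first EOS, so those positions are kept and everything after becomes PAD.
cleaned = clean_decodes(ids, eos_id=1, pad_id=0)
# -> [[3, 5, 1, 0, 0],
#     [4, 1, 0, 0, 0]]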
Example #8
Source File: metrics.py    From tensor2tensor with Apache License 2.0
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights 
Example #9
Source File: decode_utils.py    From language with Apache License 2.0
def _get_action_id(extended_indices, action_types, output_vocab_size,
                   model_config):
  """Returns action_id tensor."""
  # This initial value will be broadcast to the length of decode_steps.
  action_ids = tf.constant(0, dtype=tf.int64)
  for action_type_range in _get_action_types_to_range(output_vocab_size,
                                                      model_config):
    is_type = tf.equal(
        tf.constant(action_type_range.action_type, dtype=tf.int64),
        action_types)
    # For each timestep, exactly one of the action_type_ranges will be added,
    # so this sum will populate each entry on exactly one iteration.
    action_ids += (
        tf.to_int64(is_type) *
        (extended_indices - action_type_range.start_index))
  return action_ids 
Example #10
Source File: dropout.py    From lamb with Apache License 2.0
def _build(self, x, state):
    prev_keep_mask = state
    shape = tf.shape(x)
    noise = tf.random_uniform(shape, dtype=x.dtype)
    other_mask = tf.floor(self._keep_prob + noise)
    choice_noise = tf.random_uniform(shape, dtype=x.dtype)
    choice = tf.less(choice_noise, self._flip_prob)
    # KLUDGE(melisgl): The client has to pass the last keep_mask from
    # a batch to the next so the mask may end up next to some
    # recurrent cell state. This state is often zero at the beginning
    # and may be periodically zeroed (per example) during training.
    # While zeroing LSTM state is okay, zeroing the dropout mask is
    # not. So instead of forcing every client to deal with this common
    # (?) case, if an all zero mask is detected, then regenerate a
    # fresh mask. This is of course a major hack and won't help with
    # learnt initial states, for example.
    sum_ = tf.reduce_sum(prev_keep_mask, 1, keepdims=True)
    is_initializing = tf.equal(sum_, 0.0)

    self._keep_mask = tf.where(tf.logical_or(choice, is_initializing),
                               other_mask,
                               prev_keep_mask)
    self._time_step += 1
    return x * self._keep_mask / self._keep_prob * self._scaler 
Example #11
Source File: dropout.py    From lamb with Apache License 2.0
def _ensure_keep_mask(self, x):
    if self._keep_mask is None or not self._share_mask:
      shape = tf.shape(x)
      k = shape[1]
      # To make this class a drop-in replacement for Bernoulli dropout we
      # parameterize it with keep_prob. Set alpha of the Dirichlet so that the
      # variance is equal to the variance of the Bernoulli with p=keep_prob
      # divided by keep_prob.
      # Now the variance of the Dirichlet with k equal alphas is
      # (k-1)/(k^2*(k*alpha+1)). Solve that for alpha.
      kf = tf.cast(k, tf.float32)
      alpha = self._keep_prob * (kf - 1.0) / ((1-self._keep_prob)*kf) - 1.0/kf
      dist = tfp.distributions.Dirichlet(tf.ones(shape=k) * alpha)
      assert (dist.reparameterization_type ==
              tfp.distributions.FULLY_REPARAMETERIZED)
      # E[Dir(alpha)] = 1/k for all elements, but we want the expectation to
      # be keep_prob, hence the multiplication.
      self._keep_mask = kf * dist.sample(shape[0])
      self._keep_mask.set_shape(x.get_shape())
    return self._keep_mask 
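For reference, the alpha used above follows from matching the per-element variance of the scaled Dirichlet mask to that of an inverted Bernoulli dropout mask; a sketch of the algebra, writing p for keep_prob and k for the mask width:

Var[Bernoulli(p) / p]               = (1 - p) / p
Var[k * Dirichlet_i(alpha, ..., alpha)] = k^2 * (k - 1) / (k^2 * (k * alpha + 1))
                                    = (k - 1) / (k * alpha + 1)
Equating the two:  k * alpha + 1 = p * (k - 1) / (1 - p)
                   alpha = p * (k - 1) / ((1 - p) * k) - 1 / k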
Example #12
Source File: metrics.py    From tensor2tensor with Apache License 2.0
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Example #13
Source File: sample.py    From gpt2-estimator with MIT License
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )
    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    ) 
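A quick sanity sketch of the truncation (toy values, not from the original file; it assumes the snippet's tf alias):

logits = tf.constant([[1.0, 3.0, 2.0, 0.5]])
truncated = top_k_logits(logits, k=2)
# The smallest of the top-2 values is 2.0, so everything below it is pushed
# to -1e10 and effectively removed from a later softmax/sampling step:
# -> [[-1e10, 3.0, 2.0, -1e10]]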
Example #14
Source File: loss.py    From interval-bound-propagation with Apache License 2.0
def _build_attack_loss(self, labels):
    """Build adversarial loss using PGD attack."""
    # PGD attack.
    if not self._attack:
      self._attack_accuracy = tf.constant(0.)
      self._attack_success = tf.constant(1.)
      self._attack_cross_entropy = tf.constant(0.)
      return
    if not isinstance(self._predictor.inputs, tf.Tensor):
      raise ValueError('Multiple inputs is not supported.')
    self._attack(self._predictor.inputs, labels)
    correct_examples = tf.equal(labels, tf.argmax(self._attack.logits, 1))
    self._attack_accuracy = tf.reduce_mean(
        tf.cast(correct_examples, tf.float32))
    self._attack_success = tf.reduce_mean(
        tf.cast(self._attack.success, tf.float32))
    if self._label_smoothing > 0:
      attack_cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          labels=self._one_hot_labels, logits=self._attack.logits)
    else:
      attack_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=self._attack.logits)
    self._attack_cross_entropy = tf.reduce_mean(attack_cross_entropy) 
Example #15
Source File: loss.py    From interval-bound-propagation with Apache License 2.0
def _build_nominal_loss(self, labels):
    """Build natural cross-entropy loss on clean data."""
    # Cross-entropy.
    nominal_logits = self._predictor.logits
    if self._label_smoothing > 0:
      num_classes = nominal_logits.shape[1].value
      one_hot_labels = tf.one_hot(labels, num_classes)
      smooth_positives = 1. - self._label_smoothing
      smooth_negatives = self._label_smoothing / num_classes
      one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
      nominal_cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          labels=one_hot_labels, logits=nominal_logits)
      self._one_hot_labels = one_hot_labels
    else:
      nominal_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=nominal_logits)
    self._cross_entropy = tf.reduce_mean(nominal_cross_entropy)
    # Accuracy.
    nominal_correct_examples = tf.equal(labels, tf.argmax(nominal_logits, 1))
    self._nominal_accuracy = tf.reduce_mean(
        tf.cast(nominal_correct_examples, tf.float32)) 
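The tf.equal + tf.argmax accuracy pattern used above is generic; a standalone sketch with toy tensors (names and values are illustrative):

labels = tf.constant([2, 0, 1], dtype=tf.int64)
logits = tf.constant([[0.1, 0.2, 0.7],
                      [0.9, 0.05, 0.05],
                      [0.3, 0.3, 0.4]])
correct = tf.equal(labels, tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))  # 2/3 for these values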
Example #16
Source File: lstm_models.py    From magenta with Apache License 2.0
def build(self, hparams, output_depth, is_training=True):
    self.hparams = hparams
    self._output_depth = output_depth
    self._total_length = hparams.max_seq_len
    if self._total_length != np.prod(self._level_lengths):
      raise ValueError(
          'The product of the HierarchicalLstmDecoder level lengths (%d) must '
          'equal the padded input sequence length (%d).' % (
              np.prod(self._level_lengths), self._total_length))
    tf.logging.info('\nHierarchical Decoder:\n'
                    '  input length: %d\n'
                    '  level output lengths: %s\n',
                    self._total_length,
                    self._level_lengths)

    self._hier_cells = [
        lstm_utils.rnn_cell(
            hparams.dec_rnn_size,
            dropout_keep_prob=hparams.dropout_keep_prob,
            residual=hparams.residual_decoder)
        # Subtract 1 for the core decoder level
        for _ in range(len(self._level_lengths) - 1)]

    with tf.variable_scope('core_decoder', reuse=tf.AUTO_REUSE):
      self._core_decoder.build(hparams, output_depth, is_training) 
Example #17
Source File: lstm_models.py    From magenta with Apache License 2.0
def build(self, hparams, is_training=True):
    self._total_length = hparams.max_seq_len
    if self._total_length != np.prod(self._level_lengths):
      raise ValueError(
          'The product of the HierarchicalLstmEncoder level lengths (%d) must '
          'equal the padded input sequence length (%d).' % (
              np.prod(self._level_lengths), self._total_length))
    tf.logging.info('\nHierarchical Encoder:\n'
                    '  input length: %d\n'
                    '  level lengths: %s\n',
                    self._total_length,
                    self._level_lengths)
    self._hierarchical_encoders = []
    num_splits = int(np.prod(self._level_lengths))
    for i, l in enumerate(self._level_lengths):
      num_splits //= l
      tf.logging.info('Level %d splits: %d', i, num_splits)
      h_encoder = self._core_encoder_cls()
      h_encoder.build(
          hparams, is_training,
          name_or_scope=tf.VariableScope(
              tf.AUTO_REUSE, 'encoder/hierarchical_level_%d' % i))
      self._hierarchical_encoders.append((num_splits, h_encoder)) 
Example #18
Source File: utils.py    From magenta with Apache License 2.0
def softmax_summaries(loss, logits, one_hot_labels, name="softmax"):
  """Create the softmax summaries for this cross entropy loss.

  Args:
    loss: Cross-entropy loss.
    logits: The [batch_size, classes] float tensor representing the logits.
    one_hot_labels: The float tensor representing actual class ids. If this is
      [batch_size, classes], then we take the argmax of it first.
    name: Prepended to summary scope.
  """
  tf.summary.scalar(name + "_loss", loss)

  one_hot_labels = tf.cond(
      tf.equal(tf.rank(one_hot_labels), 2),
      lambda: tf.to_int32(tf.argmax(one_hot_labels, 1)),
      lambda: tf.to_int32(one_hot_labels))

  in_top_1 = tf.nn.in_top_k(logits, one_hot_labels, 1)
  tf.summary.scalar(name + "_precision@1",
                    tf.reduce_mean(tf.to_float(in_top_1)))
  in_top_5 = tf.nn.in_top_k(logits, one_hot_labels, 5)
  tf.summary.scalar(name + "_precision@5",
                    tf.reduce_mean(tf.to_float(in_top_5))) 
Example #19
Source File: vqa_utils.py    From tensor2tensor with Apache License 2.0
def _apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)
  ])[0] 
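A hypothetical usage sketch (the image tensor and distortion function below are illustrative, and the snippet's own imports of tf and control_flow_ops are assumed to be in scope):

image = tf.random_uniform([8, 8, 3])
distorted = _apply_with_random_selector(
    image,
    lambda x, sel: tf.image.adjust_brightness(x, 0.1 * sel),
    num_cases=4)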
Example #20
Source File: ngram.py    From tensor2tensor with Apache License 2.0
def call(self, inputs):
    batch_shape = tf.shape(inputs)[:-1]
    length = tf.shape(inputs)[-1]
    ngram_range_counts = []
    for n in range(self.minval, self.maxval):
      # Reshape inputs from [..., length] to [..., 1, length // n, n], dropping
      # remainder elements. Each n-vector is an ngram.
      reshaped_inputs = tf.reshape(
          inputs[..., :(n * (length // n))],
          tf.concat([batch_shape, [1], (length // n)[tf.newaxis], [n]], 0))
      # Count the number of times each ngram appears in the input. We do so by
      # checking whether each n-vector in the input is equal to each n-vector
      # in a Tensor of all possible ngrams. The comparison is batched between
      # the input Tensor of shape [..., 1, length // n, n] and the ngrams Tensor
      # of shape [..., input_dim**n, 1, n].
      ngrams = tf.reshape(
          list(np.ndindex((self.input_dim,) * n)),
          [1] * (len(inputs.shape)-1) + [self.input_dim**n, 1, n])
      is_ngram = tf.equal(
          tf.reduce_sum(tf.cast(tf.equal(reshaped_inputs, ngrams), tf.int32),
                        axis=-1),
          n)
      ngram_counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1)
      ngram_range_counts.append(ngram_counts)
    return tf.concat(ngram_range_counts, axis=-1) 
Example #21
Source File: seq2seq.py    From magenta with Apache License 2.0
def _call_sampler(sample_n_fn, sample_shape, name=None):
  """Reshapes vector of samples."""
  with tf.name_scope(name, "call_sampler", values=[sample_shape]):
    sample_shape = tf.convert_to_tensor(
        sample_shape, dtype=tf.int32, name="sample_shape")
    # Ensure sample_shape is a vector (vs just a scalar).
    pad = tf.cast(tf.equal(tf.rank(sample_shape), 0), tf.int32)
    sample_shape = tf.reshape(
        sample_shape,
        tf.pad(tf.shape(sample_shape),
               paddings=[[pad, 0]],
               constant_values=1))
    samples = sample_n_fn(tf.reduce_prod(sample_shape))
    batch_event_shape = tf.shape(samples)[1:]
    final_shape = tf.concat([sample_shape, batch_event_shape], 0)
    return tf.reshape(samples, final_shape) 
Example #22
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1) 
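A toy check of the behaviour (illustrative values, assuming the snippet's tf alias):

x = tf.constant([[0.1, 0.7, 0.2]])
values, indices = top_1_tpu(x)
# values -> [0.7], indices -> [1]; on ties the largest matching index wins,
# since the masked tf.range is reduced with tf.reduce_max.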
Example #23
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def weights_multi_problem(labels, taskid=-1):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all labels past the taskid.

  Args:
    labels: A Tensor of int32s.
    taskid: an int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: The Task ID must be valid.
  """
  taskid = check_nonnegative(taskid)
  past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= to_float(tf.not_equal(labels, taskid))
  non_taskid = to_float(labels)
  return to_float(tf.not_equal(past_taskid * non_taskid, 0)) 
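For example, with labels = [[7, 7, 9, 4, 5, 0]] and taskid = 9 (toy values), the cumulative sum marks every position from the task id onward, the extra multiplication zeroes out the task-id position itself, and padding zeros remain at weight 0, so the returned weights are [[0, 0, 0, 1, 1, 0]].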
Example #24
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT")."""
  with tf.name_scope("pool", values=[inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      if len(static_shape) == 3:
        width_padding = 2 * (window_size[1] // 2)
        padding_ = [[0, 0], [width_padding, 0], [0, 0]]
      else:
        height_padding = 2 * (window_size[0] // 2)
        cond_padding = tf.cond(
            tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
            lambda: tf.constant(2 * (window_size[1] // 2)))
        width_padding = 0 if static_shape[2] == 1 else cond_padding
        padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"

  return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides) 
Example #25
Source File: region_similarity_calculator.py    From Object_Detection_Tracking with Apache License 2.0
def iou(boxlist1, boxlist2, scope=None):
  """Computes pairwise intersection-over-union between box collections.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'IOU'):
    intersections = intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = (
        tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections), tf.truediv(intersections, unions)) 
Example #26
Source File: specification.py    From interval-bound-propagation with Apache License 2.0
def __init__(self, c, d=None, prune_irrelevant=True, collapse=True):
    """Builds a linear specification module."""
    super(LinearSpecification, self).__init__(name='specs', collapse=collapse)
    # c has shape [batch_size, num_specifications, num_outputs]
    # d has shape [batch_size, num_specifications]
    # Some specifications may be irrelevant (not a function of the output).
    # We automatically remove them for clarity. We expect the number of
    # irrelevant specs to be equal for all elements of a batch.
    # Shape is [batch_size, num_specifications]
    if prune_irrelevant:
      irrelevant = tf.equal(tf.reduce_sum(
          tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
      batch_size = tf.shape(c)[0]
      num_outputs = tf.shape(c)[2]
      irrelevant = tf.tile(irrelevant, [1, 1, num_outputs])
      self._c = tf.reshape(
          tf.boolean_mask(c, tf.logical_not(irrelevant)),
          [batch_size, -1, num_outputs])
    else:
      self._c = c
    self._d = d 
Example #27
Source File: image_utils.py    From magenta with Apache License 2.0
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width 
Example #28
Source File: utils.py    From magenta with Apache License 2.0
def inv_mu_law_numpy(x, mu=255.0):
  """A numpy implementation of inverse Mu-Law.

  Args:
    x: The Mu-Law samples to decode.
    mu: The Mu we used to encode these samples.

  Returns:
    out: The decoded data.
  """
  x = np.array(x).astype(np.float32)
  out = (x + 0.5) * 2. / (mu + 1)
  out = np.sign(out) / mu * ((1 + mu)**np.abs(out) - 1)
  out = np.where(np.equal(x, 0), x, out)
  return out 
Example #29
Source File: lib_tfsampling.py    From magenta with Apache License 2.0
def make_outer_masks(self, outer_masks, input_pianorolls):
    """Returns outer masks, if all zeros created by completion masking."""
    outer_masks = tf.to_float(outer_masks)
    # If outer_masks come in as all zeros, it means there's no masking,
    # which also means nothing will be generated. In this case, use
    # completion mask to make new outer masks.
    outer_masks = tf.cond(
        tf.reduce_all(tf.equal(outer_masks, 0)),
        lambda: make_completion_masks(input_pianorolls),
        lambda: outer_masks)
    return outer_masks 
Example #30
Source File: lib_tfsampling.py    From magenta with Apache License 2.0
def sample_with_temperature(logits, temperature):
  """Either argmax after softmax or random sample along the pitch axis.

  Args:
    logits: a Tensor of shape (batch, time, pitch, instrument).
    temperature: a float; 0.0 means argmax, 1.0 means random sampling.

  Returns:
    a Tensor of the same shape, with one_hots on the pitch dimension.
  """
  logits = tf.transpose(logits, [0, 1, 3, 2])
  pitch_range = tf.shape(logits)[-1]

  def sample_from_logits(logits):
    with tf.control_dependencies([tf.assert_greater(temperature, 0.0)]):
      logits = tf.identity(logits)
    reshaped_logits = (
        tf.reshape(logits, [-1, tf.shape(logits)[-1]]) / temperature)
    choices = tf.multinomial(reshaped_logits, 1)
    choices = tf.reshape(choices,
                         tf.shape(logits)[:logits.get_shape().ndims - 1])
    return choices

  choices = tf.cond(tf.equal(temperature, 0.0),
                    lambda: tf.argmax(tf.nn.softmax(logits), -1),
                    lambda: sample_from_logits(logits))
  samples_onehot = tf.one_hot(choices, pitch_range)
  return tf.transpose(samples_onehot, [0, 1, 3, 2])