Python tensorflow.assert_positive() Examples

The following are 30 code examples of tensorflow.assert_positive(), drawn from open-source projects. Each example notes the source file and project it was taken from, along with that project's license. You may also want to check out the other available functions and classes of the tensorflow module.
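Before the project examples, here is a minimal self-contained sketch of the usual pattern: tf.assert_positive() returns an op that raises tf.errors.InvalidArgumentError at run time if any element is not strictly positive, and it is typically attached to downstream computation via tf.control_dependencies(). The sketch assumes the TF 1.x graph-mode API (tf.compat.v1 in TF 2.x); the tensor names are arbitrary.

import tensorflow as tf

# Hypothetical input; any positive-valued tensor works.
counts = tf.constant([3, 1, 4], name="counts")

# The assertion op fails at session.run time if any element is <= 0.
assert_op = tf.assert_positive(counts, message="counts must be > 0")

with tf.control_dependencies([assert_op]):
  # The identity only executes after the assertion has passed.
  safe_counts = tf.identity(counts, name="safe_counts")

with tf.Session() as sess:
  print(sess.run(safe_counts))  # [3 1 4]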
Example #1
Source File: bulk_component.py    From multilabel-image-classification-tensorflow with MIT License
def build_cross_entropy_loss(logits, gold):
  """Constructs a cross entropy from logits and one-hot encoded gold labels.

  Supports skipping rows where the gold label is the magic -1 value.

  Args:
    logits: float Tensor of scores.
    gold: int Tensor of gold label ids.

  Returns:
    cost, correct, total: the total cost, the total number of correctly
        predicted labels, and the total number of valid labels.
  """
  valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
  gold = tf.gather(gold, valid)
  logits = tf.gather(logits, valid)
  correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
  total = tf.size(gold)
  with tf.control_dependencies([tf.assert_positive(total)]):
    cost = tf.reduce_sum(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.cast(gold, tf.int64), logits=logits)) / tf.cast(
                total, tf.float32)
  return cost, correct, total 
Example #2
Source File: bulk_component.py    From object_detection_with_tensorflow with MIT License
def build_cross_entropy_loss(logits, gold):
  """Constructs a cross entropy from logits and one-hot encoded gold labels.

  Supports skipping rows where the gold label is the magic -1 value.

  Args:
    logits: float Tensor of scores.
    gold: int Tensor of gold label ids.

  Returns:
    cost, correct, total: the total cost, the total number of correctly
        predicted labels, and the total number of valid labels.
  """
  valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
  gold = tf.gather(gold, valid)
  logits = tf.gather(logits, valid)
  correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
  total = tf.size(gold)
  with tf.control_dependencies([tf.assert_positive(total)]):
    cost = tf.reduce_sum(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.cast(gold, tf.int64), logits=logits)) / tf.cast(
                total, tf.float32)
  return cost, correct, total 
Example #3
Source File: fisher_blocks.py    From kfac with Apache License 2.0
def compute_pi_tracenorm(left_cov, right_cov):
  """Computes the scalar constant pi for Tikhonov regularization/damping.

  pi = sqrt( (trace(A) / dim(A)) / (trace(B) / dim(B)) )
  See section 6.3 of https://arxiv.org/pdf/1503.05671.pdf for details.

  Args:
    left_cov: A LinearOperator object. The left Kronecker factor "covariance".
    right_cov: A LinearOperator object. The right Kronecker factor "covariance".

  Returns:
    The computed scalar constant pi for these Kronecker Factors (as a Tensor).
  """
  # Instead of dividing by the dim of the norm, we multiply by the dim of the
  # other norm. This works out the same in the ratio.
  left_norm = left_cov.trace() * int(right_cov.domain_dimension)
  right_norm = right_cov.trace() * int(left_cov.domain_dimension)
  assert_positive = tf.assert_positive(
      right_norm,
      message="PI computation, trace of right cov matrix should be positive. "
      "Note that most likely cause of this error is that the optimizer "
      "diverged (e.g. due to bad hyperparameters).")
  with tf.control_dependencies([assert_positive]):
    pi = tf.sqrt(left_norm / right_norm)
  return pi 
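As a rough usage sketch for compute_pi_tracenorm() above (assuming a TF 1.x version where tf.linalg.LinearOperatorFullMatrix is available; the covariance values below are made up for illustration):

import tensorflow as tf

# Two small, explicitly positive-definite Kronecker factors.
left_cov = tf.linalg.LinearOperatorFullMatrix([[2.0, 0.0],
                                               [0.0, 2.0]])        # trace 4, dim 2
right_cov = tf.linalg.LinearOperatorFullMatrix([[4.0, 0.0, 0.0],
                                                [0.0, 4.0, 0.0],
                                                [0.0, 0.0, 4.0]])  # trace 12, dim 3

pi = compute_pi_tracenorm(left_cov, right_cov)

with tf.Session() as sess:
  # sqrt((4 / 2) / (12 / 3)) = sqrt(0.5) ~= 0.707
  print(sess.run(pi))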
Example #4
Source File: bulk_component.py    From yolo_v2 with Apache License 2.0
def build_cross_entropy_loss(logits, gold):
  """Constructs a cross entropy from logits and one-hot encoded gold labels.

  Supports skipping rows where the gold label is the magic -1 value.

  Args:
    logits: float Tensor of scores.
    gold: int Tensor of gold label ids.

  Returns:
    cost, correct, total: the total cost, the total number of correctly
        predicted labels, and the total number of valid labels.
  """
  valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
  gold = tf.gather(gold, valid)
  logits = tf.gather(logits, valid)
  correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
  total = tf.size(gold)
  with tf.control_dependencies([tf.assert_positive(total)]):
    cost = tf.reduce_sum(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.cast(gold, tf.int64), logits=logits)) / tf.cast(
                total, tf.float32)
  return cost, correct, total 
Example #5
Source File: bulk_component.py    From g-tensorflow-models with Apache License 2.0
def build_cross_entropy_loss(logits, gold):
  """Constructs a cross entropy from logits and one-hot encoded gold labels.

  Supports skipping rows where the gold label is the magic -1 value.

  Args:
    logits: float Tensor of scores.
    gold: int Tensor of gold label ids.

  Returns:
    cost, correct, total: the total cost, the total number of correctly
        predicted labels, and the total number of valid labels.
  """
  valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
  gold = tf.gather(gold, valid)
  logits = tf.gather(logits, valid)
  correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
  total = tf.size(gold)
  with tf.control_dependencies([tf.assert_positive(total)]):
    cost = tf.reduce_sum(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.cast(gold, tf.int64), logits=logits)) / tf.cast(
                total, tf.float32)
  return cost, correct, total 
Example #6
Source File: bulk_component.py    From Gun-Detector with Apache License 2.0
def build_cross_entropy_loss(logits, gold):
  """Constructs a cross entropy from logits and one-hot encoded gold labels.

  Supports skipping rows where the gold label is the magic -1 value.

  Args:
    logits: float Tensor of scores.
    gold: int Tensor of gold label ids.

  Returns:
    cost, correct, total: the total cost, the total number of correctly
        predicted labels, and the total number of valid labels.
  """
  valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
  gold = tf.gather(gold, valid)
  logits = tf.gather(logits, valid)
  correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
  total = tf.size(gold)
  with tf.control_dependencies([tf.assert_positive(total)]):
    cost = tf.reduce_sum(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.cast(gold, tf.int64), logits=logits)) / tf.cast(
                total, tf.float32)
  return cost, correct, total 
Example #7
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_zero(self):
    with self.test_session():
      meechum = tf.constant([0], name="meechum")
      with tf.control_dependencies([tf.assert_positive(meechum)]):
        out = tf.identity(meechum)
      with self.assertRaisesOpError("meechum"):
        out.eval() 
Example #8
Source File: reader.py    From iss-rnns with Apache License 2.0
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.
  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.
  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).
  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.
  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.strided_slice(data, [0, i * num_steps],
                         [batch_size, (i + 1) * num_steps])
    x.set_shape([batch_size, num_steps])
    y = tf.strided_slice(data, [0, i * num_steps + 1],
                         [batch_size, (i + 1) * num_steps + 1])
    y.set_shape([batch_size, num_steps])
    return x, y 
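A minimal driver for ptb_producer() above, using the TF 1.x queue-runner API that range_input_producer relies on; the toy corpus and sizes are arbitrary:

import tensorflow as tf

raw_data = list(range(100))  # hypothetical integer word ids
x, y = ptb_producer(raw_data, batch_size=4, num_steps=5)

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  inputs, targets = sess.run([x, y])  # each has shape (4, 5); y is x shifted right by one
  coord.request_stop()
  coord.join(threads)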
Example #9
Source File: seq_batch.py    From lang2program with Apache License 2.0
def reduce_max(seq_batch):
    sums = tf.reduce_sum(seq_batch.mask, 1, keep_dims=True)  # (batch_size, 1)
    with tf.control_dependencies([tf.assert_positive(sums)]):  # avoid dividing by zero
        seq_batch = seq_batch.with_pad_value(float('-inf'))  # set pad values to -inf
        result = tf.reduce_max(seq_batch.values, 1)
    return result 
Example #10
Source File: seq_batch.py    From lang2program with Apache License 2.0
def reduce_mean(seq_batch, allow_empty=False):
    """Compute the mean of each sequence in a SequenceBatch.

    Args:
        seq_batch (SequenceBatch): a SequenceBatch with the following attributes:
            values (Tensor): a Tensor of shape (batch_size, seq_length, :, ..., :)
            mask (Tensor): if the mask values are arbitrary floats (rather than binary), the mean will be
            a weighted average.
        allow_empty (bool): allow computing the average of an empty sequence. In this case, we assume 0/0 == 0, rather
            than NaN. Default is False, causing an error to be thrown.

    Returns:
        Tensor: of shape (batch_size, :, ..., :)
    """
    values, mask = seq_batch.values, seq_batch.mask
    # compute weights for the average
    sums = tf.reduce_sum(mask, 1, keep_dims=True)  # (batch_size, 1)

    if allow_empty:
        asserts = []  # no assertion
        sums = tf.select(tf.equal(sums, 0), tf.ones(tf.shape(sums)), sums)  # replace 0's with 1's
    else:
        asserts = [tf.assert_positive(sums)]  # throw error if 0's exist

    with tf.control_dependencies(asserts):
        weights = mask / sums  # (batch_size, seq_length)
    return weighted_sum(seq_batch, weights) 
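The same mask-sum-and-assert pattern can be written without the SequenceBatch wrapper; a standalone sketch (shapes and values chosen only for illustration):

import tensorflow as tf

# values: (batch_size, seq_length, dim); mask: (batch_size, seq_length)
values = tf.constant([[[1.0], [3.0], [0.0]]])  # one sequence with a padded step
mask = tf.constant([[1.0, 1.0, 0.0]])          # last position is padding

sums = tf.reduce_sum(mask, 1, keep_dims=True)  # (batch_size, 1)
with tf.control_dependencies([tf.assert_positive(sums)]):  # error on empty sequences
    weights = mask / sums                      # (batch_size, seq_length)

# Weighted average over the time dimension -> [[2.0]]
mean = tf.reduce_sum(values * tf.expand_dims(weights, -1), 1)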
Example #11
Source File: seq_batch.py    From lang2program with Apache License 2.0
def reduce_max(seq_batch):
    sums = tf.reduce_sum(seq_batch.mask, 1, keep_dims=True)  # (batch_size, 1)
    with tf.control_dependencies([tf.assert_positive(sums)]):  # avoid dividing by zero
        seq_batch = seq_batch.with_pad_value(float('-inf'))  # set pad values to -inf
        result = tf.reduce_max(seq_batch.values, 1)
    return result 
Example #12
Source File: seq_batch.py    From lang2program with Apache License 2.0
def reduce_mean(seq_batch, allow_empty=False):
    """Compute the mean of each sequence in a SequenceBatch.

    Args:
        seq_batch (SequenceBatch): a SequenceBatch with the following attributes:
            values (Tensor): a Tensor of shape (batch_size, seq_length, :, ..., :)
            mask (Tensor): if the mask values are arbitrary floats (rather than binary), the mean will be
            a weighted average.
        allow_empty (bool): allow computing the average of an empty sequence. In this case, we assume 0/0 == 0, rather
            than NaN. Default is False, causing an error to be thrown.

    Returns:
        Tensor: of shape (batch_size, :, ..., :)
    """
    values, mask = seq_batch.values, seq_batch.mask
    # compute weights for the average
    sums = tf.reduce_sum(mask, 1, keep_dims=True)  # (batch_size, 1)

    if allow_empty:
        asserts = []  # no assertion
        sums = tf.select(tf.equal(sums, 0), tf.ones(tf.shape(sums)), sums)  # replace 0's with 1's
    else:
        asserts = [tf.assert_positive(sums)]  # throw error if 0's exist

    with tf.control_dependencies(asserts):
        weights = mask / sums  # (batch_size, seq_length)
    return weighted_sum(seq_batch, weights) 
Example #13
Source File: reader.py    From Fast-Slow-LSTM with Apache License 2.0
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
    y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
    return x, y 
Example #14
Source File: mobilenetdet_preprocessing.py    From MobileNet with Apache License 2.0
def check_3d_image(image, require_static=True):
  """Assert that we are working with properly shaped image.

  Args:
    image: 3-D Tensor of shape [height, width, channels]
    require_static: If `True`, requires that all dimensions of `image` are
      known and non-zero.

  Raises:
    ValueError: if `image.shape` is not a 3-vector.

  Returns:
    An empty list, if `image` has fully defined dimensions. Otherwise, a list
    containing an assert op is returned.
  """
  try:
    image_shape = image.get_shape().with_rank(3)
  except ValueError:
    raise ValueError("'image' must be three-dimensional.")
  if require_static and not image_shape.is_fully_defined():
    raise ValueError("'image' must be fully defined.")
  if any(x == 0 for x in image_shape):
    raise ValueError("all dims of 'image.shape' must be > 0: %s" %
                     image_shape)
  if not image_shape.is_fully_defined():
    return [tf.assert_positive(tf.shape(image),
                                      ["all dims of 'image.shape' "
                                       "must be > 0."])]
  else:
    return [] 
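A rough usage sketch for check_3d_image() above; the placeholder and its shape are hypothetical, and with require_static=False the positivity check is deferred to run time via tf.assert_positive:

import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, None, 3])  # rank known, dims not fully defined
assert_ops = check_3d_image(image, require_static=False)

with tf.control_dependencies(assert_ops):
  # Any op placed here only runs after the shape assertion succeeds.
  image = tf.identity(image)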
Example #15
Source File: reader.py    From rotational-unit-of-memory with MIT License
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
    y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
    return x, y 
Example #16
Source File: reader.py    From deep_image_model with Apache License 2.0
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
    y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
    return x, y 
Example #17
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_empty_tensor_doesnt_raise(self):
    # A tensor is positive when it satisfies:
    #   For every element x_i in x, x_i > 0
    # and an empty tensor has no elements, so this is trivially satisfied.
    # This is standard set theory.
    with self.test_session():
      empty = tf.constant([], name="empty")
      with tf.control_dependencies([tf.assert_positive(empty)]):
        out = tf.identity(empty)
      out.eval() 
Example #18
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_positive(self):
    with self.test_session():
      remmy = tf.constant([1, 2], name="remmy")
      with tf.control_dependencies([tf.assert_positive(remmy)]):
        out = tf.identity(remmy)
      out.eval() 
Example #19
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_negative(self):
    with self.test_session():
      freddie = tf.constant([-1, -2], name="freddie")
      with tf.control_dependencies(
          [tf.assert_positive(freddie, message="fail")]):
        out = tf.identity(freddie)
      with self.assertRaisesOpError("fail.*freddie"):
        out.eval() 
Example #20
Source File: distributions.py    From rltf with MIT License
def __init__(self, loc, log_scale=None, scale=None, validate_args=False, allow_nan_stats=True):

    parameters = dict(locals())

    # Exactly one of log_scale / scale must be given; check that before touching
    # .shape so the assertion is not evaluated on a None argument.
    assert (log_scale is None) != (scale is None)
    assert log_scale is None or log_scale.shape.ndims == 2

    with tf.name_scope(self.__class__.__name__):

      if log_scale is not None:
        loc       = tf.identity(loc, name="loc")
        scale     = tf.exp(log_scale, name="scale")
        log_scale = tf.identity(log_scale, name="log_scale")
      elif scale is not None:
        with tf.control_dependencies([tf.assert_positive(scale)]):
          loc       = tf.identity(loc, name="loc")
          scale     = tf.identity(scale, name="scale")
          log_scale = tf.log(scale, name="log_scale")

      assert loc.dtype.base_dtype == tf.float32 or loc.dtype.base_dtype == tf.float64
      assert loc.dtype.base_dtype == log_scale.dtype.base_dtype == scale.dtype.base_dtype

    self.loc        = loc             # [batch_size, self.dim]
    self.log_scale  = log_scale       # [batch_size, self.dim] or [1, self.dim]
    self.scale      = scale           # [batch_size, self.dim] or [1, self.dim]
    self.dim        = self.loc.shape.as_list()[1]

    super().__init__(dtype=self.loc.dtype,
                     reparameterization_type=tf.distributions.FULLY_REPARAMETERIZED,
                     validate_args=validate_args,
                     allow_nan_stats=allow_nan_stats,
                     parameters=parameters,
                    ) 
Example #21
Source File: spectral_svgp.py    From nssm-gp with MIT License
def _build_likelihood(self):
        """
        This gives a variational bound on the model likelihood.
        Adds also the prior term for the GP parameters.
        """

        # Get prior KL.
        KL = self.build_prior_KL()

        # Get conditionals
        fmean, fvar = self._build_predict(self.X, full_cov=False)

        #with tf.control_dependencies([tf.assert_positive(fvar, message='fvar negative: ')]):
        #    # Get variational expectations.
        var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y)

        # re-scale var_exp for minibatch size
        scale = (tf.cast(self.num_data, settings.tf_float)
                 / tf.cast(tf.shape(self.X)[0], settings.tf_float))
        var_exp = tf.reduce_sum(var_exp) * scale

        # latent functions have a whitened GP prior
        prior = float_type(0.0)
        for vars in zip(self.frequencies, self.variances, self.lengthscales):
            prior += -0.5 * sum(tf.reduce_sum(tf.square(x)) for x in vars)

        # re-scale prior for inducing point size
        # scale = tf.cast(self.num_data, settings.tf_float) / tf.cast(self.num_inducing, settings.tf_float)
        # prior = prior * scale

        # print tensors
        #var_exp = tf.Print(var_exp, [var_exp], message='var_exp:')
        #KL = tf.Print(KL, [KL], message='KL:')
        #prior = tf.Print(prior, [prior], message='prior:')
        likelihood = var_exp - KL + prior
        likelihood = tf.Print(likelihood, [likelihood], 'likelihood')
        return likelihood 
Example #22
Source File: spectral_svgp.py    From nssm-gp with MIT License
def Kdiag(self, X, presliced=False):
        kdiag = 1e-4  # some jitter
        for variances in self.variances:
            kdiag += tf.square(self._interpolate(X, variances, self.Kvar, transform=tf.nn.softplus))
        with tf.control_dependencies([tf.assert_positive(kdiag, message='Kdiag negative: ')]):
            # kdiag = tf.Print(kdiag, [kdiag], 'kdiag: ')
            kdiag = tf.identity(kdiag)
        return tf.squeeze(kdiag) 
Example #23
Source File: reader.py    From bit-rnn with Apache License 2.0
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    """Iterate on the raw PTB data.
    This chunks up raw_data into batches of examples and returns Tensors that
    are drawn from these batches.
    Args:
      raw_data: one of the raw data outputs from ptb_raw_data.
      batch_size: int, the batch size.
      num_steps: int, the number of unrolls.
      name: the name of this operation (optional).
    Returns:
      A pair of Tensors, each shaped [batch_size, num_steps]. The second element
      of the tuple is the same data time-shifted to the right by one.
    Raises:
      tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
    """
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(
            raw_data, name="raw_data", dtype=tf.int32)

        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0: batch_size * batch_len],
                          [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(
            epoch_size,
            message="epoch_size == 0, decrease batch_size or num_steps")
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")

        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps])
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1])
        y.set_shape([batch_size, num_steps])
        return x, y 
Example #24
Source File: attention.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def _build(self, inputs, mask_length, maxlen=None,):
        """
        Apply a memory mask such that the values we mask result in being the
        minimum possible value we can represent with a float32.

        Taken from Sonnet Attention Module

        :param inputs: [batch size, length], dtype=tf.float32
        :param mask_length: [batch_size] shape Tensor of ints indicating the
            length of inputs
        :param maxlen: Sets the maximum length of the sequence; if None, inferred
            from inputs
        :returns: [batch size, length] dim Tensor with the mask applied
        """
        if len(mask_length.shape) != 1:
            raise ValueError('Mask Length must be a 1-d Tensor, got %s' % mask_length.shape)

        # [batch_size, length]
        memory_mask = tf.sequence_mask(mask_length, maxlen=maxlen, name='SequenceMask')
        inputs.shape.assert_is_compatible_with(memory_mask.shape)


        num_remaining_memory_slots = tf.reduce_sum(
            tf.cast(memory_mask, dtype=tf.int32), axis=[1])

        with tf.control_dependencies([tf.assert_positive(
            num_remaining_memory_slots)]):
            # Get the numerical limits of a float
            finfo = np.finfo(np.float32)

            # If True = 1 = Keep that memory slot
            kept_indices = tf.cast(memory_mask, dtype=tf.float32)

            # Inverse
            ignored_indices = tf.cast(tf.logical_not(memory_mask), dtype=tf.float32)

            # If we keep the indices its the max float value else its the
            # minimum float value. Then we can take the minimum
            lower_bound = finfo.max * kept_indices + finfo.min * ignored_indices
            slice_length = tf.reduce_max(mask_length)

            # Return the elementwise
            return tf.minimum(inputs[:, :slice_length],
                              lower_bound[:, :slice_length]) 
Example #25
Source File: rpn_model.py    From avod-ssd with MIT License
def loss(self, prediction_dict):

        # these should include mini-batch values only
        objectness_gt = prediction_dict[self.PRED_MB_OBJECTNESS_GT]
        offsets_gt = prediction_dict[self.PRED_MB_OFFSETS_GT]

        # Predictions
        with tf.variable_scope('rpn_prediction_mini_batch'):
            objectness = prediction_dict[self.PRED_MB_OBJECTNESS]
            offsets = prediction_dict[self.PRED_MB_OFFSETS]

        with tf.variable_scope('rpn_losses'):
            with tf.variable_scope('objectness'):
                cls_loss = losses.WeightedSoftmaxLoss()
                cls_loss_weight = self._config.loss_config.cls_loss_weight
                objectness_loss = cls_loss(objectness,
                                           objectness_gt,
                                           weight=cls_loss_weight)

                with tf.variable_scope('obj_norm'):
                    # normalize by the number of anchor mini-batches
                    objectness_loss = objectness_loss / tf.cast(
                        tf.shape(objectness_gt)[0], dtype=tf.float32)
                    tf.summary.scalar('objectness', objectness_loss)

            with tf.variable_scope('regression'):
                reg_loss = losses.WeightedSmoothL1Loss()
                reg_loss_weight = self._config.loss_config.reg_loss_weight
                anchorwise_localization_loss = reg_loss(offsets,
                                                        offsets_gt,
                                                        weight=reg_loss_weight)
                masked_localization_loss = \
                    anchorwise_localization_loss * objectness_gt[:, 1]
                localization_loss = tf.reduce_sum(masked_localization_loss)

                with tf.variable_scope('reg_norm'):
                    # normalize by the number of positive objects
                    num_positives = tf.reduce_sum(objectness_gt[:, 1])
                    # Assert the condition `num_positives > 0`
                    with tf.control_dependencies(
                            [tf.assert_positive(num_positives)]):
                        localization_loss = localization_loss / num_positives
                        tf.summary.scalar('regression', localization_loss)

            with tf.variable_scope('total_loss'):
                total_loss = objectness_loss + localization_loss

        loss_dict = {
            self.LOSS_RPN_OBJECTNESS: objectness_loss,
            self.LOSS_RPN_REGRESSION: localization_loss,
        }

        return loss_dict, total_loss 
Example #26
Source File: tclm_reader.py    From TensorFlowBook with Apache License 2.0
def tensorflow_code_producer(raw_data, batch_size, num_steps, name=None):
    """Iterate on the raw PTB data.

    This chunks up raw_data into batches of examples and returns Tensors that
    are drawn from these batches.

    Args:
        raw_data: one of the raw data outputs from ptb_raw_data.
        batch_size: int, the batch size.
        num_steps: int, the number of unrolls.
        name: the name of this operation (optional).

    Returns:
        A pair of Tensors, each shaped [batch_size, num_steps].
        The second element of the tuple is the same data time-shifted to the right by one.

    Raises:
        tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
    """
    with tf.name_scope(name, "TensorflowCodeProducer",
                       [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data, name="raw_data",
                                        dtype=tf.int32)

        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0: batch_size * batch_len],
                          [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(
            epoch_size,
            message="epoch_size == 0, decrease batch_size or num_steps")
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")

        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
        x.set_shape([batch_size, num_steps])
        y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
        y.set_shape([batch_size, num_steps])
        return x, y 
Example #27
Source File: reader.py    From tensorpack with Apache License 2.0
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.strided_slice(data, [0, i * num_steps],
                         [batch_size, (i + 1) * num_steps])
    x.set_shape([batch_size, num_steps])
    y = tf.strided_slice(data, [0, i * num_steps + 1],
                         [batch_size, (i + 1) * num_steps + 1])
    y.set_shape([batch_size, num_steps])
    return x, y 
Example #28
Source File: reader.py    From object_detection_with_tensorflow with MIT License
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.strided_slice(data, [0, i * num_steps],
                         [batch_size, (i + 1) * num_steps])
    x.set_shape([batch_size, num_steps])
    y = tf.strided_slice(data, [0, i * num_steps + 1],
                         [batch_size, (i + 1) * num_steps + 1])
    y.set_shape([batch_size, num_steps])
    return x, y 
Example #29
Source File: reader.py    From HumanRecognition with MIT License
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.strided_slice(data, [0, i * num_steps],
                         [batch_size, (i + 1) * num_steps])
    x.set_shape([batch_size, num_steps])
    y = tf.strided_slice(data, [0, i * num_steps + 1],
                         [batch_size, (i + 1) * num_steps + 1])
    y.set_shape([batch_size, num_steps])
    return x, y 
Example #30
Source File: reader.py    From g-tensorflow-models with Apache License 2.0
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.strided_slice(data, [0, i * num_steps],
                         [batch_size, (i + 1) * num_steps])
    x.set_shape([batch_size, num_steps])
    y = tf.strided_slice(data, [0, i * num_steps + 1],
                         [batch_size, (i + 1) * num_steps + 1])
    y.set_shape([batch_size, num_steps])
    return x, y