Python tensorflow.check_numerics() Examples

The following are 26 code examples of tensorflow.check_numerics(), drawn from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
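tf.check_numerics(tensor, message) passes tensor through unchanged, but raises an InvalidArgumentError whose message includes the given message string if the tensor contains any NaN or Inf values. This makes it a cheap way to pin down where numerical problems first enter a graph, which is how the examples below use it for log-probabilities, rewards, and gradients. A minimal TF1-style sketch (not from any of the projects below):

import tensorflow as tf

x = tf.constant([1.0, 2.0])
y = tf.check_numerics(tf.log(x - 1.0), "log(x - 1)")  # log(0) = -inf

with tf.Session() as sess:
    sess.run(y)  # raises InvalidArgumentError with a message containing "log(x - 1)"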
Example #1
Source File: univariate.py    From zhusuan with MIT License
def _log_prob(self, given):
        logits = self.logits
        n = tf.cast(self.n_experiments, self.param_dtype)
        given = tf.cast(given, self.param_dtype)

        log_1_minus_p = -tf.nn.softplus(logits)
        lgamma_n_plus_1 = tf.lgamma(n + 1)
        lgamma_given_plus_1 = tf.lgamma(given + 1)
        lgamma_n_minus_given_plus_1 = tf.lgamma(n - given + 1)

        if self._check_numerics:
            lgamma_given_plus_1 = tf.check_numerics(
                lgamma_given_plus_1, "lgamma(given + 1)")
            lgamma_n_minus_given_plus_1 = tf.check_numerics(
                lgamma_n_minus_given_plus_1, "lgamma(n - given + 1)")

        return lgamma_n_plus_1 - lgamma_n_minus_given_plus_1 - \
            lgamma_given_plus_1 + given * logits + n * log_1_minus_p 
Example #2
Source File: in_graph_batch_env.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
      action: Tensor holding the batch of actions to apply.

    Returns:
      Operation.
    """
    with tf.name_scope('environment/simulate'):
      if action.dtype in (tf.float16, tf.float32, tf.float64):
        action = tf.check_numerics(action, 'action')
      observ_dtype = self._parse_dtype(self._batch_env.observation_space)
      observ, reward, done = tf.py_func(
          lambda a: self._batch_env.step(a)[:3], [action],
          [observ_dtype, tf.float32, tf.bool], name='step')
      observ = tf.check_numerics(observ, 'observ')
      reward = tf.check_numerics(reward, 'reward')
      return tf.group(
          self._observ.assign(observ),
          self._action.assign(action),
          self._reward.assign(reward),
          self._done.assign(done)) 
Example #3
Source File: multivariate.py    From zhusuan with MIT License
def _log_prob(self, given):
        logits, temperature = self.path_param(self.logits), \
                              self.path_param(self.temperature)
        log_given = tf.log(given)
        log_temperature = tf.log(temperature)
        n = tf.cast(self.n_categories, self.dtype)

        if self._check_numerics:
            log_given = tf.check_numerics(log_given, "log(given)")
            log_temperature = tf.check_numerics(
                log_temperature, "log(temperature)")

        temp = logits - temperature * log_given

        return tf.lgamma(n) + (n - 1) * log_temperature + \
            tf.reduce_sum(temp - log_given, axis=-1) - \
            n * tf.reduce_logsumexp(temp, axis=-1) 
Example #4
Source File: messaging_cell_helpers.py    From shortest-path with The Unlicense
def layer_normalize(tensor):
	'''Apologies if I've abused this term'''

	in_shape = tf.shape(tensor)
	axes = list(range(1, len(tensor.shape)))

	# Keep batch axis
	t = tf.reduce_sum(tensor, axis=axes )
	t += EPSILON
	t = tf.reciprocal(t)
	t = tf.check_numerics(t, "1/sum")

	tensor = tf.einsum('brc,b->brc', tensor, t)

	tensor = dynamic_assert_shape(tensor, in_shape, "layer_normalize_tensor")
	return tensor 
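Despite the name, this normalizes each batch element to roughly unit sum rather than applying LayerNorm-style mean/variance normalization; the tf.check_numerics call guards the reciprocal in case a sum underflows or goes negative. A hypothetical usage sketch, assuming EPSILON is a small module-level constant and a rank-3 input to match the 'brc' einsum:

x = tf.random_uniform([2, 3, 4])  # [batch, rows, cols]
normalized = layer_normalize(x)
total = tf.reduce_sum(normalized, axis=[1, 2])  # approximately [1.0, 1.0]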
Example #5
Source File: univariate.py    From zhusuan with MIT License
def _log_prob(self, given):
        # TODO: not right when given=0 or 1
        alpha, beta = self.alpha, self.beta
        log_given = tf.log(given)
        log_1_minus_given = tf.log(1 - given)
        lgamma_alpha, lgamma_beta = tf.lgamma(alpha), tf.lgamma(beta)
        lgamma_alpha_plus_beta = tf.lgamma(alpha + beta)

        if self._check_numerics:
            log_given = tf.check_numerics(log_given, "log(given)")
            log_1_minus_given = tf.check_numerics(
                log_1_minus_given, "log(1 - given)")
            lgamma_alpha = tf.check_numerics(lgamma_alpha, "lgamma(alpha)")
            lgamma_beta = tf.check_numerics(lgamma_beta, "lgamma(beta)")
            lgamma_alpha_plus_beta = tf.check_numerics(
                lgamma_alpha_plus_beta, "lgamma(alpha + beta)")

        return (alpha - 1) * log_given + (beta - 1) * log_1_minus_given - (
            lgamma_alpha + lgamma_beta - lgamma_alpha_plus_beta) 
Example #6
Source File: algorithm.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def _mask(self, tensor, length):
    """Set padding elements of a batch of sequences to zero.

    Useful to then safely sum along the time dimension.

    Args:
      tensor: Tensor of sequences.
      length: Batch of sequence lengths.

    Returns:
      Masked sequences.
    """
    with tf.name_scope('mask'):
      range_ = tf.range(tensor.shape[1].value)
      mask = tf.cast(range_[None, :] < length[:, None], tf.float32)
      masked = tensor * mask
      return tf.check_numerics(masked, 'masked') 
Example #7
Source File: ppo.py    From training_results_v0.5 with Apache License 2.0
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args

  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return") 
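As the docstring says, the estimator is one timestep shorter than its inputs. A small shape check, assuming time-major [num_timesteps, batch_size] tensors:

import tensorflow as tf

reward = tf.zeros([11, 4])
value = tf.zeros([11, 4])
done = tf.zeros([11, 4], tf.bool)
gae = calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma=0.99, gae_lambda=0.95)
print(gae.shape)  # (10, 4): one element shorter along time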
Example #8
Source File: algorithm.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def _value_loss(self, observ, reward, length):
    """Compute the loss function for the value baseline.

    The value loss is the difference between empirical and approximated returns
    over the collected episodes. Returns the loss tensor and a summary string.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('value_loss'):
      value = self._network(observ, length).value
      return_ = utility.discounted_return(
          reward, length, self._config.discount)
      advantage = return_ - value
      value_loss = 0.5 * self._mask(advantage ** 2, length)
      summary = tf.summary.merge([
          tf.summary.histogram('value_loss', value_loss),
          tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
      value_loss = tf.reduce_mean(value_loss)
      return tf.check_numerics(value_loss, 'value_loss'), summary 
Example #9
Source File: model.py    From DOTA_models with Apache License 2.0
def _BuildLoss(self):
    # 1. reconstr_loss doesn't seem to do better than l2 loss.
    # 2. Only works when using reduce_mean. reduce_sum doesn't work.
    # 3. It seems kl loss doesn't play an important role.
    self.loss = 0
    with tf.variable_scope('loss'):
      if self.params['l2_loss']:
        l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
        tf.summary.scalar('l2_loss', l2_loss)
        self.loss += l2_loss
      if self.params['reconstr_loss']:
        reconstr_loss = (-tf.reduce_mean(
            self.diffs[1] * tf.log(1e-10 + self.diff_output) +
            (1 - self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
        reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
        tf.summary.scalar('reconstr_loss', reconstr_loss)
        self.loss += reconstr_loss
      if self.params['kl_loss']:
        kl_loss = (0.5 * tf.reduce_mean(
            tf.square(self.z_mean) + tf.square(self.z_stddev) -
            2 * self.z_stddev_log - 1))
        tf.summary.scalar('kl_loss', kl_loss)
        self.loss += kl_loss

      tf.summary.scalar('loss', self.loss) 
Example #10
Source File: py_func_batch_env.py    From training_results_v0.5 with Apache License 2.0
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
      action: Tensor holding the batch of actions to apply.

    Returns:
      Tuple of tensors holding the batch of rewards and done flags.
    """
    with tf.name_scope("environment/simulate"):
      if action.dtype in (tf.float16, tf.float32, tf.float64):
        action = tf.check_numerics(action, "action")
      def step(action):
        (observ, reward, done) = self._batch_env.step(action)
        return (observ, reward.astype(np.float32), done)
      observ, reward, done = tf.py_func(
          step, [action],
          [self.observ_dtype, tf.float32, tf.bool], name="step")
      reward = tf.check_numerics(reward, "reward")
      reward.set_shape((len(self),))
      done.set_shape((len(self),))
      with tf.control_dependencies([self._observ.assign(observ)]):
        return tf.identity(reward), tf.identity(done) 
Example #11
Source File: rebar.py    From yolo_v2 with Apache License 2.0
  def _u_to_v(self, log_alpha, u, eps=1e-8):
    """Convert u to tied randomness in v."""
    u_prime = tf.nn.sigmoid(-log_alpha)  # g(u') = 0

    v_1 = (u - u_prime) / tf.clip_by_value(1 - u_prime, eps, 1)
    v_1 = tf.clip_by_value(v_1, 0, 1)
    v_1 = tf.stop_gradient(v_1)
    v_1 = v_1*(1 - u_prime) + u_prime
    v_0 = u / tf.clip_by_value(u_prime, eps, 1)
    v_0 = tf.clip_by_value(v_0, 0, 1)
    v_0 = tf.stop_gradient(v_0)
    v_0 = v_0 * u_prime

    v = tf.where(u > u_prime, v_1, v_0)
    v = tf.check_numerics(v, 'v sampling is not numerically stable.')
    v = v + tf.stop_gradient(-v + u)  # v and u are the same up to numerical errors

    return v 
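The last assignment, v = v + tf.stop_gradient(-v + u), is the standard stop-gradient identity: the forward value equals u, while gradients flow as if the output were v. A minimal sketch of the trick in isolation (all names here are illustrative):

import tensorflow as tf

x = tf.constant(3.0)
v = 2.0 * x                           # differentiable path, forward value 6.0
u = tf.constant(10.0)
out = v + tf.stop_gradient(-v + u)    # forward value 10.0
grad = tf.gradients(out, x)[0]        # 2.0, taken through v rather than u

with tf.Session() as sess:
    print(sess.run([out, grad]))      # [10.0, 2.0]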
Example #12
Source File: ppo.py    From BERT with Apache License 2.0
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args

  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return") 
Example #13
Source File: univariate.py    From zhusuan with MIT License
def _log_prob(self, given):
        temperature, logits = self.path_param(self.temperature), \
                              self.path_param(self.logits)
        log_given = tf.log(given)
        log_1_minus_given = tf.log(1 - given)
        log_temperature = tf.log(temperature)

        if self._check_numerics:
            log_given = tf.check_numerics(log_given, "log(given)")
            log_1_minus_given = tf.check_numerics(
                log_1_minus_given, "log(1 - given)")
            log_temperature = tf.check_numerics(
                log_temperature, "log(temperature)")

        logistic_given = log_given - log_1_minus_given
        temp = temperature * logistic_given - logits

        return log_temperature - log_given - log_1_minus_given + \
            temp - 2 * tf.nn.softplus(temp) 
Example #14
Source File: py_func_batch_env.py    From fine-lm with MIT License
def _reset_non_empty(self, indices):
    """Reset the batch of environments.

    Args:
      indices: The batch indices of the environments to reset; defaults to all.

    Returns:
      Batch tensor of the new observations.
    """
    observ_dtype = utils.parse_dtype(self._batch_env.observation_space)
    observ = tf.py_func(
        self._batch_env.reset, [indices], observ_dtype, name='reset')
    observ = tf.check_numerics(observ, 'observ')
    with tf.control_dependencies([
        tf.scatter_update(self._observ, indices, observ)]):
      return tf.identity(observ) 
Example #15
Source File: py_func_batch_env.py    From fine-lm with MIT License
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
      action: Tensor holding the batch of actions to apply.

    Returns:
      Tuple of tensors holding the batch of rewards and done flags.
    """
    with tf.name_scope('environment/simulate'):
      if action.dtype in (tf.float16, tf.float32, tf.float64):
        action = tf.check_numerics(action, 'action')
      observ_dtype = utils.parse_dtype(self._batch_env.observation_space)
      observ, reward, done = tf.py_func(
          lambda a: self._batch_env.step(a)[:3], [action],
          [observ_dtype, tf.float32, tf.bool], name='step')
      observ = tf.check_numerics(observ, 'observ')
      reward = tf.check_numerics(reward, 'reward')
      reward.set_shape((len(self),))
      done.set_shape((len(self),))
      with tf.control_dependencies([self._observ.assign(observ)]):
        return tf.identity(reward), tf.identity(done) 
Example #16
Source File: ppo.py    From fine-lm with MIT License
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  """Generalized advantage estimator."""

  # Below is a slight weirdness: we set the last reward to 0.
  # This makes the advantage 0 in the last timestep.
  reward = tf.concat([reward[:-1, :], value[-1:, :]], axis=0)
  next_value = tf.concat([value[1:, :], tf.zeros_like(value[-1:, :])], axis=0)
  next_not_done = 1 - tf.cast(tf.concat([done[1:, :],
                                         tf.zeros_like(done[-1:, :])], axis=0),
                              tf.float32)
  delta = reward + gae_gamma * next_value * next_not_done - value

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return") 
Example #17
Source File: hmc.py    From zhusuan with MIT License
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate 
Example #18
Source File: model.py    From yolo_v2 with Apache License 2.0
def _BuildLoss(self):
    # 1. reconstr_loss doesn't seem to do better than l2 loss.
    # 2. Only works when using reduce_mean. reduce_sum doesn't work.
    # 3. It seems kl loss doesn't play an important role.
    self.loss = 0
    with tf.variable_scope('loss'):
      if self.params['l2_loss']:
        l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
        tf.summary.scalar('l2_loss', l2_loss)
        self.loss += l2_loss
      if self.params['reconstr_loss']:
        reconstr_loss = (-tf.reduce_mean(
            self.diffs[1] * tf.log(1e-10 + self.diff_output) +
            (1 - self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
        reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
        tf.summary.scalar('reconstr_loss', reconstr_loss)
        self.loss += reconstr_loss
      if self.params['kl_loss']:
        kl_loss = (0.5 * tf.reduce_mean(
            tf.square(self.z_mean) + tf.square(self.z_stddev) -
            2 * self.z_stddev_log - 1))
        tf.summary.scalar('kl_loss', kl_loss)
        self.loss += kl_loss

      tf.summary.scalar('loss', self.loss) 
Example #19
Source File: in_graph_batch_env.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def reset(self, indices=None):
    """Reset the batch of environments.

    Args:
      indices: The batch indices of the environments to reset; defaults to all.

    Returns:
      Batch tensor of the new observations.
    """
    if indices is None:
      indices = tf.range(len(self._batch_env))
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    observ = tf.py_func(
        self._batch_env.reset, [indices], observ_dtype, name='reset')
    observ = tf.check_numerics(observ, 'observ')
    reward = tf.zeros_like(indices, tf.float32)
    done = tf.zeros_like(indices, tf.bool)
    with tf.control_dependencies([
        tf.scatter_update(self._observ, indices, observ),
        tf.scatter_update(self._reward, indices, reward),
        tf.scatter_update(self._done, indices, done)]):
      return tf.identity(observ) 
Example #20
Source File: multivariate.py    From zhusuan with MIT License
def _log_prob(self, given):
        mean, cov_tril = (self.path_param(self.mean),
                          self.path_param(self.cov_tril))
        log_det = 2 * tf.reduce_sum(
            tf.log(tf.matrix_diag_part(cov_tril)), axis=-1)
        n_dim = tf.cast(self._n_dim, self.dtype)
        log_z = - n_dim / 2 * tf.log(
            2 * tf.constant(np.pi, dtype=self.dtype)) - log_det / 2
        # log_z.shape == batch_shape
        if self._check_numerics:
            log_z = tf.check_numerics(log_z, "log[det(Cov)]")
        # (given-mean)' Sigma^{-1} (given-mean) =
        # (g-m)' L^{-T} L^{-1} (g-m) = |x|^2, where Lx = g-m =: y.
        y = tf.expand_dims(given - mean, -1)
        L, _ = maybe_explicit_broadcast(
            cov_tril, y, 'MultivariateNormalCholesky.cov_tril',
            'expand_dims(given, -1)')
        x = tf.matrix_triangular_solve(L, y, lower=True)
        x = tf.squeeze(x, -1)
        stoc_dist = -0.5 * tf.reduce_sum(tf.square(x), axis=-1)
        return log_z + stoc_dist 
Example #21
Source File: univariate.py    From zhusuan with MIT License
def __init__(self,
                 rate,
                 dtype=tf.int32,
                 group_ndims=0,
                 check_numerics=False,
                 **kwargs):
        self._rate = tf.convert_to_tensor(rate)
        param_dtype = assert_same_float_dtype(
            [(self._rate, 'Poisson.rate')])

        assert_dtype_is_int_or_float(dtype)

        self._check_numerics = check_numerics

        super(Poisson, self).__init__(
            dtype=dtype,
            param_dtype=param_dtype,
            is_continuous=False,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs) 
Example #22
Source File: univariate.py    From zhusuan with MIT License
def __init__(self,
                 minval=0.,
                 maxval=1.,
                 group_ndims=0,
                 is_reparameterized=True,
                 check_numerics=False,
                 **kwargs):
        self._minval = tf.convert_to_tensor(minval)
        self._maxval = tf.convert_to_tensor(maxval)
        dtype = assert_same_float_dtype(
            [(self._minval, 'Uniform.minval'),
             (self._maxval, 'Uniform.maxval')])

        try:
            tf.broadcast_static_shape(self._minval.get_shape(),
                                      self._maxval.get_shape())
        except ValueError:
            raise ValueError(
                "minval and maxval should be broadcastable to match each "
                "other. ({} vs. {})".format(
                    self._minval.get_shape(), self._maxval.get_shape()))
        self._check_numerics = check_numerics
        super(Uniform, self).__init__(
            dtype=dtype,
            param_dtype=dtype,
            is_continuous=True,
            is_reparameterized=is_reparameterized,
            group_ndims=group_ndims,
            **kwargs) 
Example #23
Source File: univariate.py    From zhusuan with MIT License
def __init__(self,
                 alpha,
                 beta,
                 group_ndims=0,
                 check_numerics=False,
                 **kwargs):
        self._alpha = tf.convert_to_tensor(alpha)
        self._beta = tf.convert_to_tensor(beta)
        dtype = assert_same_float_dtype(
            [(self._alpha, 'Beta.alpha'),
             (self._beta, 'Beta.beta')])

        try:
            tf.broadcast_static_shape(self._alpha.get_shape(),
                                      self._beta.get_shape())
        except ValueError:
            raise ValueError(
                "alpha and beta should be broadcastable to match each "
                "other. ({} vs. {})".format(
                    self._alpha.get_shape(), self._beta.get_shape()))
        self._check_numerics = check_numerics
        super(Beta, self).__init__(
            dtype=dtype,
            param_dtype=dtype,
            is_continuous=True,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs) 
Example #24
Source File: model.py    From gconvRNN with MIT License
def _build_optim(self):
        def minimize(loss, step, var_list, learning_rate, optimizer):
            if optimizer == "sgd":
                optim = tf.train.GradientDescentOptimizer(learning_rate)
            elif optimizer == "adam":
                optim = tf.train.AdamOptimizer(learning_rate)
            elif optimizer == "rmsprop":
                optim = tf.train.RMSPropOptimizer(learning_rate)
            else:
                raise Exception("[!] Unkown optimizer: {}".format(
                    optimizer))
            ## Gradient clipping ##    
            if self.max_grad_norm is not None:
                grads_and_vars = optim.compute_gradients(
                    loss, var_list=var_list)
                new_grads_and_vars = []
                for idx, (grad, var) in enumerate(grads_and_vars):
                    if grad is not None and var in var_list:
                        grad = tf.clip_by_norm(grad, self.max_grad_norm)
                        grad = tf.check_numerics(
                            grad, "Numerical error in gradient for {}".format(
                                var.name))
                        new_grads_and_vars.append((grad, var))
                return optim.apply_gradients(new_grads_and_vars, global_step=step)
            else:
                grads_and_vars = optim.compute_gradients(
                    loss, var_list=var_list)
                return optim.apply_gradients(grads_and_vars,
                                             global_step=step)
        
        # optim #
        self.model_optim = minimize(
            self.loss,
            self.model_step,
            self.model_vars,
            self.learning_rate,
            self.optimizer) 
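The clip-then-check pattern above can be written more compactly; a hypothetical condensed version of the gradient loop (the `var in var_list` test is dropped because compute_gradients already restricts itself to var_list):

grads_and_vars = optim.compute_gradients(loss, var_list=var_list)
new_grads_and_vars = [
    (tf.check_numerics(tf.clip_by_norm(grad, self.max_grad_norm),
                       "Numerical error in gradient for {}".format(var.name)),
     var)
    for grad, var in grads_and_vars if grad is not None]
train_op = optim.apply_gradients(new_grads_and_vars, global_step=step)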
Example #25
Source File: univariate.py    From zhusuan with MIT License
def __init__(self,
                 alpha,
                 beta,
                 group_ndims=0,
                 check_numerics=False,
                 **kwargs):
        self._alpha = tf.convert_to_tensor(alpha)
        self._beta = tf.convert_to_tensor(beta)
        dtype = assert_same_float_dtype(
            [(self._alpha, 'Gamma.alpha'),
             (self._beta, 'Gamma.beta')])

        try:
            tf.broadcast_static_shape(self._alpha.get_shape(),
                                      self._beta.get_shape())
        except ValueError:
            raise ValueError(
                "alpha and beta should be broadcastable to match each "
                "other. ({} vs. {})".format(
                    self._alpha.get_shape(), self._beta.get_shape()))
        self._check_numerics = check_numerics
        super(Gamma, self).__init__(
            dtype=dtype,
            param_dtype=dtype,
            is_continuous=True,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs) 
Example #26
Source File: univariate.py    From zhusuan with MIT License
def __init__(self,
                 logits,
                 n_experiments,
                 dtype=tf.int32,
                 group_ndims=0,
                 check_numerics=False,
                 **kwargs):
        self._logits = tf.convert_to_tensor(logits)
        param_dtype = assert_same_float_dtype(
            [(self._logits, 'Binomial.logits')])

        assert_dtype_is_int_or_float(dtype)

        sign_err_msg = "n_experiments must be positive"
        if isinstance(n_experiments, int):
            if n_experiments <= 0:
                raise ValueError(sign_err_msg)
            self._n_experiments = n_experiments
        else:
            try:
                n_experiments = tf.convert_to_tensor(n_experiments, tf.int32)
            except ValueError:
                raise TypeError('n_experiments must be int32')
            _assert_rank_op = tf.assert_rank(
                n_experiments, 0,
                message="n_experiments should be a scalar (0-D Tensor).")
            _assert_positive_op = tf.assert_greater(
                n_experiments, 0, message=sign_err_msg)
            with tf.control_dependencies([_assert_rank_op,
                                          _assert_positive_op]):
                self._n_experiments = tf.identity(n_experiments)

        self._check_numerics = check_numerics
        super(Binomial, self).__init__(
            dtype=dtype,
            param_dtype=param_dtype,
            is_continuous=False,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs)