Python tensorflow.log() Examples

The following are 30 code examples of tensorflow.log(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet. You may also want to check out the other available functions and classes of the tensorflow module.
Example #1
Source File: common_layers.py    From fine-lm with MIT License
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. the number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
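As a quick usage sketch (not part of the original project), assuming TensorFlow 1.x (where tf.to_float and tf.Session are available) and the get_timing_signal definition above, the returned tensor has shape (length, 2*num_timescales):

import math
import tensorflow as tf

signal = get_timing_signal(length=50, num_timescales=16)

with tf.Session() as sess:
    print(sess.run(signal).shape)  # (50, 32): sin block and cos block concatenated on axis 1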
Example #2
Source File: distributions.py    From DOTA_models with Apache License 2.0
def logp_t(self, z_t_bxu, z_tm1_bxu=None):
    """Compute the log-likelihood under the distribution for a given time t,
    not the whole sequence.

    Args:
      z_t_bxu: sample to compute likelihood for at time t.
      z_tm1_bxu (optional): sample to condition the probability of z_t on.

    Returns:
      The log-likelihood of z_t under the model at time t, i.e.
        log p(z_t | z_tm1), where p(z_t | z_tm1) = N(z_tm1 * phis, eps^2)

    """
    if z_tm1_bxu is None:
      return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu,
                                          self.logpvars_bxu)
    else:
      means_t_bxu = self.pmeans_bxu + self.phis_bxu * z_tm1_bxu
      logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu,
                                                    means_t_bxu,
                                                    self.logevars_bxu)
      return logp_tgtm1_bxu 
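The helper diag_gaussian_log_likelihood is not shown on this page. As a rough sketch of what such a helper typically computes, the elementwise log-density of a diagonal Gaussian parameterized by a mean and a log-variance might look like the following; the name and signature here are assumptions for illustration, not the project's actual implementation:

import numpy as np
import tensorflow as tf

def diag_gaussian_log_likelihood(z, mean=0.0, logvar=0.0):
    # Elementwise log N(z; mean, exp(logvar)) -- a sketch, not the original helper.
    return -0.5 * (logvar + np.log(2 * np.pi)
                   + tf.square(z - mean) / tf.exp(logvar))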
Example #3
Source File: distributions.py    From DOTA_models with Apache License 2.0
def logp(self, z=None):
    """Compute the log-likelihood under the distribution.

    Args:
      z (optional): value to compute the log-likelihood for; if None, use the sample.

    Returns:
      The log-likelihood of z under the model.
    """
    if z is None:
      z = self.sample

    # This is needed to make sure that the gradients are simple.
    # The value of the function shouldn't change.
    if z == self.sample:
      return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise)

    return diag_gaussian_log_likelihood(z, self.mean, self.logvar) 
Example #4
Source File: distributions.py    From DOTA_models with Apache License 2.0
def __init__(self, batch_size, z_size, mean, logvar):
    """Create a diagonal gaussian distribution.

    Args:
      batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
      z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
      mean: The N-D mean of the distribution.
      logvar: The N-D log variance of the diagonal distribution.
    """
    size__xz = [None, z_size]
    self.mean = mean            # bxn already
    self.logvar = logvar        # bxn already
    self.noise = noise = tf.random_normal(tf.shape(logvar))
    self.sample = mean + tf.exp(0.5 * logvar) * noise
    mean.set_shape(size__xz)
    logvar.set_shape(size__xz)
    self.sample.set_shape(size__xz) 
Example #5
Source File: distributions.py    From DOTA_models with Apache License 2.0
def gaussian_pos_log_likelihood(unused_mean, logvar, noise):
  """Gaussian log-likelihood function for a posterior in VAE

  Note: This function is specialized for a posterior distribution, that has the
  form of z = mean + sigma * noise.

  Args:
    unused_mean: ignore
    logvar: The log variance of the distribution
    noise: The noise used in the sampling of the posterior.

  Returns:
    The log-likelihood under the Gaussian model.
  """
  # ln N(z; mean, sigma) = - ln(sigma) - 0.5 ln 2pi - noise^2 / 2
  return - 0.5 * (logvar + np.log(2 * np.pi) + tf.square(noise)) 
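The mean can be ignored because, for a reparameterized sample z = mean + exp(0.5 * logvar) * noise, the standardized residual (z - mean) / sigma is exactly the noise, so the usual quadratic term collapses to noise^2. A small NumPy check of that identity, with illustrative values only:

import numpy as np

rng = np.random.RandomState(0)
mean, logvar, noise = rng.randn(3, 5)
z = mean + np.exp(0.5 * logvar) * noise

specialized = -0.5 * (logvar + np.log(2 * np.pi) + noise ** 2)
generic = -0.5 * (logvar + np.log(2 * np.pi) + (z - mean) ** 2 / np.exp(logvar))
assert np.allclose(specialized, generic)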
Example #6
Source File: distributions.py    From DOTA_models with Apache License 2.0
def logp(self, z=None):
    """Compute the log-likelihood under the distribution.

    Args:
      z (optional): value to compute the log-likelihood for; if None, use the sample.

    Returns:
      The log-likelihood of z under the model.
    """
    if z is None:
      z = self.sample

    # This is needed to make sure that the gradients are simple.
    # The value of the function shouldn't change.
    if z == self.sample_bxn:
      return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn,
                                         self.noise_bxn)

    return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn) 
Example #7
Source File: policy.py    From DOTA_models with Apache License 2.0
def log_prob_action(self, action, logits,
                      sampling_dim, act_dim, act_type):
    """Calculate log-prob of action sampled from distribution."""
    if self.env_spec.is_discrete(act_type):
      act_log_prob = tf.reduce_sum(
          tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits), -1)
    elif self.env_spec.is_box(act_type):
      means = logits[:, :sampling_dim // 2]
      std = logits[:, sampling_dim // 2:]
      act_log_prob = (- 0.5 * tf.log(2 * np.pi * tf.square(std))
                      - 0.5 * tf.square(action - means) / tf.square(std))
      act_log_prob = tf.reduce_sum(act_log_prob, -1)
    else:
      assert False

    return act_log_prob 
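In the continuous (box) branch, the per-dimension term -0.5*log(2*pi*std^2) - 0.5*(a - mean)^2/std^2 is the ordinary Gaussian log-pdf, summed over action dimensions. A quick NumPy/SciPy check with made-up numbers:

import numpy as np
from scipy.stats import norm

action = np.array([0.3, -1.2])
means = np.array([0.0, -1.0])
std = np.array([0.5, 2.0])

manual = np.sum(-0.5 * np.log(2 * np.pi * std ** 2)
                - 0.5 * (action - means) ** 2 / std ** 2)
assert np.isclose(manual, norm.logpdf(action, means, std).sum())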
Example #8
Source File: utils.py    From DOTA_models with Apache License 2.0
def log_sum_exp(x_k):
  """Computes log \sum exp in a numerically stable way.
    log ( sum_i exp(x_i) )
    log ( sum_i exp(x_i - m + m) ),       with m = max(x_i)
    log ( sum_i exp(x_i - m)*exp(m) )
    log ( sum_i exp(x_i - m) ) + m

  Args:
    x_k: k-dimensional list of arguments to log_sum_exp.

  Returns:
    log_sum_exp of the arguments.
  """
  m = tf.reduce_max(x_k)
  x1_k = x_k - m
  u_k = tf.exp(x1_k)
  z = tf.reduce_sum(u_k)
  return tf.log(z) + m 
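Assuming TensorFlow 1.x, the same quantity is available as the built-in tf.reduce_logsumexp; a quick sketch comparing the two on values where the naive log(sum(exp(x))) would overflow:

import numpy as np
import tensorflow as tf

x = tf.constant([1000.0, 1000.1, 999.9])  # exp(1000) overflows, so the stabilization matters

with tf.Session() as sess:
    ours, builtin = sess.run([log_sum_exp(x), tf.reduce_logsumexp(x)])
    assert np.isclose(ours, builtin)
    print(ours)  # ~1001.1, finite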
Example #9
Source File: distributions.py    From lirpg with MIT License
def sample_dtype(self):
        return tf.int32

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=-1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) 
Example #10
Source File: distributions.py    From DOTA_models with Apache License 2.0
def logp(self, z=None):
    """Compute the log-likelihood under the distribution.

    Args:
      z (optional): value to compute the log-likelihood for; if None, use the sample.

    Returns:
      The log-likelihood of z under the model.
    """

    if z is None:
      z = self.sample

    # This is needed to make sure that the gradients are simple.
    # The value of the function shouldn't change.
    if z == self.sample_bxn:
      return gaussian_pos_log_likelihood(self.mean_bxn,
                                         self.logvar_bxn, self.noise_bxn)

    return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn) 
Example #11
Source File: policy.py    From DOTA_models with Apache License 2.0
def single_step(self, prev, cur, greedy=False):
    """Single RNN step.  Equivalently, single-time-step sampled actions."""
    prev_internal_state, prev_actions, _, _, _, _ = prev
    obs, actions = cur  # state observed and action taken at this time step

    # feed into RNN cell
    output, next_state = self.core(
        obs, prev_internal_state, prev_actions)

    # sample actions with values and log-probs
    (actions, logits, log_probs,
     entropy, self_kl) = self.sample_actions(
        output, actions=actions, greedy=greedy)

    return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
            tuple(entropy), tuple(self_kl)) 
Example #12
Source File: train.py    From fine-lm with MIT License
def binary_cross_entropy(x, y, smoothing=0, epsilon=1e-12):
  """Computes the averaged binary cross entropy.

  bce = -mean(y*log(x) + (1-y)*log(1-x))

  Args:
    x: The predicted labels.
    y: The true labels.
    smoothing: The label smoothing coefficient.
    epsilon: Small constant for numerical stability.

  Returns:
    The cross entropy.
  """
  y = tf.to_float(y)
  if smoothing > 0:
    smoothing *= 2
    y = y * (1 - smoothing) + 0.5 * smoothing
  return -tf.reduce_mean(tf.log(x + epsilon) * y + tf.log(1.0 - x + epsilon) * (1 - y)) 
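Note that the smoothing coefficient is doubled before the affine remap, so with smoothing=s a hard 0/1 label ends up at s and 1-s respectively. A two-line check in plain Python:

s = 0.1
for y in (0.0, 1.0):
    print(y, '->', y * (1 - 2 * s) + 0.5 * (2 * s))  # 0.0 -> 0.1, 1.0 -> 0.9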
Example #13
Source File: accountant.py    From DOTA_models with Apache License 2.0
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      if log_moment < moment_order * eps:
        min_delta = min(min_delta,
                        math.exp(log_moment - moment_order * eps))
    return min_delta 
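The same tail bound can be written as a standalone function: delta is the minimum over moment orders lambda of exp(log_moment(lambda) - lambda * eps), skipping invalid moments. A sketch with made-up (order, log_moment) pairs, purely for illustration:

import math

def compute_delta(log_moments, eps):
    # Moments-accountant tail bound; mirrors the method above, without the class.
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
        if math.isinf(log_moment) or math.isnan(log_moment):
            continue
        if log_moment < moment_order * eps:
            min_delta = min(min_delta, math.exp(log_moment - moment_order * eps))
    return min_delta

print(compute_delta([(2, 0.5), (4, 1.5), (8, 5.0)], eps=1.0))  # ~0.0498, from the order-8 moment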
Example #14
Source File: model.py    From DOTA_models with Apache License 2.0
def _BuildLoss(self):
    # 1. reconstr_loss doesn't seem to do better than the l2 loss.
    # 2. Only works when using reduce_mean; reduce_sum doesn't work.
    # 3. The kl loss doesn't seem to play an important role.
    self.loss = 0
    with tf.variable_scope('loss'):
      if self.params['l2_loss']:
        l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
        tf.summary.scalar('l2_loss', l2_loss)
        self.loss += l2_loss
      if self.params['reconstr_loss']:
        reconstr_loss = (-tf.reduce_mean(
            self.diffs[1] * tf.log(1e-10 + self.diff_output) +
            (1-self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
        reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
        tf.summary.scalar('reconstr_loss', reconstr_loss)
        self.loss += reconstr_loss
      if self.params['kl_loss']:
        kl_loss = (0.5 * tf.reduce_mean(
            tf.square(self.z_mean) + tf.square(self.z_stddev) -
            2 * self.z_stddev_log - 1))
        tf.summary.scalar('kl_loss', kl_loss)
        self.loss += kl_loss

      tf.summary.scalar('loss', self.loss) 
Example #15
Source File: sigrecogtf.py    From signature-recognition with MIT License
def sgd(training_data, training_labels, test_data, test_labels):
    # model
    with tf.variable_scope("regression"):
        x = tf.placeholder(tf.float32, [None, 901])
        y, variables = regression(x)

    # train
    y_ = tf.placeholder("float", [None, 2])
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_step, feed_dict={x: training_data, y_: training_labels})
        print(sess.run(accuracy, feed_dict={x: test_data, y_: test_labels})) 
Example #16
Source File: cycle_gan.py    From fine-lm with MIT License
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss)/2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log(1 - d2 + eps))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss)/2
    return loss 
Example #17
Source File: competition_model_class.py    From Deep_Learning_Weather_Forecasting with Apache License 2.0
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
            lr=self.lr, decay=self.decay,
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser)

        def _mve_loss(y_true, y_pred):
            pred_u = crop(2,0,3)(y_pred)
            pred_sig = crop(2,3,6)(y_pred)
            print(pred_sig)
            #exp_sig = tf.exp(pred_sig) # avoid pred_sig being too small (e.g. zero)
            #precision = 1./exp_sig
            precision = 1./pred_sig
            #log_loss= 0.5*tf.log(exp_sig)+0.5*precision*((pred_u-y_true)**2)
            log_loss= 0.5*tf.log(pred_sig)+0.5*precision*((pred_u-y_true)**2)            
          
            log_loss=tf.reduce_mean(log_loss)
            return log_loss

        print(self.model.summary())
        self.model.compile(optimizer = self.optimizer, loss=_mve_loss) 
Example #18
Source File: autoencoders.py    From fine-lm with MIT License
def bottleneck(self, x):  # pylint: disable=arguments-differ
    hparams = self.hparams
    if hparams.unordered:
      return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x, loss = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_bits = 1 - noise.
      # So log(p) * bottleneck_bits = log(1 - noise)
      log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
      # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x, loss 
Example #19
Source File: utils.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0
def cal_nkde(X,mu,sigma):
    s1,updates=theano.scan(lambda i,s: s+log_mean_exp(mu,X[i,:],sigma), sequences=[theano.tensor.arange(X.shape[0])],outputs_info=[np.asarray(0.,dtype="float32")])
    E=s1[-1]
    Z=mu.shape[0]*theano.tensor.log(sigma*np.sqrt(np.pi*2))
    return (Z-E)/mu.shape[0] 
Example #20
Source File: utils.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0
def log_mean_exp(A,b,sigma):
    a = - 0.5 * tf.reduce_sum(((A - tf.tile(b,[A.get_shape().as_list()[0],1]))**2), axis=1) / (sigma**2)
    max_ = tf.reduce_max(a)
    return max_ + tf.log(tf.reduce_mean(tf.exp(a - max_)))
Example #21
Source File: utils.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0
def set_logger(logPath, timestr, name):
	logger = logging.getLogger(name)
	logger.setLevel(logging.INFO)
	fh = logging.FileHandler(logPath + timestr + '.log')
	fh.setLevel(logging.INFO)
	ch = logging.StreamHandler()
	ch.setLevel(logging.INFO)
	formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
	fh.setFormatter(formatter)
	ch.setFormatter(formatter)
	logger.addHandler(fh)
	logger.addHandler(ch)
	return logger 
Example #22
Source File: utils.py    From lirpg with MIT License
def cat_entropy_softmax(p0):
    return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis=1) 
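As a sanity check (assuming TensorFlow 1.x), the entropy of a uniform distribution over K categories should come out close to log(K), up to the 1e-6 stabilizer:

import numpy as np
import tensorflow as tf

K = 4
p0 = tf.fill([1, K], 1.0 / K)

with tf.Session() as sess:
    print(sess.run(cat_entropy_softmax(p0)), np.log(K))  # ~[1.3863] vs 1.3863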
Example #23
Source File: adversary.py    From lirpg with MIT License
def __init__(self, env, hidden_size, entcoeff=0.001, lr_rate=1e-3, scope="adversary"):
        self.scope = scope
        self.observation_shape = env.observation_space.shape
        self.actions_shape = env.action_space.shape
        self.input_shape = tuple([o+a for o, a in zip(self.observation_shape, self.actions_shape)])
        self.num_actions = env.action_space.shape[0]
        self.hidden_size = hidden_size
        self.build_ph()
        # Build graph
        generator_logits = self.build_graph(self.generator_obs_ph, self.generator_acs_ph, reuse=False)
        expert_logits = self.build_graph(self.expert_obs_ph, self.expert_acs_ph, reuse=True)
        # Build accuracy
        generator_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(generator_logits) < 0.5))
        expert_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(expert_logits) > 0.5))
        # Build regression loss
        # let x = logits, z = targets.
        # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
        generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=generator_logits, labels=tf.zeros_like(generator_logits))
        generator_loss = tf.reduce_mean(generator_loss)
        expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=expert_logits, labels=tf.ones_like(expert_logits))
        expert_loss = tf.reduce_mean(expert_loss)
        # Build entropy loss
        logits = tf.concat([generator_logits, expert_logits], 0)
        entropy = tf.reduce_mean(logit_bernoulli_entropy(logits))
        entropy_loss = -entcoeff*entropy
        # Loss + Accuracy terms
        self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc]
        self.loss_name = ["generator_loss", "expert_loss", "entropy", "entropy_loss", "generator_acc", "expert_acc"]
        self.total_loss = generator_loss + expert_loss + entropy_loss
        # Build Reward for policy
        self.reward_op = -tf.log(1-tf.nn.sigmoid(generator_logits)+1e-8)
        var_list = self.get_trainable_variables()
        self.lossandgrad = U.function([self.generator_obs_ph, self.generator_acs_ph, self.expert_obs_ph, self.expert_acs_ph],
                                      self.losses + [U.flatgrad(self.total_loss, var_list)]) 
Example #24
Source File: adversary.py    From lirpg with MIT License
def logsigmoid(a):
    '''Equivalent to tf.log(tf.sigmoid(a))'''
    return -tf.nn.softplus(-a) 
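The point of the softplus form is numerical stability: -softplus(-a) equals log(sigmoid(a)) analytically, but it stays finite where sigmoid(a) underflows to zero in float32. A small check, assuming TensorFlow 1.x:

import tensorflow as tf

a = tf.constant([-200.0, -1.0, 0.0, 1.0, 200.0])

with tf.Session() as sess:
    stable, naive = sess.run([logsigmoid(a), tf.log(tf.sigmoid(a))])
    print(stable)  # finite everywhere, ~[-200, -1.31, -0.69, -0.31, 0]
    print(naive)   # -inf in the first slot, since sigmoid(-200) underflows to 0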
Example #25
Source File: model.py    From nn_physical_concepts with Apache License 2.0
def kl_divergence(means, log_sigma, dim, target_sigma=0.1):
    # KL divergence between given distribution and unit Gaussian
    target_sigma = tf.constant(target_sigma, shape=[dim])
    return 1 / 2. * tf.reduce_mean(tf.reduce_sum(1 / target_sigma**2 * means**2 +
                                                 tf.exp(2 * log_sigma) / target_sigma**2 - 2 * log_sigma + 2 * tf.log(target_sigma), axis=1) - dim) 
Example #26
Source File: discretization.py    From fine-lm with MIT License
def gumbel_sample(shape):
  """Sample from the Gumbel distribution, protect from overflows.

  Args:
    shape: Shape of Gumbel samples.

  Returns:
    Noise drawn from Gumbel distribution.
  """
  uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998)
  return -tf.log(-tf.log(uniform_samples)) 
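A typical use of this noise is the Gumbel-max trick: taking argmax(logits + gumbel_noise) draws a sample from the categorical distribution softmax(logits). A short sketch, assuming TensorFlow 1.x and the gumbel_sample definition above:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])
sample = tf.argmax(logits + gumbel_sample(tf.shape(logits)), axis=-1)

with tf.Session() as sess:
    print([sess.run(sample)[0] for _ in range(5)])  # mostly 0, occasionally 1 or 2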
Example #27
Source File: common_attention.py    From fine-lm with MIT License
def add_timing_signal_1d_given_position(x,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
  """Adds sinusoids of diff frequencies to a Tensor, with timing position given.

  Args:
    x: a Tensor with shape [batch, length, channels]
    position: a Tensor with shape [batch, length]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor the same shape as x.
  """
  channels = common_layers.shape_list(x)[2]
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  signal = common_layers.cast_like(signal, x)
  return x + signal 
Example #28
Source File: common_layers.py    From fine-lm with MIT License
def inverse_exp_decay(max_step, min_value=0.01):
  """Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
  inv_base = tf.exp(tf.log(min_value) / float(max_step))
  step = tf.to_float(tf.train.get_global_step())
  return inv_base**tf.maximum(float(max_step) - step, 0.0) 
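In plain Python, the same schedule can be evaluated for a given step: it starts at min_value, grows geometrically, and saturates at 1.0 once step >= max_step. A small sketch mirroring the TF code above:

import math

def inverse_exp_decay_value(step, max_step, min_value=0.01):
    inv_base = math.exp(math.log(min_value) / float(max_step))
    return inv_base ** max(float(max_step) - step, 0.0)

print(inverse_exp_decay_value(0, 1000))     # 0.01
print(inverse_exp_decay_value(500, 1000))   # 0.1 (geometric midpoint)
print(inverse_exp_decay_value(1000, 1000))  # 1.0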
Example #29
Source File: yellowfin.py    From fine-lm with MIT License
def _curvature_range(self):
    """Curvature range.

    Returns:
      h_max_t, h_min_t ops
    """
    self._curv_win = tf.get_variable("curv_win",
                                     dtype=tf.float32,
                                     trainable=False,
                                     shape=[self.curvature_window_width,],
                                     initializer=tf.zeros_initializer)
    # We use log smoothing for curvature range
    self._curv_win = tf.scatter_update(self._curv_win,
                                       self._step % self.curvature_window_width,
                                       tf.log(self._grad_norm_squared))
    # Note here the iterations start from iteration 0
    valid_window = tf.slice(self._curv_win,
                            tf.constant([0,]),
                            tf.expand_dims(
                                tf.minimum(
                                    tf.constant(self.curvature_window_width),
                                    self._step + 1), dim=0))
    self._h_min_t = tf.reduce_min(valid_window)
    self._h_max_t = tf.reduce_max(valid_window)

    curv_range_ops = []
    with tf.control_dependencies([self._h_min_t, self._h_max_t]):
      avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
      with tf.control_dependencies([avg_op]):
        self._h_min = tf.exp(
            tf.identity(self._moving_averager.average(self._h_min_t)))
        self._h_max = tf.exp(
            tf.identity(self._moving_averager.average(self._h_max_t)))
        if self._sparsity_debias:
          self._h_min *= self._sparsity_avg
          self._h_max *= self._sparsity_avg
    curv_range_ops.append(avg_op)
    return curv_range_ops  # h_max_t, h_min_t 
Example #30
Source File: op.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def log_sum_exp(x, axis=1):
    m = tf.reduce_max(x, axis=axis, keep_dims=True)
    return m + tf.log(tf.reduce_sum(tf.exp(x - m), axis=axis, keep_dims=True))
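With keep_dims on both reductions (as above, so the max and the sum broadcast back together cleanly), this should agree with the built-in tf.reduce_logsumexp. A quick check, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0], [100.0, 100.5, 99.5]])

with tf.Session() as sess:
    ours, builtin = sess.run([log_sum_exp(x), tf.reduce_logsumexp(x, axis=1)])
    assert np.allclose(ours[:, 0], builtin)
    print(ours.shape)  # (2, 1)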