Python tensorflow.reduce_logsumexp() Examples

The following are 30 code examples of tensorflow.reduce_logsumexp(), collected from open-source projects. Each example is listed with its source file, the project it comes from, and the project's license.
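As a quick orientation before the examples: tf.reduce_logsumexp(x, axis) computes log(sum(exp(x))) along the given axis, but shifts by the per-axis maximum internally so that large or very negative inputs neither overflow nor underflow. A minimal sketch (TensorFlow 1.x style, to match the examples below; the input values are made up for illustration):

import numpy as np
import tensorflow as tf

x = tf.constant([[1000., 1001., 1002.],
                 [-1000., -999., -998.]])

# Naive log(sum(exp(x))) overflows to inf / underflows to -inf here.
naive = tf.log(tf.reduce_sum(tf.exp(x), axis=1))
# reduce_logsumexp stays finite: ~[1002.41, -997.59]
stable = tf.reduce_logsumexp(x, axis=1)

with tf.Session() as sess:
    print(sess.run([naive, stable]))
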
Example #1
Source File: b_omni_2layer.py    From ARM-gradient with MIT License
def evidence(sess,data,elbo, batch_size = 100, S = 100, total_batch = None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    #from scipy.special import logsumexp    
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi  

#%% 
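The estimator above is the usual importance-weighted evidence bound: given S single-sample ELBO evaluations per data point, log p(x) is approximated by logsumexp_s(ELBO_s) - log(S). A minimal NumPy-only sketch of the same reduction with made-up ELBO values (note that the loop above builds a fresh reduce_logsumexp op on every batch, which grows the graph; doing the reduction in SciPy avoids that):

import numpy as np
from scipy.special import logsumexp

# elbo_accu: [batch_size, S] single-sample ELBO estimates (illustrative values)
elbo_accu = np.array([[-90.2, -88.7, -91.5],
                      [-102.3, -101.9, -103.0]])
S = elbo_accu.shape[1]

# log p(x_i) ~ log((1/S) * sum_s exp(ELBO_s)) = logsumexp(ELBO) - log(S)
log_evidence = logsumexp(elbo_accu, axis=1) - np.log(S)
print(log_evidence.mean())
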
Example #2
Source File: routing.py    From CapsLayer with Apache License 2.0
def M_step(log_R, log_activation, vote, lambda_val=0.01):
    R_shape = tf.shape(log_R)
    log_R = log_R + log_activation

    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R), axis=-3, keepdims=True)
    log_var = tf.reduce_logsumexp(log_normalized_R + cl.log(tf.square(vote - pose)), axis=-3, keepdims=True)

    beta_v = tf.get_variable('beta_v',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=15., stddev=3.))
    cost = R_sum_i * (beta_v + 0.5 * log_var)

    beta_a = tf.get_variable('beta_a',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=100.0, stddev=10))
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.log_sigmoid(logit)

    return(pose, log_var, log_activation) 
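A note on the two reductions above: subtracting reduce_logsumexp(log_R, axis, keepdims=True) from log_R is a log-softmax, so exp(log_normalized_R) gives routing weights that sum to 1 along the reduced axis, and log_var is the log of the weighted variance of the votes, computed entirely in log space. A tiny sketch of the normalization step under that reading (hypothetical logits, single axis):

import tensorflow as tf

log_R = tf.constant([[-1.0], [0.5], [2.0], [-0.3]])  # 4 hypothetical routing logits

# Log-softmax along axis 0: exp() of the result sums to 1 on that axis.
log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=0, keepdims=True)

with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(tf.exp(log_normalized_R), axis=0)))  # ~[1.]
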
Example #3
Source File: utils.py    From zhusuan with MIT License
def log_sum_exp(x, axis=None, keepdims=False):
    """
    Deprecated: Use tf.reduce_logsumexp().

    TensorFlow numerically stable log-sum-exp across the given `axis`.

    :param x: A Tensor.
    :param axis: An int, list, or tuple. The dimensions to reduce.
        If `None` (the default), reduces all dimensions.
    :param keepdims: Bool. If true, retains reduced dimensions with length 1.
        Defaults to False.

    :return: A Tensor with the log-sum-exp of `x` computed along the given
        axes.
    """
    x = tf.convert_to_tensor(x)
    x_max = tf.reduce_max(x, axis=axis, keepdims=True)
    ret = tf.log(tf.reduce_sum(tf.exp(x - x_max), axis=axis,
                               keepdims=True)) + x_max
    if not keepdims:
        ret = tf.reduce_sum(ret, axis=axis)
    return ret 
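The deprecated helper above is the standard max-shift trick: subtract the per-axis maximum before exponentiating and add it back after the log, which is essentially what tf.reduce_logsumexp does internally. A hedged check on random illustrative input that the two agree, reusing the log_sum_exp function defined above:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(3, 4).astype(np.float32))

with tf.Session() as sess:
    a, b = sess.run([log_sum_exp(x, axis=1), tf.reduce_logsumexp(x, axis=1)])
    print(np.allclose(a, b, atol=1e-5))  # True
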
Example #4
Source File: multivariate.py    From zhusuan with MIT License
def _log_prob(self, given):
        logits, temperature = self.path_param(self.logits), \
                              self.path_param(self.temperature)
        log_given = tf.log(given)
        log_temperature = tf.log(temperature)
        n = tf.cast(self.n_categories, self.dtype)

        if self._check_numerics:
            log_given = tf.check_numerics(log_given, "log(given)")
            log_temperature = tf.check_numerics(
                log_temperature, "log(temperature)")

        temp = logits - temperature * log_given

        return tf.lgamma(n) + (n - 1) * log_temperature + \
            tf.reduce_sum(temp - log_given, axis=-1) - \
            n * tf.reduce_logsumexp(temp, axis=-1) 
Example #5
Source File: loss_utils.py    From BERT with Apache License 2.0
def multilabel_categorical_crossentropy(y_true, y_pred):
	"""
	y_true takes values in {0, 1}:
	1 stands for a target class,
	0 stands for a non-target class.
	"""
	y_pred = (1 - 2 * y_true) * y_pred
	y_pred_neg = y_pred - y_true * 1e12
	y_pred_pos = y_pred - (1 - y_true) * 1e12

	zeros = tf.zeros_like(y_pred[..., :1])
	y_pred_neg = tf.concat([y_pred_neg, zeros], axis=-1)
	y_pred_pos = tf.concat([y_pred_pos, zeros], axis=-1)
	neg_loss = tf.reduce_logsumexp(y_pred_neg, axis=-1)
	pos_loss = tf.reduce_logsumexp(y_pred_pos, axis=-1)
	return neg_loss + pos_loss 
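In the loss above, appending a zero logit and combining two logsumexp terms drives target-class scores above 0 and non-target scores below 0, so prediction at inference time typically reduces to thresholding the raw scores at 0. A toy usage of the function above (made-up labels and logits; y_pred are raw scores, not sigmoid or softmax outputs):

import tensorflow as tf

y_true = tf.constant([[1., 0., 1., 0.],
                      [0., 0., 0., 1.]])
y_pred = tf.constant([[ 3.2, -1.0,  0.5, -2.0],
                      [-0.5,  1.2, -1.5,  2.8]])

loss = multilabel_categorical_crossentropy(y_true, y_pred)  # one value per sample

with tf.Session() as sess:
    print(sess.run(loss))
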
Example #6
Source File: tensorflow_backend.py    From GraphicDesignPatternByPython with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer or list of integers in [-rank(x), rank(x)),
            the axes to compute the logsumexp over. If `None` (default),
            computes the logsumexp over all dimensions.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #7
Source File: layers.py    From bert4keras with Apache License 2.0
def dense_loss(self, y_true, y_pred):
        """y_true需要是one hot形式
        """
        # Derive the mask and cast its dtype
        mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
        mask = K.cast(mask, K.floatx())
        # Compute the target score
        y_true, y_pred = y_true * mask, y_pred * mask
        target_score = self.target_score(y_true, y_pred)
        # Compute log Z recursively
        init_states = [y_pred[:, 0]]
        y_pred = K.concatenate([y_pred, mask], axis=2)
        input_length = K.int_shape(y_pred[:, 1:])[1]
        log_norm, _, _ = K.rnn(
            self.log_norm_step,
            y_pred[:, 1:],
            init_states,
            input_length=input_length
        )  # log Z vector from the final step
        log_norm = tf.reduce_logsumexp(log_norm, 1)  # logsumexp to get a scalar
        # Compute the loss -log p
        return log_norm - target_score 
Example #8
Source File: crf_test.py    From deep_image_model with Apache License 2.0
def testCrfLogLikelihood(self):
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    with self.test_session() as sess:
      all_sequence_log_likelihoods = []

      # Make sure all probabilities sum to 1.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        tag_indices.extend([0] * (num_words - sequence_lengths))
        sequence_log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
            inputs=tf.expand_dims(inputs, 0),
            tag_indices=tf.expand_dims(tag_indices, 0),
            sequence_lengths=tf.expand_dims(sequence_lengths, 0),
            transition_params=tf.constant(transition_params))
        all_sequence_log_likelihoods.append(sequence_log_likelihood)
      total_log_likelihood = tf.reduce_logsumexp(all_sequence_log_likelihoods)
      tf_total_log_likelihood = sess.run(total_log_likelihood)
      self.assertAllClose(tf_total_log_likelihood, 0.0) 
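The test above checks that exponentiating the log-likelihood of every possible tag sequence and summing gives 1, i.e. that the logsumexp of all sequence log-likelihoods is 0. A brute-force NumPy sketch of the same sanity check for a tiny linear-chain CRF (same unary and transition scores as the test, first three words only):

import itertools
import numpy as np
from scipy.special import logsumexp

unary = np.array([[4., 5., -3.], [3., -1., 3.], [-1., 2., 1.]])  # 3 words x 3 tags
trans = np.array([[-3., 5., -2.], [3., 4., 1.], [1., 2., 1.]])   # 3 x 3 transitions

def seq_score(tags):
    # Unnormalized score: unary terms plus transition terms along the chain.
    return (sum(unary[i, t] for i, t in enumerate(tags)) +
            sum(trans[a, b] for a, b in zip(tags[:-1], tags[1:])))

scores = np.array([seq_score(tags) for tags in itertools.product(range(3), repeat=3)])
log_Z = logsumexp(scores)            # log partition function
print(logsumexp(scores - log_Z))     # ~0.0: the sequence probabilities sum to 1
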
Example #9
Source File: networks.py    From auto_yolo with MIT License
def _call(self, inp, output_size, is_training):
        H, W, B, _ = tuple(int(i) for i in inp.shape[1:])

        # inp = tf.log(tf.nn.softmax(tf.clip_by_value(inp, -10., 10.), axis=4))
        inp = inp - tf.reduce_logsumexp(inp, axis=4, keepdims=True)

        running_sum = inp[:, 0, 0, 0, :]

        for h in range(H):
            for w in range(W):
                for b in range(B):
                    if h == 0 and w == 0 and b == 0:
                        pass
                    else:
                        right = inp[:, h, w, b, :]
                        running_sum = addition_compact_logspace(running_sum, right)

        assert running_sum.shape[1] == output_size
        return running_sum 
Example #10
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #11
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #12
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #13
Source File: jumpNormalAlgorithms.py    From decompose with MIT License
def alpha(cls, parameters: Dict[str, Tensor]) -> Tensor:
        mu = parameters["mu"]
        tau = parameters["tau"]
        nu = parameters["nu"]
        beta = parameters["beta"]

        sigma = 1./tf.sqrt(tau)
        lam = 1./beta

        muStd = tf.constant(0., dtype=mu.dtype)
        sigmaStd = tf.constant(1., dtype=mu.dtype)
        stdNorm = tf.contrib.distributions.Normal(loc=muStd, scale=sigmaStd)

        c0 = lam*(mu-nu) + stdNorm.log_cdf((nu-(mu+sigma**2*lam))/sigma)
        c1 = -lam*(mu-nu) + stdNorm.log_cdf(-(nu-(mu-sigma**2*lam))/sigma)
        c = tf.reduce_logsumexp([c0, c1], axis=0)
        f = (mu-nu)*lam

        norm = tf.distributions.Normal(loc=mu+sigma**2*lam, scale=sigma)

        alpha = tf.exp(f + norm.log_cdf(nu) - c)
        return(alpha) 
Example #14
Source File: b_omni_linear.py    From ARM-gradient with MIT License
def evidence(sess,data,elbo, batch_size = 100, S = 100, total_batch = None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    #from scipy.special import logsumexp    
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi 
    

#%% 
Example #15
Source File: b_omni_nonlinear.py    From ARM-gradient with MIT License
def evidence(sess,data,elbo, batch_size = 100, S = 100, total_batch = None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    #from scipy.special import logsumexp    
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi   

#%% 
Example #16
Source File: b_mnist_nonlinear_slim.py    From ARM-gradient with MIT License
def evidence(sess,data,elbo, batch_size = 100, S = 100, total_batch = None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    #from scipy.special import logsumexp    
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi 

#%% 
Example #17
Source File: b_mnist_linear2_slim.py    From ARM-gradient with MIT License
def evidence(sess,data,elbo, batch_size = 100, S = 100, total_batch = None):
    '''
    For correct use, the ELBO for each x_i must be computed from a
    SINGLE z sample drawn from q(z|x_i).
    '''
    #from scipy.special import logsumexp    
    if total_batch is None:
        total_batch = int(data.num_examples / batch_size)
        
    avg_evi = 0
    for j in range(total_batch):
        test_xs = data.next_batch(batch_size)         
        elbo_accu = np.empty([batch_size,0])
        for i in range(S):
            elbo_i = sess.run(elbo,{x:test_xs})
            elbo_accu = np.append(elbo_accu,elbo_i,axis=1)
        
        evi0 = sess.run(tf.reduce_logsumexp(elbo_accu,axis = 1))
        evi = np.mean(evi0 - np.log(S))
        avg_evi += evi / total_batch
    return avg_evi    
    

#%% 
Example #18
Source File: lfm.py    From gntp with MIT License
def multiclass_loss(self,
                        p_emb: tf.Tensor,
                        s_emb: tf.Tensor,
                        o_emb: tf.Tensor,
                        all_emb: tf.Tensor) -> tf.Tensor:
        # [B]
        x_ijk = self.score(p_emb, s_emb, o_emb)
        # [N,
        # [B, N]
        x_ij = self.score_sp(p_emb, s_emb, all_emb)
        x_jk = self.score_po(p_emb, all_emb, o_emb)
        # [B]
        lse_x_ij = tf.reduce_logsumexp(x_ij, 1)
        lse_x_jk = tf.reduce_logsumexp(x_jk, 1)
        # [B]
        losses = - x_ijk + lse_x_ij - x_ijk + lse_x_jk
        # Scalar
        loss = tf.reduce_mean(losses)
        return loss 
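Each logsumexp above is the log-normalizer of a full softmax over all candidate entities, so the per-triple loss is the negative log-softmax probability of the true triple, computed once over candidate objects and once over candidate subjects. A minimal NumPy sketch with hypothetical scores (assuming the true triple's score appears in both candidate rows):

import numpy as np
from scipy.special import logsumexp

x_ijk = np.array([2.0])               # score of the true (s, p, o) triple
x_ij  = np.array([[2.0, 0.5, -1.0]])  # scores of (s, p, *) over all entities
x_jk  = np.array([[2.0, 1.0,  0.0]])  # scores of (*, p, o) over all entities

losses = -x_ijk + logsumexp(x_ij, axis=1) - x_ijk + logsumexp(x_jk, axis=1)
# Equals -log softmax(x_ij)[true] - log softmax(x_jk)[true]
print(losses)
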
Example #19
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #20
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #21
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #22
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def logsumexp(x, axis=None, keepdims=False):
    """Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    # Returns
        The reduced tensor.
    """
    return tf.reduce_logsumexp(x, axis, keepdims) 
Example #23
Source File: expert_utils.py    From NMT_GAN with Apache License 2.0
def SampledSoftmaxLoss(features, sampler, num_classes, target_classes,
                       target_params, sampled_classes, sampled_params):
  """Loss for training softmax classifiers on large label vocabulary.

  This function assumes that we have already chosen the sampled classes and
  fetched the parameters for the target classes and the sampled classes.

  Args:
    features: a Tensor with shape [batch_size, hidden_size]
    sampler: a candidate sampler object
      (see learning/brain/google/python/ops/candidate_sampling.py)
    num_classes: an integer
    target_classes: an integer Tensor with shape [batch_size]
    target_params: a Tensor with shape [batch_size, hidden_size]
      The parameters corresponding to the target classes.
    sampled_classes: an integer tensor with shape [num_sampled_classes]
    sampled_params: a Tensor with shape [num_sampled_classes, hidden_size]
      The parameters corresponding to the sampled classes.

  Returns:
    a Tensor with shape [batch_size]
  """
  sampled_logits = (tf.matmul(features, sampled_params, transpose_b=True) -
                    sampler.log_expected_count(sampled_classes))
  target_logits = (tf.reduce_sum(target_params * features, 1) -
                   sampler.log_expected_count(target_classes))
  sampled_log_denominator = tf.reduce_logsumexp(
      sampled_logits, [1], name='SampledLogDenominator')
  sampled_classes_mask = tf.unsorted_segment_sum(
      tf.fill(tf.shape(sampled_classes), float('-inf')), sampled_classes,
      num_classes)
  target_log_denominator = (
      target_logits + tf.gather(sampled_classes_mask, target_classes))
  combined_log_denominator = tf.reduce_logsumexp(
      tf.stack([sampled_log_denominator, target_log_denominator]), [0])
  loss = combined_log_denominator - target_logits
  return loss 
Example #24
Source File: tensorflow.py    From deepx with MIT License
def logsumexp(self, x, axis=None, keepdims=False):
        return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims) 
Example #25
Source File: tensorflow.py    From deepx with MIT License
def reduce_logsumexp(self, x, axis=None, keepdims=False):
        return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims) 
Example #26
Source File: search.py    From XMUNMT with BSD 3-Clause "New" or "Revised" License
def log_prob_from_logits(logits):
    return logits - tf.reduce_logsumexp(logits, axis=2, keep_dims=True) 
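Subtracting the logsumexp of the logits along the class axis is exactly a log-softmax, so this helper should match tf.nn.log_softmax on that axis (the axis keyword is available in TF >= 1.5; keep_dims is the older TF 1.x spelling of keepdims). A hedged check with random illustrative logits:

import numpy as np
import tensorflow as tf

logits = tf.constant(np.random.randn(2, 5, 7).astype(np.float32))

a = log_prob_from_logits(logits)        # helper defined above
b = tf.nn.log_softmax(logits, axis=2)   # built-in equivalent

with tf.Session() as sess:
    print(np.allclose(*sess.run([a, b]), atol=1e-5))  # True
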
Example #27
Source File: rat_spn.py    From supair with MIT License
def forward(self, inputs):
        prods = tf.concat(inputs, 1)
        weights = self.weights

        if self.args.linear_sum_weights:
            sums = tf.log(tf.matmul(tf.exp(prods), tf.squeeze(self.weights)))
        else:
            prods = tf.expand_dims(prods, axis=-1)
            if self.dropout_op is not None:
                if self.args.drop_connect:
                    batch_size = prods.shape[0]
                    prod_num = prods.shape[1]
                    dropout_shape = [batch_size, prod_num, self.size]

                    random_tensor = random_ops.random_uniform(dropout_shape,
                                                              dtype=self.weights.dtype)
                    dropout_mask = tf.log(math_ops.floor(self.dropout_op + random_tensor))
                    weights = weights + dropout_mask

                else:
                    random_tensor = random_ops.random_uniform(prods.shape, dtype=prods.dtype)
                    dropout_mask = tf.log(math_ops.floor(self.dropout_op + random_tensor))
                    prods = prods + dropout_mask

            child_values = prods + weights
            self.max_child_idx = tf.argmax(child_values, axis=1)
            sums = tf.reduce_logsumexp(child_values, axis=1)

        return sums 
Example #28
Source File: ChainCRF.py    From naacl18-multitask_argument_mining with Apache License 2.0
def logsumexp(x, axis=None):
        '''Returns `log(sum(exp(x), axis=axis))` with improved numerical stability.
        '''
        return tf.reduce_logsumexp(x, axis=[axis]) 
Example #29
Source File: cpc_utils.py    From BERT with Apache License 2.0
def WPC_Hidden(student_tensor, teacher_tensor, input_mask, opt=None):
	teacher_shape = bert_utils.get_shape_list(teacher_tensor[0], expected_rank=[3])
	student_shape = bert_utils.get_shape_list(student_tensor[0], expected_rank=[3])

	with tf.variable_scope("wpc_weights", reuse=tf.AUTO_REUSE): 
		cpc_weights = tf.get_variable(
				"weights", [student_shape[-1],teacher_shape[-1]],
				initializer=create_initializer(0.02)
				)

	flipped_student_tensor = flip_gradient(student_tensor[-1])
	flipped_teacher_tensor = flip_gradient(teacher_tensor[-1])

	# batch x seq x t_hidden
	student_tensor_proj = tf.einsum("abc,cd->abd", flipped_student_tensor, cpc_weights)
	# batch x seq x t_hidden and batch x seq x t_hidden
	# log exp(zt x W x ct)
	# batch x batch x seq
	cpc_tensor = tf.einsum("abd,cbd->acb", student_tensor_proj, flipped_teacher_tensor)

	mask = tf.cast(input_mask, tf.float32) # batch x seq

	joint_sample_mask = tf.eye(student_shape[0], dtype=bool)
	joint_sample_mask = tf.expand_dims(joint_sample_mask, axis=-1) # batch x batch x 1

	joint_masked_cpc_tensor = tf.cast(joint_sample_mask, tf.float32) * cpc_tensor
	marginal_masked_cpc_tensor = cpc_tensor

	# got each seq joint term
	joint_term = tf.reduce_sum(joint_masked_cpc_tensor, axis=[1]) # batch x seq

	marginal_term = tf.reduce_logsumexp(marginal_masked_cpc_tensor, axis=[1]) # batch x seq

	loss = -tf.reduce_sum((joint_term - marginal_term)*mask) / (1e-10 + tf.reduce_sum(mask))

	# wpc_grad = opt.compute_gradients(loss, [])
		
	# wpc_grad = tf.sqrt(tf.reduce_sum(tf.square(wpc_grad), axis=1))
	# wpc_grad_penality = tf.reduce_mean(tf.square(wpc_grad - 1.0) * 0.1)

	return loss 
Example #30
Source File: categorical.py    From tensorforce with Apache License 2.0
def tf_parametrize(self, x, mask):
        epsilon = tf.constant(value=util.epsilon, dtype=util.tf_dtype(dtype='float'))
        shape = (-1,) + self.action_spec['shape'] + (self.action_spec['num_values'],)

        # Action values
        action_values = self.action_values.apply(x=x)
        action_values = tf.reshape(tensor=action_values, shape=shape)

        if self.state_value is None:
            # Implicit states value (TODO: experimental)
            states_value = tf.reduce_logsumexp(input_tensor=action_values, axis=-1)

        else:
            # Explicit states value and advantage-based action values
            states_value = self.state_value.apply(x=x)
            states_value = tf.reshape(tensor=states_value, shape=shape[:-1])
            action_values = tf.expand_dims(input=states_value, axis=-1) + action_values
            action_values -= tf.math.reduce_mean(input_tensor=action_values, axis=-1, keepdims=True)

        # TODO: before or after states_value?
        min_float = tf.fill(
            dims=tf.shape(input=action_values), value=util.tf_dtype(dtype='float').min
        )
        action_values = tf.where(condition=mask, x=action_values, y=min_float)

        # Softmax for corresponding probabilities
        probabilities = tf.nn.softmax(logits=action_values, axis=-1)

        # "Normalized" logits
        logits = tf.math.log(x=tf.maximum(x=probabilities, y=epsilon))

        Module.update_tensor(name=(self.name + '-probabilities'), tensor=probabilities)
        Module.update_tensor(name=(self.name + '-values'), tensor=action_values)

        return logits, probabilities, states_value, action_values
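In the implicit-state-value branch above, reduce_logsumexp over the action values acts as a smooth maximum: it is always at least as large as the biggest action value and approaches it as that value dominates the others. A small sketch of that behaviour with made-up action values:

import tensorflow as tf

action_values = tf.constant([[1.0, 2.0, 3.0],
                             [10.0, -5.0, 0.0]])

soft_max = tf.reduce_logsumexp(action_values, axis=-1)  # ~[3.41, 10.00]
hard_max = tf.reduce_max(action_values, axis=-1)        # [3., 10.]

with tf.Session() as sess:
    print(sess.run([soft_max, hard_max]))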