Python tensorflow.keras.backend.log() Examples

The following are 30 code examples of tensorflow.keras.backend.log(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the module tensorflow.keras.backend.
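As a quick orientation (a minimal sketch, not taken from any of the projects below): K.log computes the element-wise natural logarithm, so inputs are usually clipped to a small epsilon first to keep log(0) from producing -inf or NaN.

import tensorflow as tf
from tensorflow.keras import backend as K

probs = tf.constant([0.0, 0.5, 1.0])
# clip into (epsilon, 1.0) before taking the log so every element stays finite
safe_log = K.log(K.clip(probs, K.epsilon(), 1.0))
print(safe_log.numpy())  # approximately [-16.1, -0.69, 0.0]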
Example #1
Source File: loss.py    From Advanced-Deep-Learning-with-Keras with MIT License
def focal_loss_binary(y_true, y_pred):
    """Binary cross-entropy focal loss
    """
    gamma = 2.0
    alpha = 0.25

    pt_1 = tf.where(tf.equal(y_true, 1),
                    y_pred,
                    tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0),
                    y_pred,
                    tf.zeros_like(y_pred))

    epsilon = K.epsilon()
    # clip to prevent NaN and Inf
    pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
    pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)

    weight = alpha * K.pow(1. - pt_1, gamma)
    fl1 = -K.sum(weight * K.log(pt_1))
    weight = (1 - alpha) * K.pow(pt_0, gamma)
    fl0 = -K.sum(weight * K.log(1. - pt_0))

    return fl1 + fl0 
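A hedged usage sketch for the loss above; the small binary classifier here is illustrative and not part of the original source file.

from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Dense(32, activation='relu', input_shape=(16,)),
    layers.Dense(1, activation='sigmoid'),
])
# pass the loss function directly; Keras calls it with (y_true, y_pred)
model.compile(optimizer='adam', loss=focal_loss_binary)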
Example #2
Source File: backend_keras.py    From kapre with MIT License
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
    """[K] Convert (linear) amplitude to decibel (log10(x)).

    Parameters
    ----------
    x: Keras *batch* tensor or variable. It has to be batch because of sample-wise `K.max()`.

    amin: minimum amplitude. amplitude smaller than `amin` is set to this.

    dynamic_range: dynamic_range in decibel

    """
    log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    if K.ndim(x) > 1:
        axis = tuple(range(K.ndim(x))[1:])
    else:
        axis = None

    log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)  # [-?, 0]
    log_spec = K.maximum(log_spec, -1 * dynamic_range)  # [-80, 0]
    return log_spec 
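An illustrative call of the function above on a made-up batch of linear-amplitude spectrograms; the shapes and values are assumptions, not taken from kapre.

import tensorflow as tf

spectrograms = tf.random.uniform((2, 128, 100))  # (batch, freq, time), linear amplitude
db = amplitude_to_decibel(spectrograms, amin=1e-10, dynamic_range=80.0)
# the per-sample maximum is shifted to 0 dB and everything is floored at -80 dB
print(float(tf.reduce_max(db)), float(tf.reduce_min(db)))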
Example #3
Source File: nnet_survival.py    From nnet-survival with MIT License
def surv_likelihood(n_intervals):
  """Create custom Keras loss function for neural network survival model. 
  Arguments
      n_intervals: the number of survival time intervals
  Returns
      Custom loss function that can be used with Keras
  """
  def loss(y_true, y_pred):
    """
    Required to have only 2 arguments by Keras.
    Arguments
        y_true: Tensor.
          First half of the values is 1 if individual survived that interval, 0 if not.
          Second half of the values is for individuals who failed, and is 1 for the time interval during which failure occurred, 0 for other intervals.
          See make_surv_array function.
        y_pred: Tensor, predicted survival probability (1-hazard probability) for each time interval.
    Returns
        Vector of losses for this minibatch.
    """
    cens_uncens = 1. + y_true[:,0:n_intervals] * (y_pred-1.) #component for all individuals
    uncens = 1. - y_true[:,n_intervals:2*n_intervals] * y_pred #component for only uncensored individuals
    return K.sum(-K.log(K.clip(K.concatenate((cens_uncens,uncens)),K.epsilon(),None)),axis=-1) #return -log likelihood
  return loss 
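A hypothetical compile step for the loss factory above; the network and n_intervals are placeholders rather than the architecture used in nnet-survival.

from tensorflow.keras import layers, models

n_intervals = 10
model = models.Sequential([
    # output: per-interval conditional survival probability in (0, 1)
    layers.Dense(n_intervals, activation='sigmoid', input_shape=(20,)),
])
model.compile(optimizer='adam', loss=surv_likelihood(n_intervals))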
Example #4
Source File: mine-13.8.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def mi_loss(self, y_true, y_pred):
        """ MINE loss function

        Arguments:
            y_true (tensor): Not used since this is
                unsupervised learning
            y_pred (tensor): stack of predictions for
                the joint T(x,y) (lower half) and the
                marginal T(x,y_bar) with shuffled y (upper half)
        """
        size = self.args.batch_size
        # lower half is pred for joint dist
        pred_xy = y_pred[0: size, :]

        # upper half is pred for marginal dist
        pred_x_y = y_pred[size : y_pred.shape[0], :]
        loss = K.mean(K.exp(pred_x_y))
        loss = K.clip(loss, K.epsilon(), np.finfo(float).max)
        loss = K.mean(pred_xy) - K.log(loss)
        return -loss 
Example #5
Source File: loss.py    From Advanced-Deep-Learning-with-Keras with MIT License
def focal_loss_categorical(y_true, y_pred):
    """Categorical cross-entropy focal loss"""
    gamma = 2.0
    alpha = 0.25

    # scale to ensure sum of prob is 1.0
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

    # clip the prediction value to prevent NaN and Inf
    epsilon = K.epsilon()
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)

    # calculate cross entropy
    cross_entropy = -y_true * K.log(y_pred)

    # calculate focal loss
    weight = alpha * K.pow(1 - y_pred, gamma)
    cross_entropy *= weight

    return K.sum(cross_entropy, axis=-1) 
Example #6
Source File: operation_layers.py    From onnx2keras with MIT License
def convert_log(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert Log layer
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_name: resulting layer name
    :return: None
    """
    if len(node.input) != 1:
        raise AttributeError('More than 1 input for log layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

    def target_layer(x):
        import tensorflow.keras.backend as K
        return K.log(x)

    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
    layers[node_name] = lambda_layer(input_0)
    lambda_func[keras_name] = target_layer 
Example #7
Source File: operation_layers.py    From onnx2keras with MIT License
def convert_exp(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert Exp layer
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_name: resulting layer name
    :return: None
    """
    if len(node.input) != 1:
        raise AttributeError('More than 1 input for exp layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

    def target_layer(x):
        import tensorflow.keras.backend as K
        return K.exp(x)

    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
    layers[node_name] = lambda_layer(input_0)
    lambda_func[keras_name] = target_layer 
Example #8
Source File: loss.py    From keras-YOLOv3-model-set with MIT License
def softmax_focal_loss(y_true, y_pred, gamma=2.0, alpha=0.25):
    """
    Compute softmax focal loss.
    Reference Paper:
        "Focal Loss for Dense Object Detection"
        https://arxiv.org/abs/1708.02002

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted logits,
            tensor of shape (?, num_boxes, num_classes).
        gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
        alpha: optional alpha weighting factor to balance positives vs negatives.

    # Returns
        softmax_focal_loss: Softmax focal loss, tensor of shape (?, num_boxes).
    """

    # Scale predictions so that the class probas of each sample sum to 1
    #y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

    # Clip the prediction value to prevent NaN's and Inf's
    #epsilon = K.epsilon()
    #y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    y_pred = tf.nn.softmax(y_pred)
    y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)

    # Calculate Cross Entropy
    cross_entropy = -y_true * tf.math.log(y_pred)

    # Calculate Focal Loss
    softmax_focal_loss = alpha * tf.pow(1 - y_pred, gamma) * cross_entropy

    return softmax_focal_loss 
Example #9
Source File: cells.py    From DeepPavlov with Apache License 2.0
def positions_func(inputs, pad=0):
    """
    A layer filling the i-th column of a 2D tensor with
    ln(1+i) (1-based position) when it contains a meaningful symbol
    and with 0 when it contains PAD
    """
    position_inputs = K.cumsum(K.ones_like(inputs, dtype="float32"), axis=1)
    position_inputs *= K.cast(K.not_equal(inputs, pad), "float32")
    return K.log(1.0 + position_inputs) 
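A toy call (the input is made up, not from DeepPavlov): PAD positions come out as 0, and a meaningful token at 1-based position i comes out as log(1 + i).

import tensorflow as tf

tokens = tf.constant([[5, 7, 9, 0, 0]])   # 0 is the PAD index
print(positions_func(tokens).numpy())     # [[log 2, log 3, log 4, 0, 0]]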
Example #10
Source File: cells.py    From DeepPavlov with Apache License 2.0
def call(self, inputs, **kwargs):
        assert isinstance(inputs, list) and len(inputs) == 3
        first, second, features = inputs[0], inputs[1], inputs[2]
        if not self.from_logits:
            first = K.clip(first, 1e-10, 1.0)
            second = K.clip(second, 1e-10, 1.0)
            first_, second_ = K.log(first), K.log(second)
        else:
            first_, second_ = first, second
        # embedded_features.shape = (M, T, 1)
        if self.use_intermediate_layer:
            features = K.dot(features, self.first_kernel)
            features = K.bias_add(features, self.first_bias, data_format="channels_last")
            features = self.intermediate_activation(features)
        embedded_features = K.dot(features, self.features_kernel)
        embedded_features = K.bias_add(
            embedded_features, self.features_bias, data_format="channels_last")
        if self.use_dimension_bias:
            tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]
            embedded_features = K.tile(embedded_features, tiling_shape)
            embedded_features = K.bias_add(
                embedded_features, self.dimensions_bias, data_format="channels_last")
        sigma = K.sigmoid(embedded_features)

        result = weighted_sum(first_, second_, sigma,
                              self.first_threshold, self.second_threshold)
        probs = K.softmax(result)
        if self.return_logits:
            return [probs, result]
        return probs 
Example #11
Source File: activations.py    From megnet with BSD 3-Clause "New" or "Revised" License
def softplus2(x):
    """
    out = log(exp(x)+1) - log(2)
    softplus function that is 0 at x=0, the implementation aims at avoiding overflow

    Args:
        x: (Tensor) input tensor

    Returns:
         (Tensor) output tensor
    """
    return kb.relu(x) + kb.log(0.5*kb.exp(-kb.abs(x)) + 0.5) 
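A small numeric check (illustrative): the rewritten form above matches log(exp(x) + 1) - log(2) but stays finite when exp(x) would overflow, and it is exactly 0 at x = 0.

import numpy as np
import tensorflow as tf

x = tf.constant([-2.0, 0.0, 2.0])
print(softplus2(x).numpy())                               # matches the line below
print(np.log(np.exp([-2.0, 0.0, 2.0]) + 1) - np.log(2))   # direct, overflow-prone form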
Example #12
Source File: _keras_losses.py    From solaris with Apache License 2.0
def k_focal_loss(gamma=2, alpha=0.75):
    # from github.com/atomwh/focalloss_keras

    def focal_loss_fixed(y_true, y_pred):  # with tensorflow

        eps = 1e-12  # improve the stability of the focal loss
        y_pred = K.clip(y_pred, eps, 1.-eps)
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        return -K.sum(
            alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))-K.sum(
                (1-alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))

    return focal_loss_fixed 
Example #13
Source File: _keras_losses.py    From solaris with Apache License 2.0
def k_lovasz_hinge(per_image=False):
    """Wrapper for the Lovasz Hinge Loss Function, for use in Keras.

    This is a mess. I'm sorry.
    """

    def lovasz_hinge_flat(y_true, y_pred):
        # modified from Maxim Berman's GitHub repo tf implementation for Lovasz
        eps = 1e-12  # for stability
        y_pred = K.clip(y_pred, eps, 1-eps)
        logits = K.log(y_pred/(1-y_pred))
        logits = tf.reshape(logits, (-1,))
        y_true = tf.reshape(y_true, (-1,))
        y_true = tf.cast(y_true, logits.dtype)
        signs = 2. * y_true - 1.
        errors = 1. - logits * tf.stop_gradient(signs)
        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0],
                                          name="descending_sort")
        gt_sorted = tf.gather(y_true, perm)
        grad = tf_lovasz_grad(gt_sorted)
        loss = tf.tensordot(tf.nn.relu(errors_sorted),
                            tf.stop_gradient(grad),
                            1, name="loss_non_void")
        return loss

    def lovasz_hinge_per_image(y_true, y_pred):
        # modified from Maxim Berman's GitHub repo tf implementation for Lovasz
        losses = tf.map_fn(_treat_image, (y_true, y_pred), dtype=tf.float32)
        loss = tf.reduce_mean(losses)
        return loss

    def _treat_image(ytrue_ypred):
        y_true, y_pred = ytrue_ypred
        y_true, y_pred = tf.expand_dims(y_true, 0), tf.expand_dims(y_pred, 0)
        return lovasz_hinge_flat(y_true, y_pred)

    if per_image:
        return lovasz_hinge_per_image
    else:
        return lovasz_hinge_flat 
Example #14
Source File: focal_loss.py    From pcc_geo_cnn with MIT License
def focal_loss(y_true, y_pred, gamma=2, alpha=0.95):
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))

    pt_1 = K.clip(pt_1, 1e-3, .999)
    pt_0 = K.clip(pt_0, 1e-3, .999)

    return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0)) 
Example #15
Source File: training.py    From medaka with Mozilla Public License 2.0
def qscore(y_true, y_pred):
    """Keras metric function for calculating scaled error.

    :param y_true: tensor of true class labels.
    :param y_pred: class output scores from network.

    :returns: class error expressed as a phred score.
    """
    from tensorflow.keras import backend as K
    error = K.cast(K.not_equal(
        K.max(y_true, axis=-1), K.cast(K.argmax(y_pred, axis=-1), K.floatx())),
        K.floatx()
    )
    error = K.sum(error) / K.sum(K.ones_like(error))
    return -10.0 * 0.434294481 * K.log(error) 
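As a sanity check on the conversion above: 0.434294481 is approximately 1/ln(10), so the expression is -10 * log10(error); for example, a 1% error rate maps to a phred score of about 20.

import numpy as np

print(-10.0 * 0.434294481 * np.log(0.01))  # ~20.0, i.e. Q20 for a 1% error rate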
Example #16
Source File: WCCE.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy
    
    Variables:
        weights: numpy array of shape (C,) where C is the number of classes
    
    Usage:
        weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')
    """
    
    weights = K.variable(weights)
        
    def loss(y_true, y_pred):
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss
    
    return loss 
Example #17
Source File: nnet_survival.py    From nnet-survival with MIT License
def surv_likelihood_rnn(n_intervals):
  """Create custom Keras loss function for neural network survival model. Used for recurrent neural networks with time-distributed output.
       This function is very similar to surv_likelihood but deals with the extra dimension of y_true and y_pred that exists because of the time-distributed output.
  """
  def loss(y_true, y_pred):
    cens_uncens = 1. + y_true[0,:,0:n_intervals] * (y_pred-1.) #component for all patients
    uncens = 1. - y_true[0,:,n_intervals:2*n_intervals] * y_pred #component for only uncensored patients
    return K.sum(-K.log(K.clip(K.concatenate((cens_uncens,uncens)),K.epsilon(),None)),axis=-1) #return -log likelihood
  return loss 
Example #18
Source File: B_Focal_loss.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def focal_loss(gamma=2., alpha=.25):
	def focal_loss_fixed(y_true, y_pred):
		pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
		pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
		return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
	return focal_loss_fixed 
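Hypothetical usage of the factory above: the returned closure is what gets passed to compile(); the model here is only a placeholder.

from tensorflow.keras import layers, models

model = models.Sequential([layers.Dense(1, activation='sigmoid', input_shape=(8,))])
model.compile(optimizer='adam', loss=focal_loss(gamma=2., alpha=.25))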
Example #19
Source File: SpectralPooling.py    From aitom with GNU General Public License v3.0
def call(self, tensors, mask=None):
        if self.homomorphic == True:
            tensors = K.log(tensors) 
            
        x_dct = self._dct3D(tensors)
        x_crop = self._cropping3D(x_dct)
        x_idct = self._idct3D(x_crop)
        
        if self.homomorphic == True:
            x_idct = K.exp(x_idct) 
            
        return x_idct 
Example #20
Source File: mine-13.8.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def compute_mi(cov_xy=0.5, n_bins=100):
    """Analytic computation of MI using binned 
        2D Gaussian

    Arguments:
        cov_xy (float): Off-diagonal element of the
            covariance matrix
        n_bins (int): Number of bins to "quantize" the
            continuous 2D Gaussian
    """
    cov=[[1, cov_xy], [cov_xy, 1]]
    data = sample(cov=cov)
    # get joint distribution samples
    # perform histogram binning
    joint, edge = np.histogramdd(data, bins=n_bins)
    joint /= joint.sum()
    eps = np.finfo(float).eps
    joint[joint<eps] = eps
    # compute marginal distributions
    x, y = margins(joint)

    xy = x*y
    xy[xy<eps] = eps
    # MI = sum over bins of P(X,Y) * log(P(X,Y) / (P(X)*P(Y)))
    mi = joint*np.log(joint/xy)
    mi = mi.sum()
    print("Computed MI: %0.6f" % mi)
    return mi 
Example #21
Source File: iic-13.5.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def mi_loss(self, y_true, y_pred):
        """Mutual information loss computed from the joint
           distribution matrix and the marginals

        Arguments:
            y_true (tensor): Not used since this is
                unsupervised learning
            y_pred (tensor): stack of softmax predictions for
                the Siamese latent vectors (Z and Zbar)
        """
        size = self.args.batch_size
        n_labels = y_pred.shape[-1]
        # lower half is Z
        Z = y_pred[0: size, :]
        Z = K.expand_dims(Z, axis=2)
        # upper half is Zbar
        Zbar = y_pred[size: y_pred.shape[0], :]
        Zbar = K.expand_dims(Zbar, axis=1)
        # compute joint distribution (Eq 10.3.2 & .3)
        P = K.batch_dot(Z, Zbar)
        P = K.sum(P, axis=0)
        # enforce symmetric joint distribution (Eq 10.3.4)
        P = (P + K.transpose(P)) / 2.0
        # normalization of total probability to 1.0
        P = P / K.sum(P)
        # marginal distributions (Eq 10.3.5 & .6)
        Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
        Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
        Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
        Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
        P = K.clip(P, K.epsilon(), np.finfo(float).max)
        Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
        Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
        # negative MI loss (Eq 10.3.7)
        neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
        # each head contributes 1/n_heads to the total loss
        return neg_mi/self.args.heads 
Example #22
Source File: infogan-mnist-6.1.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def mi_loss(c, q_of_c_given_x):
    """ Mutual information, Equation 5 in [2],
        assuming H(c) is constant
    """
    # mi_loss = -c * log(Q(c|x))
    return -K.mean(K.sum(c * K.log(q_of_c_given_x + K.epsilon()), 
                                   axis=1)) 
Example #23
Source File: quantizers.py    From qkeras with Apache License 2.0
def _need_exponent_sign_bit_check(max_value):
  """Checks whether the sign bit of exponent is needed.

  This is used by quantized_po2 and quantized_relu_po2.

  Args:
    max_value: the maximum value allowed.

  Returns:
    An integer. 1: sign_bit is needed. 0: sign_bit is not needed.
  """

  if max_value is not None:
    if max_value < 0:
      raise ValueError("po2 max_value should be non-negative.")
    if max_value > 1:
      # if max_value is larger than 1,
      #   the exponent could be positive and negative.
      #   e.g., log(max_value) > 0 when max_value > 1
      need_exponent_sign_bit = 1
    else:
      need_exponent_sign_bit = 0
  else:
    # max_value is not specified, so we cannot decide the range.
    # Then we need to put sign_bit for exponent to be safe
    need_exponent_sign_bit = 1
  return need_exponent_sign_bit 
Example #24
Source File: quantizers.py    From qkeras with Apache License 2.0
def __call__(self, x):
    non_sign_bits = self.bits - 1
    m = pow(2, non_sign_bits)
    m_i = pow(2, self.integer)
    p = _sigmoid(x / m_i) * m
    rp = 2.0 * (_round_through(p) / m) - 1.0
    u_law_p = tf.sign(rp) * tf.keras.backend.log(
        1 + self.u * tf.abs(rp)) / tf.keras.backend.log(1 + self.u)
    xq = m_i * tf.keras.backend.clip(u_law_p, -1.0 +
                                     (1.0 * self.symmetric) / m, 1.0 - 1.0 / m)
    return xq 
Example #25
Source File: quantizers.py    From qkeras with Apache License 2.0
def stochastic_round_po2(x):
  """Performs stochastic rounding for the power of two."""
  # TODO(hzhuang): test stochastic_round_po2 and constraint.
  # because quantizer is applied after constraint.
  y = tf.abs(x)
  eps = tf.keras.backend.epsilon()
  log2 = tf.keras.backend.log(2.0)

  x_log2 = tf.round(tf.keras.backend.log(y + eps) / log2)
  po2 = tf.cast(pow(2.0, tf.cast(x_log2, dtype="float32")), dtype="float32")
  left_val = tf.where(po2 > y, x_log2 - 1, x_log2)
  right_val = tf.where(po2 > y, x_log2, x_log2 + 1)
  # sampling in [2**left_val, 2**right_val].
  minval = 2 ** left_val
  maxval = 2 ** right_val
  val = tf.random.uniform(tf.shape(y), minval=minval, maxval=maxval)
  # use y as the threshold between 2**left_val and 2**right_val
  # so that the expected value of the rounded sample is y
  x_po2 = tf.where(y < val, left_val, right_val)
  """
  x_log2 = stochastic_round(tf.keras.backend.log(y + eps) / log2)
  sign = tf.sign(x)
  po2 = (
      tf.sign(x) *
      tf.cast(pow(2.0, tf.cast(x_log2, dtype="float32")), dtype="float32")
  )
  """
  return x_po2 
Example #26
Source File: quantizers.py    From qkeras with Apache License 2.0
def _get_scale(alpha, x, q):
  """Gets scaling factor for scaling the tensor per channel.

  Arguments:
    alpha: A float or string. When it is a string, it should be either "auto" or
      "auto_po2", in which case
        scale = sum(x * q, axis=all but last) / sum(q * q, axis=all but last)
    x: A tensor object. Its elements are in float.
    q: A tensor object. Its elements are in the quantized format of x.

  Returns:
    A scaling factor tensor or scalar for scaling the tensor per channel.
  """

  if isinstance(alpha, six.string_types) and "auto" in alpha:
    assert alpha in ["auto", "auto_po2"]
    x_shape = x.shape.as_list()
    len_axis = len(x_shape)
    if len_axis > 1:
      if K.image_data_format() == "channels_last":
        axis = list(range(len_axis - 1))
      else:
        axis = list(range(1, len_axis))
      qx = K.mean(tf.math.multiply(x, q), axis=axis, keepdims=True)
      qq = K.mean(tf.math.multiply(q, q), axis=axis, keepdims=True)
    else:
      qx = K.mean(x * q, axis=0, keepdims=True)
      qq = K.mean(q * q, axis=0, keepdims=True)
    scale = qx / (qq + K.epsilon())
    if alpha == "auto_po2":
      scale = K.pow(2.0,
                    tf.math.round(K.log(scale + K.epsilon()) / np.log(2.0)))
  elif alpha is None:
    scale = 1.0
  elif isinstance(alpha, np.ndarray):
    scale = alpha
  else:
    scale = float(alpha)
  return scale 
Example #27
Source File: example_qoctave.py    From qkeras with Apache License 2.0
def customLoss(y_true,y_pred):
  log1 = 1.5 * y_true * K.log(y_pred + 1e-9) * K.pow(1-y_pred, 2)
  log0 = 0.5 * (1 - y_true) * K.log((1 - y_pred) + 1e-9) * K.pow(y_pred, 2)
  return (- K.sum(K.mean(log0 + log1, axis = 0))) 
Example #28
Source File: utils.py    From neuron with GNU General Public License v3.0
def logtanh(x, a=1):
    """
    log * tanh

    See Also: arcsinh
    """
    return K.tanh(x) *  K.log(2 + a * abs(x)) 
Example #29
Source File: metrics.py    From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):
        """ categorical crossentropy loss """

        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        if self.use_float16:
            y_true = K.cast(y_true, 'float16')
            y_pred = K.cast(y_pred, 'float16')

        # scale and clip probabilities
        # this should not be necessary for softmax output.
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute log probability
        log_post = K.log(y_pred)  # likelihood

        # loss
        loss = - y_true * log_post

        # weighted loss
        if self.weights is not None:
            loss *= self.weights

        if self.vox_weights is not None:
            loss *= self.vox_weights

        # take the total loss
        # loss = K.batch_flatten(loss)
        mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
        tf.verify_tensor_all_finite(mloss, 'Loss not finite')
        return mloss 
Example #30
Source File: models.py    From neuron with GNU General Public License v3.0
def add_prior(input_model,
              prior_shape,
              name='prior_model',
              prefix=None,
              use_logp=True,
              final_pred_activation='softmax',
              add_prior_layer_reg=0):
    """
    Append post-prior layer to a given model
    """

    # naming
    model_name = name
    if prefix is None:
        prefix = model_name

    # prior input layer
    prior_input_name = '%s-input' % prefix
    prior_tensor = KL.Input(shape=prior_shape, name=prior_input_name)
    prior_tensor_input = prior_tensor
    like_tensor = input_model.output

    # operation varies depending on whether we log() prior or not.
    if use_logp:
        # name = '%s-log' % prefix
        # prior_tensor = KL.Lambda(_log_layer_wrap(add_prior_layer_reg), name=name)(prior_tensor)
        print("Breaking change: use_logp option now requires log input!", file=sys.stderr)
        merge_op = KL.add

    else:
        # using sigmoid to get the likelihood values between 0 and 1
        # note: they won't add up to 1.
        name = '%s_likelihood_sigmoid' % prefix
        like_tensor = KL.Activation('sigmoid', name=name)(like_tensor)
        merge_op = KL.multiply

    # merge the likelihood and prior layers into posterior layer
    name = '%s_posterior' % prefix
    post_tensor = merge_op([prior_tensor, like_tensor], name=name)

    # output prediction layer
    # we use a softmax to compute P(L_x|I) where x is each location
    pred_name = '%s_prediction' % prefix
    if final_pred_activation == 'softmax':
        assert use_logp, 'cannot do softmax when adding prior via P()'
        print("using final_pred_activation %s for %s" % (final_pred_activation, model_name))
        softmax_lambda_fcn = lambda x: tf.keras.activations.softmax(x, axis=-1)
        pred_tensor = KL.Lambda(softmax_lambda_fcn, name=pred_name)(post_tensor)

    else:
        pred_tensor = KL.Activation('linear', name=pred_name)(post_tensor)

    # create the model
    model_inputs = [*input_model.inputs, prior_tensor_input]
    model = Model(inputs=model_inputs, outputs=[pred_tensor], name=model_name)

    # compile
    return model