Python tensorflow.keras.backend.maximum() Examples
The following are 12 code examples of tensorflow.keras.backend.maximum(), collected from open-source projects; the project and source file for each example are noted above it. You may also want to check out the other available functions and classes of the module tensorflow.keras.backend.
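Before the examples, a minimal usage sketch with invented values: K.maximum(x, y) returns the element-wise maximum of two tensors and broadcasts scalars, so clamping against a scalar 0.0 is equivalent to a ReLU.

import tensorflow as tf
from tensorflow.keras import backend as K

a = K.constant([[1.0, -2.0], [3.0, 0.5]])

# Element-wise maximum against a broadcast scalar; with 0.0 this acts as a ReLU.
clamped = K.maximum(a, 0.0)
print(K.eval(clamped))  # [[1.  0. ], [3.  0.5]]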
Example #1
Source File: backend_keras.py From kapre with MIT License
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
    """[K] Convert (linear) amplitude to decibel (log10(x)).

    Parameters
    ----------
    x: Keras *batch* tensor or variable. It has to be batch because of the
        sample-wise `K.max()`.

    amin: minimum amplitude. Amplitudes smaller than `amin` are set to this.

    dynamic_range: dynamic range in decibel.
    """
    log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    if K.ndim(x) > 1:
        axis = tuple(range(K.ndim(x))[1:])
    else:
        axis = None

    log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)  # [-?, 0]
    log_spec = K.maximum(log_spec, -1 * dynamic_range)  # [-80, 0]
    return log_spec
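A hedged usage sketch, assuming the imports the kapre source relies on (numpy as np and the Keras backend as K) and an invented batch of magnitude spectrograms:

import numpy as np
from tensorflow.keras import backend as K

# Invented batch of 2 magnitude spectrograms, shape (batch, freq, time).
x = K.constant(np.random.rand(2, 128, 44).astype('float32'))

db = amplitude_to_decibel(x)
# Per sample, the maximum maps to 0 dB and everything below -80 dB is clipped,
# so K.eval(db) lies in [-80, 0].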
Example #2
Source File: ttfs_corrective.py From snn_toolbox with MIT License
def get_psp(self, output_spikes):
    new_spiketimes = tf.where(k.greater(output_spikes, 0),
                              k.ones_like(output_spikes) * self.time,
                              self.last_spiketimes)
    new_spiketimes = tf.where(k.less(output_spikes, 0),
                              k.zeros_like(output_spikes) * self.time,
                              new_spiketimes)
    assign_new_spiketimes = tf.assign(self.last_spiketimes, new_spiketimes)
    with tf.control_dependencies([assign_new_spiketimes]):
        last_spiketimes = self.last_spiketimes + 0  # Dummy op
        # psp = k.maximum(0., tf.divide(self.dt, last_spiketimes))
        psp = tf.where(k.greater(last_spiketimes, 0),
                       k.ones_like(output_spikes) * self.dt,
                       k.zeros_like(output_spikes))
    return psp
Example #3
Source File: metrics.py From neuron with GNU General Public License v3.0
def _hard_max(tens, axis):
    """
    We can't use the argmax function in a loss, as it is not differentiable.
    We can use it in a metric, but not in a loss function. Therefore, we
    replace the 'hard max' operation (i.e. argmax + one-hot) with this
    approximation.
    """
    tensmax = K.max(tens, axis=axis, keepdims=True)
    eps_hot = K.maximum(tens - tensmax + K.epsilon(), 0)
    one_hot = eps_hot / K.epsilon()
    return one_hot
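Why this works: at the maximal entry, tens - tensmax is 0, so eps_hot equals K.epsilon() and the division yields 1; entries more than K.epsilon() below the max are clamped to 0 by K.maximum. A small check with invented values:

from tensorflow.keras import backend as K

t = K.constant([[0.1, 0.7, 0.2]])
print(K.eval(_hard_max(t, axis=-1)))  # ~[[0., 1., 0.]]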
Example #4
Source File: model_triplet.py From image_search_engine with MIT License
def triplet_loss(y_true, y_pred, alpha=0.4):
    """
    https://github.com/KinWaiCheuk/Triplet-net-keras/blob/master/Triplet%20NN%20Test%20on%20MNIST.ipynb
    Implementation of the triplet loss function.

    Arguments:
    y_true -- true labels, required when you define a loss in Keras;
              not used in this function.
    y_pred -- python list containing three objects:
              anchor -- the encodings for the anchor data
              positive -- the encodings for the positive data (similar to anchor)
              negative -- the encodings for the negative data (different from anchor)
    Returns:
    loss -- real number, value of the loss
    """
    total_length = y_pred.shape.as_list()[-1]

    anchor = y_pred[:, 0:int(total_length * 1 / 3)]
    positive = y_pred[:, int(total_length * 1 / 3):int(total_length * 2 / 3)]
    negative = y_pred[:, int(total_length * 2 / 3):int(total_length * 3 / 3)]

    # distance between the anchor and the positive
    pos_dist = K.sum(K.square(anchor - positive), axis=1)

    # distance between the anchor and the negative
    neg_dist = K.sum(K.square(anchor - negative), axis=1)

    # compute loss
    basic_loss = pos_dist - neg_dist + alpha
    loss = K.maximum(basic_loss, 0.0)

    return loss
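A sketch of wiring this loss into a Keras model; the encoder and input shapes below are invented for illustration, and the only real requirement is that the model's output concatenates the anchor, positive, and negative encodings along the last axis so triplet_loss can slice them apart:

from tensorflow.keras import layers, Model

def make_triplet_model(encoder, input_shape=(28, 28, 1)):
    # encoder is an assumed embedding network shared across all three branches.
    a = layers.Input(shape=input_shape)
    p = layers.Input(shape=input_shape)
    n = layers.Input(shape=input_shape)
    merged = layers.Concatenate(axis=-1)([encoder(a), encoder(p), encoder(n)])
    return Model(inputs=[a, p, n], outputs=merged)

# model = make_triplet_model(my_encoder)  # my_encoder is a hypothetical embedding network
# model.compile(optimizer='adam', loss=triplet_loss)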
Example #5
Source File: utils.py From aitom with GNU General Public License v3.0
def correlation_coefficient_loss(y_true, y_pred):
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x - mx, y - my
    r_num = K.sum(tf.multiply(xm, ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den

    # Clamp the correlation coefficient to [-1, 1] before squaring.
    r = K.maximum(K.minimum(r, 1.0), -1.0)
    return 1 - K.square(r)
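A hedged sanity check with invented inputs: since the loss is 1 - r^2, perfectly correlated and perfectly anti-correlated pairs both score near 0, while unrelated inputs score near 1:

import tensorflow as tf
from tensorflow.keras import backend as K

y = K.constant([1.0, 2.0, 3.0, 4.0])
print(K.eval(correlation_coefficient_loss(y, 2.0 * y + 1.0)))  # ~0.0, r = 1
print(K.eval(correlation_coefficient_loss(y, -y)))             # ~0.0, r = -1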
Example #6
Source File: loss.py From keras-YOLOv3-model-set with MIT License
def softmax_focal_loss(y_true, y_pred, gamma=2.0, alpha=0.25):
    """
    Compute softmax focal loss.
    Reference Paper:
        "Focal Loss for Dense Object Detection"
        https://arxiv.org/abs/1708.02002

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted logits,
            tensor of shape (?, num_boxes, num_classes).
        gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
        alpha: optional alpha weighting factor to balance positives vs negatives.

    # Returns
        softmax_focal_loss: Softmax focal loss, tensor of shape (?, num_boxes).
    """

    # Scale predictions so that the class probas of each sample sum to 1
    #y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

    # Clip the prediction value to prevent NaN's and Inf's
    #epsilon = K.epsilon()
    #y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    y_pred = tf.nn.softmax(y_pred)
    y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)

    # Calculate Cross Entropy
    cross_entropy = -y_true * tf.math.log(y_pred)

    # Calculate Focal Loss
    softmax_focal_loss = alpha * tf.pow(1 - y_pred, gamma) * cross_entropy

    return softmax_focal_loss
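A hedged spot check with invented logits and one-hot targets. Note that the function as written returns a per-class tensor, so a scalar per-box loss would typically sum over the last axis:

import tensorflow as tf

y_true = tf.constant([[[0.0, 1.0, 0.0]]])   # (batch, num_boxes, num_classes)
logits = tf.constant([[[0.5, 2.0, -1.0]]])
per_class = softmax_focal_loss(y_true, logits)
per_box = tf.reduce_sum(per_class, axis=-1)  # shape (1, 1)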
Example #7
Source File: loss.py From keras-YOLOv3-model-set with MIT License
def box_iou(b1, b2):
    """
    Return iou tensor

    Parameters
    ----------
    b1: tensor, shape=(i1,...,iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1,...,iN, j)
    """
    # Expand dim to apply broadcasting.
    b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh/2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    # Expand dim to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh/2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou
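A hedged numeric check with invented boxes: an identical pair should give IoU 1 and a far-away pair 0. Because b1 gains a broadcast dimension inside the function, the result has shape (num_b1, num_b2):

from tensorflow.keras import backend as K

b1 = K.constant([[0.5, 0.5, 1.0, 1.0]])                         # one box, xywh
b2 = K.constant([[0.5, 0.5, 1.0, 1.0], [5.0, 5.0, 1.0, 1.0]])   # same box plus a disjoint one
print(K.eval(box_iou(b1, b2)))  # ~[[1.0, 0.0]]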
Example #8
Source File: loss.py From keras-YOLOv3-model-set with MIT License
def box_iou(b1, b2):
    """
    Return iou tensor

    Parameters
    ----------
    b1: tensor, shape=(i1,...,iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1,...,iN, j)
    """
    # Expand dim to apply broadcasting.
    #b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh/2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    # Expand dim to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh/2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou
Example #9
Source File: FRN.py From TF.Keras-Commonly-used-models with Apache License 2.0
def frn_layer_paper(x, tau, beta, gamma, epsilon=1e-6):
    # x: Input tensor of shape [BxHxWxC].
    # tau, beta, gamma: Variables of shape [1, 1, 1, C].
    # eps: A scalar constant or learnable variable.

    # Compute the mean norm of activations per channel.
    nu2 = tf.reduce_mean(tf.square(x), axis=[1, 2], keepdims=True)

    # Perform FRN.
    x = x * tf.math.rsqrt(nu2 + tf.abs(epsilon))

    # Return after applying the Offset-ReLU non-linearity.
    return tf.maximum(gamma * x + beta, tau)
Example #10
Source File: FRN.py From TF.Keras-Commonly-used-models with Apache License 2.0
def frn_layer_keras(x, tau, beta, gamma, epsilon=1e-6):
    # x: Input tensor of shape [BxHxWxC].
    # tau, beta, gamma: Variables of shape [1, 1, 1, C].
    # eps: A scalar constant or learnable variable.

    # Compute the mean norm of activations per channel.
    nu2 = K.mean(K.square(x), axis=[1, 2], keepdims=True)

    # Perform FRN.
    x = x * 1 / K.sqrt(nu2 + K.abs(epsilon))

    # Return after applying the Offset-ReLU non-linearity.
    return K.maximum(gamma * x + beta, tau)
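A hedged sketch of calling the Keras variant directly; the per-channel parameters are created as plain tensors here for illustration, whereas in a real layer they would be trainable weights of shape [1, 1, 1, C]:

from tensorflow.keras import backend as K

C = 8  # invented channel count
tau = K.zeros((1, 1, 1, C))
beta = K.zeros((1, 1, 1, C))
gamma = K.ones((1, 1, 1, C))

x = K.random_normal((2, 16, 16, C))       # invented activations, [BxHxWxC]
y = frn_layer_keras(x, tau, beta, gamma)  # same shape as x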
Example #11
Source File: metrics.py From neuron with GNU General Public License v3.0
def dice(self, y_true, y_pred):
    """
    compute dice for given Tensors
    """
    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    if self.input_type == 'prob':
        # We assume that y_true is probabilistic, but just in case:
        if self.re_norm:
            y_true = tf.div_no_nan(y_true, K.sum(y_true, axis=-1, keepdims=True))
        y_true = K.clip(y_true, K.epsilon(), 1)

        # make sure pred is a probability
        if self.re_norm:
            y_pred = tf.div_no_nan(y_pred, K.sum(y_pred, axis=-1, keepdims=True))
        y_pred = K.clip(y_pred, K.epsilon(), 1)

    # Prepare the volumes to operate on
    # If we're doing 'hard' Dice, then we will prepare one-hot-based matrices of size
    # [batch_size, nb_voxels, nb_labels], where for each voxel in each batch entry,
    # the entries are either 0 or 1
    if self.dice_type == 'hard':

        # if given predicted probability, transform to "hard max"
        if self.input_type == 'prob':
            if self.approx_hard_max:
                y_pred_op = _hard_max(y_pred, axis=-1)
                y_true_op = _hard_max(y_true, axis=-1)
            else:
                y_pred_op = _label_to_one_hot(K.argmax(y_pred, axis=-1), self.nb_labels)
                y_true_op = _label_to_one_hot(K.argmax(y_true, axis=-1), self.nb_labels)

        # if given predicted label, transform to one hot notation
        else:
            assert self.input_type == 'max_label'
            y_pred_op = _label_to_one_hot(y_pred, self.nb_labels)
            y_true_op = _label_to_one_hot(y_true, self.nb_labels)

    # If we're doing soft Dice, require prob output, and the data already is as we need it
    # [batch_size, nb_voxels, nb_labels]
    else:
        assert self.input_type == 'prob', "cannot do soft dice with max_label input"
        y_pred_op = y_pred
        y_true_op = y_true

    # reshape to [batch_size, nb_voxels, nb_labels]
    batch_size = K.shape(y_true)[0]
    y_pred_op = K.reshape(y_pred_op, [batch_size, -1, K.shape(y_true)[-1]])
    y_true_op = K.reshape(y_true_op, [batch_size, -1, K.shape(y_true)[-1]])

    # compute dice for each entry in batch.
    # dice will now be [batch_size, nb_labels]
    top = 2 * K.sum(y_true_op * y_pred_op, 1)
    bottom = K.sum(K.square(y_true_op), 1) + K.sum(K.square(y_pred_op), 1)
    # make sure we have no 0s on the bottom
    bottom = K.maximum(bottom, self.area_reg)
    return top / bottom
Example #12
Source File: loss.py From keras-YOLOv3-model-set with MIT License
def box_giou(b_true, b_pred):
    """
    Calculate GIoU loss on anchor boxes
    Reference Paper:
        "Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression"
        https://arxiv.org/abs/1902.09630

    Parameters
    ----------
    b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh

    Returns
    -------
    giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
    """
    b_true_xy = b_true[..., :2]
    b_true_wh = b_true[..., 2:4]
    b_true_wh_half = b_true_wh/2.
    b_true_mins = b_true_xy - b_true_wh_half
    b_true_maxes = b_true_xy + b_true_wh_half

    b_pred_xy = b_pred[..., :2]
    b_pred_wh = b_pred[..., 2:4]
    b_pred_wh_half = b_pred_wh/2.
    b_pred_mins = b_pred_xy - b_pred_wh_half
    b_pred_maxes = b_pred_xy + b_pred_wh_half

    intersect_mins = K.maximum(b_true_mins, b_pred_mins)
    intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
    b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
    union_area = b_true_area + b_pred_area - intersect_area

    # calculate IoU, add epsilon in denominator to avoid dividing by 0
    iou = intersect_area / (union_area + K.epsilon())

    # get enclosed area
    enclose_mins = K.minimum(b_true_mins, b_pred_mins)
    enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
    enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]

    # calculate GIoU, add epsilon in denominator to avoid dividing by 0
    giou = iou - 1.0 * (enclose_area - union_area) / (enclose_area + K.epsilon())
    giou = K.expand_dims(giou, -1)

    return giou
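A hedged numeric check (boxes invented): for identical boxes the enclosing box equals the union, the penalty term vanishes, and GIoU reduces to IoU = 1; for disjoint boxes GIoU turns negative, which is what gives the loss a gradient even when the IoU is 0:

from tensorflow.keras import backend as K

b = K.constant([[0.5, 0.5, 1.0, 1.0]])     # xywh
far = K.constant([[3.0, 0.5, 1.0, 1.0]])
print(K.eval(box_giou(b, b)))    # ~[[1.0]]
print(K.eval(box_giou(b, far)))  # ~[[-0.43]]: IoU 0 minus the enclosure penalty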