Python tensorflow.keras.backend.epsilon() Examples
The following are 30 code examples of tensorflow.keras.backend.epsilon(), collected from open-source projects. The original project, source file, and license are noted above each example.
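Before the examples, a quick reminder of what the function does: K.epsilon() returns the small constant (1e-07 by default) that Keras adds to denominators and log arguments to avoid division by zero and log(0), and K.set_epsilon() changes that value globally. A minimal sketch:

import tensorflow.keras.backend as K

print(K.epsilon())    # 1e-07 by default
K.set_epsilon(1e-5)   # change the fuzz factor globally
print(K.epsilon())    # 1e-05
K.set_epsilon(1e-7)   # restore the default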
Example #1
Source File: loss.py From Advanced-Deep-Learning-with-Keras with MIT License | 7 votes |
def focal_loss_binary(y_true, y_pred):
    """Binary cross-entropy focal loss
    """
    gamma = 2.0
    alpha = 0.25

    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))

    epsilon = K.epsilon()
    # clip to prevent NaN and Inf
    pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
    pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)

    weight = alpha * K.pow(1. - pt_1, gamma)
    fl1 = -K.sum(weight * K.log(pt_1))

    weight = (1 - alpha) * K.pow(pt_0, gamma)
    fl0 = -K.sum(weight * K.log(1. - pt_0))

    return fl1 + fl0
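As with any custom Keras loss, focal_loss_binary can be passed straight to model.compile. A minimal sketch, assuming a toy binary classifier; the architecture below is illustrative only and not part of the original project:

import tensorflow as tf
from tensorflow.keras import backend as K

# Illustrative model only; any tf.keras model with a sigmoid output works.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss=focal_loss_binary)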
Example #2
Source File: train.py From object-localization with MIT License | 6 votes |
def focal_loss(alpha=0.9, gamma=2):
    def focal_loss_with_logits(logits, targets, alpha, gamma, y_pred):
        weight_a = alpha * (1 - y_pred) ** gamma * targets
        weight_b = (1 - alpha) * y_pred ** gamma * (1 - targets)

        return (tf.math.log1p(tf.exp(-tf.abs(logits))) + tf.nn.relu(-logits)) * (weight_a + weight_b) + logits * weight_b

    def loss(y_true, y_pred):
        y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())
        logits = tf.math.log(y_pred / (1 - y_pred))

        loss = focal_loss_with_logits(logits=logits, targets=y_true, alpha=alpha, gamma=gamma, y_pred=y_pred)

        return tf.reduce_mean(loss)

    return loss
Example #3
Source File: loss.py From Advanced-Deep-Learning-with-Keras with MIT License | 6 votes |
def focal_loss_categorical(y_true, y_pred):
    """Categorical cross-entropy focal loss
    """
    gamma = 2.0
    alpha = 0.25

    # scale to ensure sum of prob is 1.0
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

    # clip the prediction value to prevent NaN and Inf
    epsilon = K.epsilon()
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)

    # calculate cross entropy
    cross_entropy = -y_true * K.log(y_pred)

    # calculate focal loss
    weight = alpha * K.pow(1 - y_pred, gamma)
    cross_entropy *= weight

    return K.sum(cross_entropy, axis=-1)
Example #4
Source File: mine-13.8.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 6 votes |
def mi_loss(self, y_true, y_pred):
    """MINE loss function

    Arguments:
        y_true (tensor): Not used since this is unsupervised learning
        y_pred (tensor): stack of predictions for joint T(x,y) and marginal T(x,y)
    """
    size = self.args.batch_size
    # lower half is pred for joint dist
    pred_xy = y_pred[0: size, :]

    # upper half is pred for marginal dist
    pred_x_y = y_pred[size: y_pred.shape[0], :]
    loss = K.mean(K.exp(pred_x_y))
    loss = K.clip(loss, K.epsilon(), np.finfo(float).max)
    loss = K.mean(pred_xy) - K.log(loss)
    return -loss
Example #5
Source File: nnet_survival.py From nnet-survival with MIT License | 6 votes |
def surv_likelihood(n_intervals):
    """Create custom Keras loss function for neural network survival model.

    Arguments
        n_intervals: the number of survival time intervals
    Returns
        Custom loss function that can be used with Keras
    """
    def loss(y_true, y_pred):
        """Required to have only 2 arguments by Keras.

        Arguments
            y_true: Tensor.
                First half of the values is 1 if individual survived that interval, 0 if not.
                Second half of the values is for individuals who failed, and is 1 for the time
                interval during which failure occurred, 0 for other intervals.
                See make_surv_array function.
            y_pred: Tensor, predicted survival probability (1-hazard probability) for each time interval.
        Returns
            Vector of losses for this minibatch.
        """
        cens_uncens = 1. + y_true[:, 0:n_intervals] * (y_pred - 1.)  # component for all individuals
        uncens = 1. - y_true[:, n_intervals:2 * n_intervals] * y_pred  # component for only uncensored individuals
        return K.sum(-K.log(K.clip(K.concatenate((cens_uncens, uncens)), K.epsilon(), None)), axis=-1)  # return -log likelihood
    return loss
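surv_likelihood is a loss factory: it is called once with the number of intervals, and the returned closure is what gets compiled into the model. A minimal sketch, assuming a single-layer model and an interval count chosen only for illustration (not taken from nnet-survival):

import tensorflow as tf

n_intervals = 10  # illustrative value, not from the original project
model = tf.keras.Sequential([
    tf.keras.layers.Dense(n_intervals, activation='sigmoid', input_shape=(20,)),
])
model.compile(optimizer='rmsprop', loss=surv_likelihood(n_intervals))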
Example #6
Source File: train.py From object-localization with MIT License | 6 votes |
def main():
    model = create_model()

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss={"coords": log_mse, "classes": focal_loss()},
                  loss_weights={"coords": 1, "classes": 1},
                  optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10, min_lr=1e-7, verbose=1, mode="max")

    model.summary()

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #7
Source File: train.py From object-localization with MIT License | 6 votes |
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss=loss, optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_dice:.2f}.h5", monitor="val_dice", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_dice", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_dice", factor=0.2, patience=5, min_lr=1e-6, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #8
Source File: predictor.py From Deep-Channel with MIT License | 6 votes |
def mcor(y_true, y_pred):
    # matthews_correlation
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

    return numerator / (denominator + K.epsilon())
Example #9
Source File: metrics.py From solaris with Apache License 2.0 | 6 votes |
def recall(y_true, y_pred):
    """Recall for foreground pixels.

    Calculates pixelwise recall TP/(TP + FN).
    """
    # count true positives
    truth = K.round(K.clip(y_true, K.epsilon(), 1))
    pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1))
    true_pos = K.sum(K.cast(
        K.all(K.stack([truth, pred_pos], axis=2), axis=2), dtype='float64'))
    truth_ct = K.sum(K.round(K.clip(y_true, K.epsilon(), 1)))
    if truth_ct == 0:
        return 0
    recall = true_pos / truth_ct
    return recall
Example #10
Source File: deepchannel_train.py From Deep-Channel with MIT License | 6 votes |
def mcor(y_true, y_pred):
    # Matthews correlation
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

    return numerator / (denominator + K.epsilon())
Example #11
Source File: run_clinicnet.py From CDSS with GNU General Public License v3.0 | 5 votes |
def f1(y_true, y_pred):
    K.set_epsilon(1e-05)

    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        K.set_epsilon(1e-05)
        #y_pred = tf.convert_to_tensor(y_pred, np.float32)
        #y_true = tf.convert_to_tensor(y_true, np.float32)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        K.set_epsilon(1e-05)
        #y_pred = tf.convert_to_tensor(y_pred, np.float32)
        #y_true = tf.convert_to_tensor(y_true, np.float32)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
Example #12
Source File: keras_words_subtoken_metrics.py From code2vec with MIT License | 5 votes |
def result(self):
    recall = tf.math.divide_no_nan(self.tp, self.tp + self.fn)
    precision = tf.math.divide_no_nan(self.tp, self.tp + self.fp)
    f1 = tf.math.divide_no_nan(2 * precision * recall, precision + recall + K.epsilon())
    return f1
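Here tf.math.divide_no_nan already returns 0 where the denominator is 0, so the K.epsilon() term in the F1 denominator is a second guard rather than a strict necessity. A small check of that behaviour:

import tensorflow as tf

print(tf.math.divide_no_nan(tf.constant(1.0), tf.constant(0.0)))  # 0.0 instead of inf/NaN
print(tf.math.divide_no_nan(tf.constant(6.0), tf.constant(3.0)))  # 2.0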
Example #13
Source File: metrics.py From solaris with Apache License 2.0 | 5 votes |
def precision(y_true, y_pred): """Precision for foreground pixels. Calculates pixelwise precision TP/(TP + FP). """ # count true positives truth = K.round(K.clip(y_true, K.epsilon(), 1)) pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1)) true_pos = K.sum(K.cast(K.all(K.stack([truth, pred_pos], axis=2), axis=2), dtype='float64')) pred_pos_ct = K.sum(pred_pos) + K.epsilon() precision = true_pos/pred_pos_ct return precision
Example #14
Source File: losses.py From ivis with GNU General Public License v2.0 | 5 votes |
def _euclidean_distance(x, y):
    # K.maximum with K.epsilon() keeps the argument of sqrt strictly positive,
    # avoiding NaN gradients when x and y coincide.
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=-1, keepdims=True), K.epsilon()))
Example #15
Source File: utils.py From snn_toolbox with MIT License | 5 votes |
def precision(y_true, y_pred): """Precision metric. Computes the precision, a metric for multi-label classification of how many selected items are relevant. Only computes a batch-wise average of precision. """ import tensorflow.keras.backend as k true_positives = k.sum(k.round(k.clip(y_true * y_pred, 0, 1))) predicted_positives = k.sum(k.round(k.clip(y_pred, 0, 1))) return true_positives / (predicted_positives + k.epsilon())
Example #16
Source File: nnet_survival.py From nnet-survival with MIT License | 5 votes |
def surv_likelihood_rnn(n_intervals):
    """Create custom Keras loss function for neural network survival model.

    Used for recurrent neural networks with time-distributed output.
    This function is very similar to surv_likelihood but deals with the extra
    dimension of y_true and y_pred that exists because of the time-distributed output.
    """
    def loss(y_true, y_pred):
        cens_uncens = 1. + y_true[0, :, 0:n_intervals] * (y_pred - 1.)  # component for all patients
        uncens = 1. - y_true[0, :, n_intervals:2 * n_intervals] * y_pred  # component for only uncensored patients
        return K.sum(-K.log(K.clip(K.concatenate((cens_uncens, uncens)), K.epsilon(), None)), axis=-1)  # return -log likelihood
    return loss
Example #17
Source File: attention.py From fancy-nlp with GNU General Public License v3.0 | 5 votes |
def call(self, inputs, mask=None):
    """Convert to query, key, value vectors, shaped [batch_size*num_heads, time_step, embed_dim]."""
    multihead_query = K.concatenate(tf.split(K.dot(inputs, self.w_q), self.num_heads, axis=2), axis=0)
    multihead_key = K.concatenate(tf.split(K.dot(inputs, self.w_k), self.num_heads, axis=2), axis=0)
    multihead_value = K.concatenate(tf.split(K.dot(inputs, self.w_v), self.num_heads, axis=2), axis=0)

    # scaled dot product
    scaled = K.int_shape(inputs)[-1] ** -0.5
    attend = K.batch_dot(multihead_query, multihead_key, axes=2) * scaled

    # apply mask before normalization (softmax)
    if mask is not None:
        multihead_mask = K.tile(mask, [self.num_heads, 1])
        attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 2)
        attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 1)

    # normalization
    attend = attend / K.cast(K.sum(attend, axis=-1, keepdims=True) + K.epsilon(), K.floatx())

    # apply attention
    attend = K.batch_dot(attend, multihead_value, axes=(2, 1))
    attend = tf.concat(tf.split(attend, self.num_heads, axis=0), axis=2)
    attend = K.dot(attend, self.w_final)

    if self.residual:
        attend = attend + inputs
    if self.normalize:
        # layer normalization over the feature axis
        mean = K.mean(attend, axis=-1, keepdims=True)
        std = K.std(attend, axis=-1, keepdims=True)
        attend = self.gamma * (attend - mean) / (std + K.epsilon()) + self.beta

    return attend
Example #18
Source File: predictor.py From Deep-Channel with MIT License | 5 votes |
def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision
Example #19
Source File: run_clinicnet.py From CDSS with GNU General Public License v3.0 | 5 votes |
def precision(y_true, y_pred):
    K.set_epsilon(1e-05)
    #y_pred = tf.convert_to_tensor(y_pred, np.float32)
    #y_true = tf.convert_to_tensor(y_true, np.float32)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example #20
Source File: run_clinicnet.py From CDSS with GNU General Public License v3.0 | 5 votes |
def recall(y_true, y_pred):
    K.set_epsilon(1e-05)
    #y_pred = tf.convert_to_tensor(y_pred, np.float32)
    #y_true = tf.convert_to_tensor(y_true, np.float32)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example #21
Source File: predictor.py From Deep-Channel with MIT License | 5 votes |
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
Example #22
Source File: WCCE.py From TF.Keras-Commonly-used-models with Apache License 2.0 | 5 votes |
def weighted_categorical_crossentropy(weights):
    """A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5, 2, 10])  # class one at 0.5, class 2 twice the normal weights, class 3 10x.
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss, optimizer='adam')
    """
    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss

    return loss
Example #23
Source File: run_clinicnet.py From CDSS with GNU General Public License v3.0 | 5 votes |
def f1_loss(y_true, y_pred):
    tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
    tn = K.sum(K.cast((1 - y_true) * (1 - y_pred), 'float'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    f1 = 2 * p * r / (p + r + K.epsilon())
    f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)
    return 1 - K.mean(f1)
Example #24
Source File: predictor.py From Deep-Channel with MIT License | 5 votes |
def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall
Example #25
Source File: loss.py From keras-YOLOv3-model-set with MIT License | 5 votes |
def softmax_focal_loss(y_true, y_pred, gamma=2.0, alpha=0.25):
    """
    Compute softmax focal loss.
    Reference Paper:
        "Focal Loss for Dense Object Detection"
        https://arxiv.org/abs/1708.02002

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted logits,
            tensor of shape (?, num_boxes, num_classes).
        gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
        alpha: optional alpha weighting factor to balance positives vs negatives.

    # Returns
        softmax_focal_loss: Softmax focal loss, tensor of shape (?, num_boxes).
    """
    # Scale predictions so that the class probas of each sample sum to 1
    #y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

    # Clip the prediction value to prevent NaN's and Inf's
    #epsilon = K.epsilon()
    #y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    y_pred = tf.nn.softmax(y_pred)
    y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)

    # Calculate Cross Entropy
    cross_entropy = -y_true * tf.math.log(y_pred)

    # Calculate Focal Loss
    softmax_focal_loss = alpha * tf.pow(1 - y_pred, gamma) * cross_entropy

    return softmax_focal_loss
Example #26
Source File: train.py From object-localization with MIT License | 5 votes |
def loss(y_true, y_pred):
    def dice_coefficient(y_true, y_pred):
        numerator = 2 * tf.reduce_sum(y_true * y_pred, axis=-1)
        denominator = tf.reduce_sum(y_true + y_pred, axis=-1)

        return numerator / (denominator + epsilon())

    return binary_crossentropy(y_true, y_pred) - tf.math.log(dice_coefficient(y_true, y_pred) + epsilon())
Example #27
Source File: layers.py From deepchem with MIT License | 5 votes |
def _cosine_dist(x, y):
    """Computes the inner product (cosine distance) between two tensors.

    Parameters
    ----------
    x: tf.Tensor
        Input Tensor
    y: tf.Tensor
        Input Tensor
    """
    denom = (backend.sqrt(backend.sum(tf.square(x)) * backend.sum(tf.square(y))) + backend.epsilon())
    return backend.dot(x, tf.transpose(y)) / denom
Example #28
Source File: layers.py From deepchem with MIT License | 5 votes |
def __init__(self,
             batch_size,
             n_input=128,
             gaussian_expand=False,
             init='glorot_uniform',
             activation='tanh',
             epsilon=1e-3,
             momentum=0.99,
             **kwargs):
    """
    Parameters
    ----------
    batch_size: int
        number of molecules in a batch
    n_input: int, optional
        number of features for each input molecule
    gaussian_expand: boolean, optional
        Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional
        Weight initialization for filters.
    activation: str, optional
        Activation function applied
    """
    super(WeaveGather, self).__init__(**kwargs)
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
    self.epsilon = epsilon
    self.momentum = momentum
Example #29
Source File: layers.py From deepchem with MIT License | 5 votes |
def get_config(self):
    config = super(WeaveGather, self).get_config()
    config['batch_size'] = self.batch_size
    config['n_input'] = self.n_input
    config['gaussian_expand'] = self.gaussian_expand
    config['init'] = self.init
    config['activation'] = self.activation
    config['epsilon'] = self.epsilon
    config['momentum'] = self.momentum
    return config
Example #30
Source File: graph.py From spektral with MIT License | 5 votes |
def normalize_A(A):
    """
    Computes symmetric normalization of A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k.
    """
    D = degrees(A)
    D = tf.sqrt(D)[:, None] + K.epsilon()
    perm = (0, 2, 1) if K.ndim(A) == 3 else (1, 0)
    output = (A / D) / ops.transpose(D, perm=perm)
    return output