Python keras.backend.flatten() Examples
The following are 29 code examples of keras.backend.flatten(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
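As a quick orientation before the examples: keras.backend.flatten() reshapes a tensor of any rank into a rank-1 tensor, collapsing the batch axis along with all the others (unlike the keras.layers.Flatten layer, which keeps the batch dimension). The following minimal sketch, not taken from any of the projects below, illustrates the behavior:

from keras import backend as K
import numpy as np

x = K.constant(np.arange(6).reshape(2, 3))  # rank-2 tensor, shape (2, 3)
flat = K.flatten(x)                         # rank-1 tensor, shape (6,)
print(K.int_shape(flat))                    # (6,)
print(K.eval(flat))                         # [0. 1. 2. 3. 4. 5.]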
Example #1
Source File: unet.py From BraTs with MIT License | 7 votes |
def dice_coef(y_true, y_pred, smooth=1.0):
    ''' Dice Coefficient

    Args:
        y_true (np.array): Ground Truth Heatmap (Label)
        y_pred (np.array): Prediction Heatmap
    '''
    class_num = 2
    for i in range(class_num):
        y_true_f = K.flatten(y_true[:,:,:,i])
        y_pred_f = K.flatten(y_pred[:,:,:,i])
        intersection = K.sum(y_true_f * y_pred_f)
        loss = ((2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))
        if i == 0:
            total_loss = loss
        else:
            total_loss = total_loss + loss
    total_loss = total_loss / class_num
    return total_loss
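Dice functions like the one above are usually negated so they can serve as a training loss. A minimal sketch (not part of the BraTs project, and assuming a ready-to-compile Keras model named model):

def dice_coef_loss(y_true, y_pred):
    # Maximizing the Dice coefficient is equivalent to minimizing its negation.
    return -dice_coef(y_true, y_pred)

# model.compile(optimizer='adam', loss=dice_coef_loss, metrics=[dice_coef])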
Example #2
Source File: losses.py From deep-learning-explorer with Apache License 2.0 | 6 votes |
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    ''' Softmax cross-entropy loss function for Pascal VOC segmentation
    and models which do not perform softmax (TensorFlow only).
    '''
    y_pred = KB.reshape(y_pred, (-1, KB.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    y_true = KB.one_hot(tf.to_int32(KB.flatten(y_true)),
                        KB.int_shape(y_pred)[-1] + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -KB.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = KB.mean(cross_entropy)
    return cross_entropy_mean
Example #3
Source File: learn_labelembedding.py From semantic-embeddings with MIT License | 6 votes |
def labelembed_model(base_model, num_classes, **kwargs):
    input_ = base_model.input
    embedding = base_model.output

    out = keras.layers.Activation('relu')(embedding)
    out = keras.layers.BatchNormalization(name = 'embedding_bn')(out)
    out1 = keras.layers.Dense(num_classes, name = 'prob')(out)
    out2 = keras.layers.Dense(num_classes, name = 'out2')(
        keras.layers.Lambda(lambda x: K.stop_gradient(x))(out))

    cls_input_ = keras.layers.Input((1,), name = 'labels')
    cls_embedding_layer = keras.layers.Embedding(num_classes, num_classes,
                                                 embeddings_initializer = 'identity',
                                                 name = 'labelembeddings')
    cls_embedding = keras.layers.Flatten()(cls_embedding_layer(cls_input_))

    loss = keras.layers.Lambda(
        lambda x: labelembed_loss(x[0], x[1], x[2], K.flatten(x[3]),
                                  num_classes = num_classes, **kwargs)[:,None],
        name = 'labelembed_loss')([out1, out2, cls_embedding, cls_input_])

    return keras.models.Model([input_, cls_input_], [embedding, out1, loss])
Example #4
Source File: metrics.py From deeposlandia with MIT License | 6 votes |
def iou(actual, predicted):
    """Compute Intersection over Union statistic (i.e. Jaccard Index)

    See https://en.wikipedia.org/wiki/Jaccard_index

    Parameters
    ----------
    actual : list
        Ground-truth labels
    predicted : list
        Predicted labels

    Returns
    -------
    float
        Intersection over Union value
    """
    actual = backend.flatten(actual)
    predicted = backend.flatten(predicted)
    intersection = backend.sum(actual * predicted)
    union = backend.sum(actual) + backend.sum(predicted) - intersection
    return 1.0 * intersection / union
Example #5
Source File: loss.py From Looking-to-Listen-at-the-Cocktail-Party with MIT License | 6 votes |
def audio_discriminate_loss2(gamma=0.1, beta=2*0.1, people_num=2):
    def loss_func(S_true, S_pred, gamma=gamma, beta=beta, people_num=people_num):
        sum_mtr = K.zeros_like(S_true[:,:,:,:,0])
        for i in range(people_num):
            sum_mtr += K.square(S_true[:,:,:,:,i] - S_pred[:,:,:,:,i])
            for j in range(people_num):
                if i != j:
                    sum_mtr -= gamma * (K.square(S_true[:,:,:,:,i] - S_pred[:,:,:,:,j]))

        for i in range(people_num):
            for j in range(i+1, people_num):
                #sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                #sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass

        #sum = K.sum(K.maximum(K.flatten(sum_mtr), 0))
        loss = K.mean(K.flatten(sum_mtr))
        return loss
    return loss_func
Example #6
Source File: model_loss.py From speech_separation with MIT License | 6 votes |
def audio_discriminate_loss2(gamma=0.1, beta=2*0.1, num_speaker=2):
    def loss_func(S_true, S_pred, gamma=gamma, beta=beta, num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:,:,:,:,0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:,:,:,:,i] - S_pred[:,:,:,:,i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma * (K.square(S_true[:,:,:,:,i] - S_pred[:,:,:,:,j]))

        for i in range(num_speaker):
            for j in range(i+1, num_speaker):
                #sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                #sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass

        #sum = K.sum(K.maximum(K.flatten(sum_mtr), 0))
        loss = K.mean(K.flatten(sum_mtr))
        return loss
    return loss_func
Example #7
Source File: loss.py From Looking-to-Listen-at-the-Cocktail-Party with MIT License | 5 votes |
def audio_discriminate_loss(gamma=0.1, people_num=2):
    def loss_func(S_true, S_pred, gamma=gamma, people_num=people_num):
        sum = 0
        for i in range(people_num):
            sum += K.sum(K.flatten((K.square(S_true[:,:,:,i] - S_pred[:,:,:,i]))))
            for j in range(people_num):
                if i != j:
                    sum -= gamma * K.sum(K.flatten((K.square(S_true[:,:,:,i] - S_pred[:,:,:,j]))))

        loss = sum / (people_num*298*257*2)
        return loss
    return loss_func
Example #8
Source File: zf_unet_224_model.py From ZF_UNET_224_Pretrained_Model with GNU General Public License v3.0 | 5 votes |
def jacard_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + 1.0)
Example #9
Source File: zf_unet_224_model.py From ZF_UNET_224_Pretrained_Model with GNU General Public License v3.0 | 5 votes |
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0)
Example #10
Source File: metrics.py From deeposlandia with MIT License | 5 votes |
def dice_coef(actual, predicted, eps=1e-3):
    """Dice coef

    See https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient

    Examples at:
    - https://github.com/jocicmarko/ultrasound-nerve-segmentation/blob/master/train.py#L23
    - https://github.com/ZFTurbo/ZF_UNET_224_Pretrained_Model/blob/master/zf_unet_224_model.py#L36

    Parameters
    ----------
    actual : list
        Ground-truth labels
    predicted : list
        Predicted labels
    eps : float
        Epsilon value to add numerical stability

    Returns
    -------
    float
        Dice coef value
    """
    y_true_f = backend.flatten(actual)
    y_pred_f = backend.flatten(predicted)
    intersection = backend.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + eps) / (
        backend.sum(y_true_f) + backend.sum(y_pred_f) + eps
    )
Example #11
Source File: crf.py From keras-crf-layer with MIT License | 5 votes |
def call(self, inputs, mask=None, **kwargs):
    inputs, sequence_lengths = inputs
    self.sequence_lengths = K.flatten(sequence_lengths)
    y_pred = self.viterbi_decode(inputs, self.sequence_lengths)
    nb_classes = self.input_spec[0].shape[2]
    y_pred_one_hot = K.one_hot(y_pred, nb_classes)
    return K.in_train_phase(inputs, y_pred_one_hot)
Example #12
Source File: vae_common.py From keras_experiments with The Unlicense | 5 votes |
def vae_loss(self, x, x_decoded_mean_squash, z_mean, z_log_var):
    x = K.flatten(x)
    x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
    img_rows, img_cols = self._img_rows, self._img_cols
    # generative or reconstruction loss
    xent_loss = img_rows * img_cols * \
        metrics.binary_crossentropy(x, x_decoded_mean_squash)
    # Kullback-Leibler divergence loss
    kl_loss = - 0.5 * K.mean(
        1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return K.mean(xent_loss + kl_loss)
Example #13
Source File: metrics.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def hard_jacard_coef_border(y_true, y_pred, smooth=1e-3):
    # K.flatten(K.round(y_true[..., 0]))
    y_true_f = K.flatten(K.round(y_true[..., 1]))
    y_pred_f = K.flatten(K.round(y_pred[..., 1]))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100.0 * (intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth)
Example #14
Source File: get_models.py From 3D-Medical-Segmentation-GAN with Apache License 2.0 | 5 votes |
def dice_coefficient(y_true, y_pred):
    smoothing_factor = 1
    flat_y_true = K.flatten(y_true)
    flat_y_pred = K.flatten(y_pred)
    return (2. * K.sum(flat_y_true * flat_y_pred) + smoothing_factor) / (K.sum(flat_y_true) + K.sum(flat_y_pred) + smoothing_factor)
Example #15
Source File: metrics.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def hard_dice_coef_border(y_true, y_pred, smooth=1e-3):
    y_true_f = K.flatten(K.round(y_true[..., 1]))
    y_pred_f = K.flatten(K.round(y_pred[..., 1]))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
Example #16
Source File: metrics.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def hard_jacard_coef_mask(y_true, y_pred, smooth=1e-3):
    # K.flatten(K.round(y_true[..., 0]))
    y_true_f = K.flatten(K.round(y_true[..., 0]))
    y_pred_f = K.flatten(K.round(y_pred[..., 0]))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100.0 * (intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth)
Example #17
Source File: metrics.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def hard_dice_coef_mask(y_true, y_pred, smooth=1e-3):
    y_true_f = K.flatten(K.round(y_true[..., 0]))
    y_pred_f = K.flatten(K.round(y_pred[..., 0]))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
Example #18
Source File: metrics.py From keras-unet with MIT License | 5 votes |
def iou(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth)
Example #19
Source File: train.py From u-net with MIT License | 5 votes |
def dice_coef(y_true, y_pred):
    # `smooth` is a module-level constant in the original script (e.g. smooth = 1.).
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f*y_true_f) + K.sum(y_pred_f*y_pred_f) + smooth)
Example #20
Source File: variational_autoencoder_deconv.py From pCVR with Apache License 2.0 | 5 votes |
def vae_loss(self, x, x_decoded_mean_squash):
    # img_rows, img_cols, z_mean and z_log_var are module-level globals
    # in the original script.
    x = K.flatten(x)
    x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
    xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return K.mean(xent_loss + kl_loss)
Example #21
Source File: ChainCRF.py From emnlp2017-bilstm-cnn-crf with Apache License 2.0 | 5 votes |
def batch_gather(reference, indices):
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices)
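To make the flat-index arithmetic above concrete: for a (batch_size, n_classes) reference, batch_gather linearizes each (row, column) pair into an index of the flattened matrix and selects reference[i, indices[i]] for every row i. A small check with values chosen here purely for illustration:

import numpy as np
from keras import backend as K

scores = K.constant(np.array([[0.1, 0.9],
                              [0.8, 0.2]]))           # shape (2, 2)
picks = K.constant(np.array([1, 0]), dtype='int32')   # one column per row
# flat_indices = [0, 1] * 2 + [1, 0] = [1, 2], so this selects
# scores[0, 1] and scores[1, 0]:
print(K.eval(batch_gather(scores, picks)))            # [0.9 0.8]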
Example #22
Source File: layers.py From delft with Apache License 2.0 | 5 votes |
def batch_gather(reference, indices):
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices)
Example #23
Source File: LUNA_unet.py From Luna2016-Lung-Nodule-Detection with MIT License | 5 votes |
def dice_coef(y_true, y_pred):
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    smooth = 0.
    intersection = K.sum(y_true*y_pred)
    return (2. * intersection + smooth) / (K.sum(y_true) + K.sum(y_pred) + smooth)
Example #24
Source File: keras_u_net.py From keras-u-net with MIT License | 5 votes |
def dice_coef(y_true, y_pred):
    # `smooth` is a module-level constant in the original script.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
Example #25
Source File: roi_align_layer.py From maskrcnn with MIT License | 5 votes |
def call(self, inputs):
    features = inputs[0]
    rois = inputs[1]
    n_roi_boxes = K.shape(rois)[1]
    # rois also contains [0, 0, 0, 0] RoIs, but they are processed as-is
    # so that the number of elements per batch stays the same.

    # Preparation for crop_and_resize:
    # flatten rois along all axes except axis 0 (the batch dimension).
    roi_unstack = K.concatenate(tf.unstack(rois), axis=0)
    # Indices mapping each RoI in roi_unstack to its batch element.
    batch_pos = K.flatten(
        K.repeat(K.reshape(K.arange(self.batch_size), [-1, 1]),
                 n_roi_boxes))
    # Use crop_and_resize in place of RoIAlign.
    # crop_and_resize appears to do bilinear interpolation internally,
    # so the algorithm should be essentially the same.
    crop_boxes = tf.image.crop_and_resize(features, roi_unstack, batch_pos,
                                          self.out_shape)

    # Reshape from (N * n_rois, out_size, out_size, channels)
    # to (N, n_rois, out_size, out_size, channels).
    crop_boxes = K.reshape(crop_boxes,
                           [self.batch_size, n_roi_boxes]
                           + self.out_shape + [-1])

    log.tfprint(crop_boxes, "crop_boxes: ")
    return crop_boxes
Example #26
Source File: cnn_models_3d.py From spinalcordtoolbox with MIT License | 5 votes |
def dice_coefficient(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
Example #27
Source File: model.py From spinalcordtoolbox with MIT License | 5 votes |
def dice_coef(y_true, y_pred):
    """Dice coefficient specification

    :param y_true: ground truth.
    :param y_pred: predictions.
    """
    dice_smooth_factor = 1.0
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + dice_smooth_factor) / \
        (K.sum(y_true_f) + K.sum(y_pred_f) + dice_smooth_factor)
Example #28
Source File: utils_get_disc_area.py From pOSAL with MIT License | 5 votes |
def dice_coef(y_true, y_pred):
    smooth = 1e-8
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f*y_true_f) + K.sum(y_pred_f*y_pred_f) + smooth)
Example #29
Source File: train.py From ultrasound-nerve-segmentation with MIT License | 5 votes |
def dice_coef(y_true, y_pred):
    # `smooth` is a module-level constant in the original script (smooth = 1.).
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)