Python tensorflow.confusion_matrix() Examples
The following are 22 code examples of tensorflow.confusion_matrix().
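Before the project examples, a minimal standalone sketch of what the op computes (TensorFlow 1.x, where the function lives at the top level; in 2.x it is tf.math.confusion_matrix): it returns a [num_classes, num_classes] integer matrix whose rows index the true labels and whose columns index the predictions.

import tensorflow as tf  # TensorFlow 1.x

labels = tf.constant([0, 1, 2, 2, 1])
predictions = tf.constant([0, 2, 2, 2, 1])

# Rows index true labels, columns index predictions.
cm = tf.confusion_matrix(labels, predictions, num_classes=3)

with tf.Session() as sess:
    print(sess.run(cm))
    # [[1 0 0]
    #  [0 1 1]
    #  [0 0 2]]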
Example #1
Source File: fcpn.py From fully-convolutional-point-network with MIT License
def get_confusion_matrix_ops(self, predictions, labels, num_classes, unoccupied_class):
    """ Get ops for maintaining a confusion matrix during training.

    Args:
        predictions: tf.tensor
        labels: tf.tensor
        num_classes: int
        unoccupied_class: int, id of unoccupied class
    Returns:
        tf.tensor, tf.tensor, tf.tensor
    """
    labels = tf.reshape(labels, [-1])
    predictions_argmax = tf.reshape(tf.argmax(predictions, axis=2), [-1])

    batch_confusion = tf.confusion_matrix(labels, predictions_argmax,
                                          num_classes=num_classes, name='batch_confusion')
    confusion = tf.Variable(
        tf.zeros([num_classes, num_classes], dtype=tf.int32),
        name='confusion')
    confusion_update = confusion.assign(confusion + batch_confusion)
    confusion_clear = confusion.assign(tf.zeros([num_classes, num_classes], dtype=tf.int32))

    return confusion, confusion_update, confusion_clear
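A hedged sketch of how these three ops might be driven from a training loop (the model, sess, and batches names below are hypothetical, not part of fcpn.py): clear the accumulator at the start of an epoch, run the update op per batch, and read the accumulated matrix at the end.

# Hypothetical driver loop; model, sess, and batches are assumed to exist.
confusion, confusion_update, confusion_clear = model.get_confusion_matrix_ops(
    predictions, labels, num_classes=num_classes, unoccupied_class=0)

sess.run(confusion_clear)                       # reset the accumulator
for feed in batches:
    sess.run(confusion_update, feed_dict=feed)  # add this batch's counts
epoch_confusion = sess.run(confusion)           # [num_classes, num_classes] totals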
Example #2
Source File: anytime_fcn.py From petridishnn with MIT License
def compute_classification_callbacks(self):
    vcs = []
    total_units = self.total_units
    unit_idx = -1
    layer_idx = -1
    for n_units in self.network_config.n_units_per_block:
        for k in range(n_units):
            layer_idx += 1
            unit_idx += 1
            weight = self.weights[unit_idx]
            if weight > 0:
                scope_name = self.compute_scope_basename(layer_idx)
                scope_name = self.prediction_scope(scope_name) + '/'
                vcs.append(MeanIoUFromConfusionMatrix(
                    cm_name=scope_name + 'confusion_matrix/SparseTensorDenseAdd:0',
                    scope_name_prefix=scope_name + 'val_'))
                vcs.append(WeightedTensorStats(
                    names=[scope_name + 'sum_abs_diff:0',
                           scope_name + 'prob_sqr_err:0',
                           scope_name + 'cross_entropy_loss:0'],
                    weight_name='dynamic_batch_size:0',
                    prefix='val_'))
    return vcs
Example #3
Source File: metrics.py From MultiPlanarUNet with MIT License
def sparse_mean_fg_f1(y_true, y_pred):
    y_pred = tf.argmax(y_pred, axis=-1)

    # Get confusion matrix
    cm = tf.confusion_matrix(tf.reshape(y_true, [-1]),
                             tf.reshape(y_pred, [-1]))

    # Get precisions
    TP = tf.diag_part(cm)
    precisions = TP / tf.reduce_sum(cm, axis=0)

    # Get recalls
    TP = tf.diag_part(cm)
    recalls = TP / tf.reduce_sum(cm, axis=1)

    # Get F1s
    f1s = (2 * precisions * recalls) / (precisions + recalls)

    return tf.reduce_mean(f1s[1:])
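The arithmetic above is easy to sanity-check offline. A NumPy sketch of the same per-class precision/recall/F1 computation on a small hand-built matrix (rows are true classes, columns are predictions, class 0 is background):

import numpy as np

cm = np.array([[50,  2,  3],
               [ 4, 40,  6],
               [ 1,  5, 45]])
TP = np.diag(cm)
precisions = TP / cm.sum(axis=0)   # column sums = predicted counts per class
recalls = TP / cm.sum(axis=1)      # row sums = true counts per class
f1s = 2 * precisions * recalls / (precisions + recalls)
print(f1s[1:].mean())              # mean F1 over foreground classes only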
Example #4
Source File: base_gattn.py From GAT with MIT License
def confmat(logits, labels):
    preds = tf.argmax(logits, axis=1)
    return tf.confusion_matrix(labels, preds)

##########################
# Adapted from tkipf/gcn #
##########################
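tf.confusion_matrix expects integer class ids for both arguments, so if the labels arrive one-hot encoded (common in gcn-style pipelines), they would need an argmax first. A hedged one-line sketch; labels_one_hot is a hypothetical name:

cm_op = confmat(logits, tf.argmax(labels_one_hot, axis=1))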
Example #5
Source File: GAT.py From OpenHINE with MIT License
def confmat(logits, labels):
    preds = tf.argmax(logits, axis=1)
    return tf.confusion_matrix(labels, preds)

##########################
# Adapted from tkipf/gcn #
##########################
Example #6
Source File: SanityModel.py From Tensorflow-Keyword-Spotting with Apache License 2.0
def get_confusion_matrix_correct_labels(self, ground_truth_input, logits, seq_len, audio_processor):
    predicted_indices = tf.argmax(logits, 1)
    correct_prediction = tf.equal(predicted_indices, ground_truth_input)
    confusion_matrix = tf.confusion_matrix(ground_truth_input, predicted_indices,
                                           num_classes=self.model_settings['label_count'])
    return predicted_indices, correct_prediction, confusion_matrix
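The returned matrix covers one batch; over an evaluation set it would typically be summed, and overall accuracy falls out of the total. A sketch under that assumption (num_eval_batches and next_feed are hypothetical):

import numpy as np

total_cm = None
for _ in range(num_eval_batches):
    batch_cm = sess.run(confusion_matrix, feed_dict=next_feed())
    total_cm = batch_cm if total_cm is None else total_cm + batch_cm
accuracy = np.trace(total_cm) / total_cm.sum()   # diagonal = correct predictions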
Example #7
Source File: CTCModelLSTM.py From Tensorflow-Keyword-Spotting with Apache License 2.0
def get_confusion_matrix_correct_labels(self, ground_truth_input, logits, seq_len, audio_processor):
    predicted_indices_orig, _ = tf.nn.ctc_beam_search_decoder(logits, seq_len)
    # call to utils: convert tensor indices to a label
    predicted_indices = self.convert_indices_to_label(predicted_indices_orig[0], audio_processor)
    correct_label = self.convert_indices_to_label(ground_truth_input, audio_processor)
    correct_prediction = tf.equal([predicted_indices], [correct_label])
    confusion_matrix = tf.confusion_matrix([correct_label], [predicted_indices],
                                           num_classes=self.model_settings['label_count'])
    return predicted_indices, correct_prediction, confusion_matrix
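Note that tf.nn.ctc_beam_search_decoder returns a list of tf.SparseTensor (one per beam) plus log probabilities, which is why the code indexes predicted_indices_orig[0]. A hedged sketch of densifying the top beam, if raw index sequences were needed instead of a label:

decoded, log_probs = tf.nn.ctc_beam_search_decoder(logits, seq_len)
# decoded[0] is the most probable path as a SparseTensor of label indices
dense_decoded = tf.sparse_tensor_to_dense(decoded[0], default_value=-1)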
Example #8
Source File: BaselineConv.py From Tensorflow-Keyword-Spotting with Apache License 2.0
def get_confusion_matrix_correct_labels(self, ground_truth_input, logits, seq_len, audio_processor):
    predicted_indices = tf.argmax(logits, 1)
    correct_prediction = tf.equal(predicted_indices, ground_truth_input)
    confusion_matrix = tf.confusion_matrix(ground_truth_input, predicted_indices,
                                           num_classes=self.model_settings['label_count'])
    return predicted_indices, correct_prediction, confusion_matrix
Example #9
Source File: VggNet.py From Tensorflow-Keyword-Spotting with Apache License 2.0
def get_confusion_matrix_correct_labels(self, ground_truth_input, logits, seq_len, audio_processor):
    predicted_indices = tf.argmax(logits, 1)
    correct_prediction = tf.equal(predicted_indices, ground_truth_input)
    confusion_matrix = tf.confusion_matrix(ground_truth_input, predicted_indices,
                                           num_classes=self.model_settings['label_count'])
    return predicted_indices, correct_prediction, confusion_matrix
Example #10
Source File: mnist.py From tf-matplotlib with MIT License
def draw_confusion_matrix(matrix):
    '''Draw confusion matrix for MNIST.'''
    fig = tfmpl.create_figure(figsize=(7, 7))
    ax = fig.add_subplot(111)
    ax.set_title('Confusion matrix for MNIST classification')

    tfmpl.plots.confusion_matrix.draw(
        ax, matrix,
        axis_labels=['Digit ' + str(x) for x in range(10)],
        normalize=True
    )

    return fig
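This drawer only builds a matplotlib figure; in the tf-matplotlib samples such drawers are wired into TensorBoard by converting the figure to an image tensor. A sketch of that wiring, assuming the library's figure_tensor wrapper and a confusion-matrix op named cm_op (both assumptions, not shown in this file):

image_tensor = tfmpl.figure_tensor(draw_confusion_matrix)(cm_op)
summary_op = tf.summary.image('confusion_matrix', image_tensor)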
Example #11
Source File: metrics.py From MultiPlanarUNet with MIT License
def sparse_mean_fg_recall(y_true, y_pred):
    y_pred = tf.argmax(y_pred, axis=-1)

    # Get confusion matrix
    cm = tf.confusion_matrix(tf.reshape(y_true, [-1]),
                             tf.reshape(y_pred, [-1]))

    # Get recalls
    TP = tf.diag_part(cm)
    recalls = TP / tf.reduce_sum(cm, axis=1)

    return tf.reduce_mean(recalls[1:])
Example #12
Source File: metrics.py From MultiPlanarUNet with MIT License
def sparse_mean_fg_precision(y_true, y_pred):
    y_pred = tf.argmax(y_pred, axis=-1)

    # Get confusion matrix
    cm = tf.confusion_matrix(tf.reshape(y_true, [-1]),
                             tf.reshape(y_pred, [-1]))

    # Get precisions
    TP = tf.diag_part(cm)
    precisions = TP / tf.reduce_sum(cm, axis=0)

    return tf.reduce_mean(precisions[1:])
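All three MultiPlanarUNet metrics above follow the Keras (y_true, y_pred) signature, so they can presumably be passed straight to compile as custom metrics; a sketch under that assumption:

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=[sparse_mean_fg_precision, sparse_mean_fg_recall, sparse_mean_fg_f1])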
Example #13
Source File: base_gattn.py From hetsann with Apache License 2.0
def confmat(logits, labels):
    preds = tf.argmax(logits, axis=1)
    return tf.confusion_matrix(labels, preds)

##########################
# Adapted from tkipf/gcn #
##########################
Example #14
Source File: base_gattn.py From hetsann with Apache License 2.0
def confmat(logits, labels):
    preds = tf.argmax(logits, axis=1)
    return tf.confusion_matrix(labels, preds)

##########################
# Adapted from tkipf/gcn #
##########################
Example #15
Source File: base_gattn.py From hetsann with Apache License 2.0
def confmat(logits, labels):
    preds = tf.argmax(logits, axis=1)
    return tf.confusion_matrix(labels, preds)

##########################
# Adapted from tkipf/gcn #
##########################
Example #16
Source File: metrics.py From dynamic-training-bench with Mozilla Public License 2.0
def confusion_matrix_op(logits, labels, num_classes):
    """Creates the operation to build the confusion matrix between the
    predictions and the labels. The number of classes is required to build
    the matrix correctly.
    Args:
        logits: a [batch_size, 1, 1, num_classes] tensor or
            a [batch_size, num_classes] tensor
        labels: a [batch_size] tensor
    Returns:
        confusion_matrix_op: the confusion matrix tf op
    """
    with tf.variable_scope('confusion_matrix'):
        # handle fully convolutional classifiers
        logits_shape = logits.shape
        if len(logits_shape) == 4 and logits_shape[1:3] == [1, 1]:
            top_k_logits = tf.squeeze(logits, [1, 2])
        else:
            top_k_logits = logits

        # Extract the predicted label (top-1)
        _, top_predicted_label = tf.nn.top_k(top_k_logits, k=1, sorted=False)
        # (batch_size, k) -> k = 1 -> (batch_size)
        top_predicted_label = tf.squeeze(top_predicted_label, axis=1)

        return tf.confusion_matrix(
            labels, top_predicted_label, num_classes=num_classes)
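The tf.nn.top_k call with k=1 followed by a squeeze is equivalent to an argmax over the class axis; a one-line sketch of the shorter form, which would presumably behave identically here:

# Equivalent top-1 extraction (returns int64 rather than int32 indices):
top_predicted_label = tf.argmax(top_k_logits, axis=1)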
Example #17
Source File: model.py From TCML-tensorflow with MIT License
def _calc_accuracy(self):
    with tf.name_scope("accuracy"):
        predictions = tf.argmax(self.last_vector, 2, name="predictions", output_type=tf.int32)
        labels = self.target_label
        correct_predictions = tf.equal(predictions, labels)
        accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        # self.confusion_matrix = tf.confusion_matrix(labels, predictions, num_classes=self.num_classes)
        return accuracy
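The commented-out confusion matrix would not run as written: after the axis-2 argmax, predictions (and labels) are rank-2 [batch, seq], while tf.confusion_matrix expects 1-D tensors. A hedged sketch of the flattening it would presumably need:

self.confusion_matrix = tf.confusion_matrix(tf.reshape(labels, [-1]),
                                            tf.reshape(predictions, [-1]),
                                            num_classes=self.num_classes)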
Example #18
Source File: siamese_net.py From atec-nlp with MIT License
def forward(self):
    if self._interaction == 'concat':
        self.out = tf.concat([self.out1, self.out2], axis=1, name="out")
    elif self._interaction == 'multiply':
        self.out = tf.multiply(self.out1, self.out2, name="out")
    fc = tf.layers.dense(self.out, 128, name='fc1', activation=tf.nn.relu)
    # self.scores = tf.layers.dense(self.fc, 1, activation=tf.nn.sigmoid)
    self.logits = tf.layers.dense(fc, 2, name='fc2')
    # self.y_pred = tf.round(tf.nn.sigmoid(self.logits), name="predictions")  # pred class
    self.y_pred = tf.cast(tf.argmax(tf.nn.sigmoid(self.logits), 1, name="predictions"), tf.float32)

    with tf.name_scope("loss"):
        # [batch_size, num_classes]
        y = tf.one_hot(tf.cast(self.input_y, tf.int32), 2)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=y)
        self.loss = tf.reduce_mean(cross_entropy)
        # self.loss = tf.losses.sigmoid_cross_entropy(logits=self.logits, multi_class_labels=y)
        # y = self.input_y
        # y_ = self.scores
        # self.loss = -tf.reduce_mean(pos_weight * y * tf.log(tf.clip_by_value(y_, 1e-10, 1.0))
        #                             + (1-y) * tf.log(tf.clip_by_value(1-y_, 1e-10, 1.0)))
        # add l2 reg except bias and BN variables.
        self.l2 = self._l2_reg_lambda * tf.reduce_sum(
            [tf.nn.l2_loss(v) for v in tf.trainable_variables()
             if not ("noreg" in v.name or "bias" in v.name)])
        self.loss += self.l2

    # Accuracy computation is outside of this class.
    with tf.name_scope("metrics"):
        TP = tf.count_nonzero(self.input_y * self.y_pred, dtype=tf.float32)
        TN = tf.count_nonzero((self.input_y - 1) * (self.y_pred - 1), dtype=tf.float32)
        FP = tf.count_nonzero(self.y_pred * (self.input_y - 1), dtype=tf.float32)
        FN = tf.count_nonzero((self.y_pred - 1) * self.input_y, dtype=tf.float32)
        # tf.div like python2 division, tf.divide like python3
        self.cm = tf.confusion_matrix(self.input_y, self.y_pred, name="confusion_matrix")
        self.acc = tf.divide(TP + TN, TP + TN + FP + FN, name="accuracy")
        self.precision = tf.divide(TP, TP + FP, name="precision")
        self.recall = tf.divide(TP, TP + FN, name="recall")
        self.f1 = tf.divide(2 * self.precision * self.recall,
                            self.precision + self.recall, name="F1_score")
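For this binary case, the four counts and the confusion matrix are two views of the same data; with tf.confusion_matrix's convention (rows = labels, columns = predictions) the correspondence is TN = cm[0, 0], FP = cm[0, 1], FN = cm[1, 0], TP = cm[1, 1]. A sketch of deriving the same precision/recall from self.cm instead of the counts:

# Sketch only; the class keeps the count-based version above.
TP_cm = self.cm[1, 1]
precision_cm = TP_cm / tf.reduce_sum(self.cm[:, 1])   # column 1 = predicted positive
recall_cm = TP_cm / tf.reduce_sum(self.cm[1, :])      # row 1 = actual positive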
Example #19
Source File: test.py From seg-mentor with MIT License
def iter_test(annotation, predictions, checkpoint, iterator, args,
              more_tensors_to_eval=[], callback=None, fd={}):
    """
    Iterate over the validation set, and compute overall (m)IoU metric(s).

    Note: more_tensors_to_eval & callback are a placeholder for additional
    operation(s) to be run in the context of each image, without changing
    this function's code, e.g. to visualize images 20-25.
    """
    annotation_b = tf.expand_dims(annotation, axis=0)

    # Mask out the irrelevant (a.k.a ambiguous a.k.a unlabeled etc.) pixels from evaluation
    weights = tf.to_float(tf.less(annotation_b, args.num_classes))

    # note labels clipped to be inside range for legit confusion matrix -
    # but that doesn't harm result thanks to masking by weights.
    labels_b_clipped = tf.clip_by_value(annotation_b, 0, args.num_classes - 1)

    miou, update_op = tf.metrics.mean_iou(predictions=predictions,
                                          labels=labels_b_clipped,
                                          num_classes=args.num_classes,
                                          weights=weights)
    conf_op = tf.confusion_matrix(tf.reshape(predictions, [-1]),
                                  tf.reshape(labels_b_clipped, [-1]),
                                  num_classes=args.num_classes,
                                  weights=tf.reshape(weights, [-1]))
    conf_mtx = np.zeros([args.num_classes] * 2)

    initializer = tf.local_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(initializer)
        sess.run(iterator.initializer)
        saver.restore(sess, checkpoint)

        for i in range(args.num_images):
            _eval_res = sess.run([conf_op, update_op] + more_tensors_to_eval, feed_dict=fd)
            conf_tmp = _eval_res[0]
            if callback:  # a placeholder to inject more functionality w.o. changing this func
                callback(i, _eval_res[2:])
            conf_mtx += conf_tmp

        final_miou = sess.run(miou)

    print("Final mIoU for {0} images is {1:.2f}%".format(args.num_images, final_miou * 100))
    print("\n\n ---- Breakup by class: ----")

    diag = conf_mtx.diagonal()
    err1 = conf_mtx.sum(axis=1) - conf_mtx.diagonal()
    err2 = conf_mtx.sum(axis=0) - conf_mtx.diagonal()
    iou = diag / (0.0 + diag + err1 + err2)
    # print(pascal_voc_lut)
    for i, x in enumerate(iou):
        print(args.clabel2cname[i] + ': {0:.2f}%'.format(x * 100))

    print(np.mean(iou))  # just a sanity check to verify that it's the same as final_miou
    print(conf_mtx.sum(axis=1), conf_mtx.sum(axis=0))
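One subtlety worth flagging: conf_op passes predictions in the labels slot, so rows of conf_mtx index predictions and columns index true labels, the transpose of tf.confusion_matrix's usual orientation. The per-class IoU printed above is unaffected, since diag / (diag + err1 + err2) is symmetric under transposing the matrix, but the row and column sums printed at the end swap meaning.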
Example #20
Source File: adversarial.py From Medical-Cross-Modality-Domain-Adaptation with MIT License
def test_eval(self, sess, output_path, flip_correction=True):
    all_cm = np.zeros([self.num_cls, self.num_cls])
    pred_folder = os.path.join(output_path, "dense_pred")
    try:
        os.makedirs(pred_folder)
    except:
        logging.info("prediction folder exists")

    self.test_pair_list = list(zip(self.test_label_list, self.test_nii_list))
    sample_eval_list = []

    # evaluation of each sample
    for idx_file, pair in enumerate(self.test_pair_list):
        sample_cm = np.zeros([self.num_cls, self.num_cls])  # confusion matrix for each sample
        label_fid = pair[0]
        nii_fid = pair[1]
        if not os.path.isfile(nii_fid):
            raise Exception("cannot find sample %s" % str(nii_fid))

        raw = read_nii_image(nii_fid)
        raw_y = read_nii_image(label_fid)

        if flip_correction is True:
            raw = np.flip(raw, axis=0)
            raw = np.flip(raw, axis=1)
            raw_y = np.flip(raw_y, axis=0)
            raw_y = np.flip(raw_y, axis=1)

        tmp_y = np.zeros(raw_y.shape)
        frame_list = [kk for kk in range(1, raw.shape[2] - 1)]
        np.random.shuffle(frame_list)

        for ii in range(int(floor(raw.shape[2] // self.net.batch_size))):
            vol = np.zeros([self.net.batch_size, raw_size[0], raw_size[1], raw_size[2]])
            slice_y = np.zeros([self.net.batch_size, label_size[0], label_size[1]])

            for idx, jj in enumerate(frame_list[ii * self.net.batch_size: (ii + 1) * self.net.batch_size]):
                vol[idx, ...] = raw[..., jj - 1: jj + 2].copy()
                slice_y[idx, ...] = raw_y[..., jj].copy()

            vol_y = _label_decomp(self.num_cls, slice_y)
            pred, curr_conf_mat = sess.run(
                [self.net.compact_pred, self.net.confusion_matrix],
                feed_dict={self.net.ct: vol, self.net.ct_y: vol_y,
                           self.net.keep_prob: 1.0, self.net.mr_front_bn: False,
                           self.net.ct_front_bn: False})

            for idx, jj in enumerate(frame_list[ii * self.net.batch_size: (ii + 1) * self.net.batch_size]):
                tmp_y[..., jj] = pred[idx, ...].copy()

            sample_cm += curr_conf_mat

        all_cm += sample_cm
        sample_dice = _dice(sample_cm)
        sample_jaccard = _jaccard(sample_cm)
        sample_eval_list.append((sample_dice, sample_jaccard))

    subject_dice_list, subject_jaccard_list = self.sample_metric_stddev(sample_eval_list)
    np.savetxt(os.path.join(output_path, "cm.csv"), all_cm)
    return subject_dice_list, subject_jaccard_list
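_dice and _jaccard reduce a per-sample confusion matrix to overlap scores; a NumPy sketch of the standard per-class definitions they presumably implement (the repo's exact helpers are not shown here):

import numpy as np

def dice_from_cm(cm):
    TP = np.diag(cm).astype(np.float64)
    FP = cm.sum(axis=0) - TP    # predicted as class c, truly something else
    FN = cm.sum(axis=1) - TP    # truly class c, predicted as something else
    return 2 * TP / (2 * TP + FP + FN)

def jaccard_from_cm(cm):
    TP = np.diag(cm).astype(np.float64)
    FP = cm.sum(axis=0) - TP
    FN = cm.sum(axis=1) - TP
    return TP / (TP + FP + FN)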
Example #21
Source File: adversarial.py From Medical-Cross-Modality-Domain-Adaptation with MIT License
def output_minibatch_stats(self, sess, summary_writer, step,
                           ct_batch, ct_batch_y, mr_batch, mr_batch_y, detail=False):
    """ minibatch stats for tensorboard observation """
    if detail is not True:
        summary_str, summary_img = sess.run(
            [self.scalar_summary_op, self.train_image_summary_op],
            feed_dict={self.net.ct_front_bn: False,
                       self.net.mr_front_bn: False,
                       self.net.joint_bn: False,
                       self.net.cls_bn: False,
                       self.net.mr: mr_batch,
                       self.net.mr_y: mr_batch_y,
                       self.net.ct: ct_batch,
                       self.net.ct_y: ct_batch_y,
                       self.net.keep_prob: 1.})
    else:
        _, curr_conf_mat, summary_str, summary_img = sess.run(
            [self.net.compact_pred, self.net.confusion_matrix,
             self.scalar_summary_op, self.train_image_summary_op],
            feed_dict={self.net.ct_front_bn: False,
                       self.net.mr_front_bn: False,
                       self.net.joint_bn: False,
                       self.net.cls_bn: False,
                       self.net.mr: mr_batch,
                       self.net.mr_y: mr_batch_y,
                       self.net.ct: ct_batch,
                       self.net.ct_y: ct_batch_y,
                       self.net.keep_prob: 1.})
        _indicator_eval(curr_conf_mat)

    summary_writer.add_summary(summary_str, step)
    summary_writer.add_summary(summary_img, step)
    summary_writer.flush()
Example #22
Source File: siamese_net.py From atec-nlp with MIT License
def forward(self):
    # out1_norm = tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1))
    # out2_norm = tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1))
    # self.distance = tf.sqrt(tf.reduce_sum(tf.square(self.out1 - self.out2), 1, keepdims=False))
    distance = tf.norm(self.out1 - self.out2, ord='euclidean', axis=1, keepdims=False, name='euc-distance')
    distance = tf.div(distance, tf.add(tf.norm(self.out1, 2, axis=1), tf.norm(self.out2, 2, axis=1)))
    self.sim_euc = tf.subtract(1.0, distance, name="euc")

    # self.sim = tf.reduce_sum(tf.multiply(self.out1, self.out2), 1) / tf.multiply(out1_norm, out2_norm)
    out1_norm = tf.nn.l2_normalize(self.out1, 1)  # output = x / sqrt(max(sum(x**2), epsilon))
    out2_norm = tf.nn.l2_normalize(self.out2, 1)
    self.sim_cos = tf.reduce_sum(tf.multiply(out1_norm, out2_norm), axis=1, name="cosine")

    # sim = exp(-||x1-x2||) range (0, 1]
    # self.sim_ma = tf.exp(-tf.reduce_sum(tf.abs(self.out1 - self.out2), 1), name="manhattan")
    self.sim_ma = tf.exp(-tf.norm(self.out1 - self.out2, 1, 1), name="manhattan")

    if self._energy_func == 'euclidean':
        self.sim = self.sim_euc
    elif self._energy_func == 'cosine':
        self.sim = self.sim_cos
    elif self._energy_func == 'exp_manhattan':
        self.sim = self.sim_ma
    elif self._energy_func == 'combine':
        w = tf.Variable(1, dtype=tf.float32)
        self.sim = w * self.sim_euc + (1 - w) * self.sim_cos
    else:
        raise ValueError("Invalid energy function name.")

    self.y_pred = tf.cast(tf.greater(self.sim, self._pred_threshold), dtype=tf.float32, name="y_pred")

    with tf.name_scope("loss"):
        if self._loss_func == 'contrasive':
            self.loss = self.contrastive_loss(self.input_y, self.sim)
        elif self._loss_func == 'cross_entrophy':
            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y, logits=self.sim))
        # add l2 reg except bias and BN variables.
        self.l2 = self._l2_reg_lambda * tf.reduce_sum(
            [tf.nn.l2_loss(v) for v in tf.trainable_variables()
             if not ("noreg" in v.name or "bias" in v.name)])
        self.loss += self.l2
        if self._encoder_type != 'cnn' and self._rnn_encoder._use_attention:
            self.loss += tf.reduce_mean(self._rnn_encoder.P)

    # Accuracy computation is outside of this class.
    # self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y_pred, self.input_y), tf.float32), name="accuracy")
    TP = tf.count_nonzero(self.input_y * self.y_pred, dtype=tf.float32)
    TN = tf.count_nonzero((self.input_y - 1) * (self.y_pred - 1), dtype=tf.float32)
    FP = tf.count_nonzero(self.y_pred * (self.input_y - 1), dtype=tf.float32)
    FN = tf.count_nonzero((self.y_pred - 1) * self.input_y, dtype=tf.float32)
    # tf.div like python2 division, tf.divide like python3
    self.acc = tf.divide(TP + TN, TP + TN + FP + FN, name="accuracy")
    self.precision = tf.divide(TP, TP + FP, name="precision")
    self.recall = tf.divide(TP, TP + FN, name="recall")
    self.cm = tf.confusion_matrix(self.input_y, self.y_pred, name="confusion_matrix")
    # tf.assert_equal(self.acc, self.acc_)
    # https://github.com/tensorflow/tensorflow/issues/15115, be careful!
    # _, self.acc = tf.metrics.accuracy(self.input_y, self.y_pred)
    # _, self.precision = tf.metrics.precision(self.input_y, self.y_pred, name='precision')
    # _, self.recall = tf.metrics.recall(self.input_y, self.y_pred, name='recall')
    self.f1 = tf.divide(2 * self.precision * self.recall,
                        self.precision + self.recall, name="F1_score")