Python tensorflow.verify_tensor_all_finite() Examples
The following are 21 code examples of tensorflow.verify_tensor_all_finite(). You can go to the original project or source file by following the links above each example, or check out all other available functions and classes of the tensorflow module.
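For context, tf.verify_tensor_all_finite(t, msg) returns the input tensor unchanged, and raises an InvalidArgumentError at run time if the tensor contains any NaN or Inf values. A minimal sketch of this behavior, assuming a TensorFlow 1.x session environment (the tensor values are illustrative):

import numpy as np
import tensorflow as tf

x = tf.constant([1.0, 2.0, np.nan])
# checked is the same tensor as x; the op fails if x is not all finite
checked = tf.verify_tensor_all_finite(x, 'x contains NaN or Inf')

with tf.Session() as sess:
    sess.run(checked)  # raises InvalidArgumentError because of the NaN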
Example #1
Source File: metrics.py From neuron with GNU General Public License v3.0
def mean_dice(self, y_true, y_pred):
    """ weighted mean dice across all patches and labels """

    # compute dice, which will now be [batch_size, nb_labels]
    dice_metric = self.dice(y_true, y_pred)

    # weigh the entries in the dice matrix:
    if self.weights is not None:
        dice_metric *= self.weights
    if self.vox_weights is not None:
        dice_metric *= self.vox_weights

    # return mean dice as the metric
    mean_dice_metric = K.mean(dice_metric)
    tf.verify_tensor_all_finite(mean_dice_metric, 'metric not finite')
    return mean_dice_metric
Example #2
Source File: metrics.py From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):
    """ the loss. Assumes y_pred is prob (in [0,1] and sum_row = 1) """

    # compute dice, which will now be [batch_size, nb_labels]
    dice_metric = self.dice(y_true, y_pred)

    # loss
    dice_loss = 1 - dice_metric

    # weigh the entries in the dice matrix:
    if self.weights is not None:
        dice_loss *= self.weights

    # return one minus mean dice as loss
    mean_dice_loss = K.mean(dice_loss)
    tf.verify_tensor_all_finite(mean_dice_loss, 'Loss not finite')
    return mean_dice_loss
Example #3
Source File: metrics.py From voxelmorph with GNU General Public License v3.0
def mean_dice(self, y_true, y_pred):
    """ weighted mean dice across all patches and labels """

    # compute dice, which will now be [batch_size, nb_labels]
    dice_metric = self.dice(y_true, y_pred)

    # weigh the entries in the dice matrix:
    if self.weights is not None:
        dice_metric *= self.weights
    if self.vox_weights is not None:
        dice_metric *= self.vox_weights

    # return mean dice as the metric
    mean_dice_metric = K.mean(dice_metric)
    tf.verify_tensor_all_finite(mean_dice_metric, 'metric not finite')
    return mean_dice_metric
Example #4
Source File: metrics.py From voxelmorph with GNU General Public License v3.0
def loss(self, y_true, y_pred):
    """ the loss. Assumes y_pred is prob (in [0,1] and sum_row = 1) """

    # compute dice, which will now be [batch_size, nb_labels]
    dice_metric = self.dice(y_true, y_pred)

    # loss
    dice_loss = 1 - dice_metric

    # weigh the entries in the dice matrix:
    if self.weights is not None:
        dice_loss *= self.weights

    # return one minus mean dice as loss
    mean_dice_loss = K.mean(dice_loss)
    tf.verify_tensor_all_finite(mean_dice_loss, 'Loss not finite')
    return mean_dice_loss
Example #5
Source File: numerics_test.py From deep_image_model with Apache License 2.0
def testVerifyTensorAllFiniteFails(self):
    x_shape = [5, 4]
    x = np.random.random_sample(x_shape).astype(np.float32)
    my_msg = "Input is not a number."

    # Test NaN.
    x[0] = np.nan
    with self.test_session(use_gpu=True):
        with self.assertRaisesOpError(my_msg):
            t = tf.constant(x, shape=x_shape, dtype=tf.float32)
            t_verified = tf.verify_tensor_all_finite(t, my_msg)
            t_verified.eval()

    # Test Inf.
    x[0] = np.inf
    with self.test_session(use_gpu=True):
        with self.assertRaisesOpError(my_msg):
            t = tf.constant(x, shape=x_shape, dtype=tf.float32)
            t_verified = tf.verify_tensor_all_finite(t, my_msg)
            t_verified.eval()
Example #6
Source File: model.py From ffn with Apache License 2.0
def set_up_sigmoid_pixelwise_loss(self, logits):
    """Sets up the loss function of the model."""
    assert self.labels is not None
    assert self.loss_weights is not None

    pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                         labels=self.labels)
    pixel_loss *= self.loss_weights
    self.loss = tf.reduce_mean(pixel_loss)
    tf.summary.scalar('pixel_loss', self.loss)
    self.loss = tf.verify_tensor_all_finite(self.loss, 'Invalid loss detected')
Example #7
Source File: network.py From DeepPavlov with Apache License 2.0
def _build_graph(self):

    self._add_placeholders()

    _logits, self._predictions = self._build_body()

    _weights = tf.expand_dims(self._tgt_weights, -1)
    _loss_tensor = \
        tf.losses.sparse_softmax_cross_entropy(logits=_logits,
                                               labels=self._decoder_outputs,
                                               weights=_weights,
                                               reduction=tf.losses.Reduction.NONE)
    # normalize loss by batch_size
    _loss_tensor = \
        tf.verify_tensor_all_finite(_loss_tensor,
                                    "Non finite values in loss tensor.")
    self._loss = tf.reduce_sum(_loss_tensor) / tf.cast(self._batch_size, tf.float32)
    # self._loss = tf.reduce_mean(_loss_tensor, name='loss')

    # TODO: tune clip_norm
    self._train_op = \
        self.get_train_op(self._loss,
                          learning_rate=self._learning_rate,
                          optimizer=self._optimizer,
                          clip_norm=2.)
    # log.info("Trainable variables")
    # for v in tf.trainable_variables():
    #     log.info(v)
    # self.print_number_of_parameters()
Example #8
Source File: dm_arch.py From deep-makeover with MIT License
def add_softmax(self):
    """Adds a softmax operation to this model"""
    this_input = tf.square(self.get_output())
    reduction_indices = list(range(1, len(this_input.get_shape())))
    acc = tf.reduce_sum(this_input, reduction_indices=reduction_indices, keep_dims=True)
    out = this_input / (acc + FLAGS.epsilon)
    #out = tf.verify_tensor_all_finite(out, "add_softmax failed; is sum equal to zero?")
    self.outputs.append(out)
    return self
Example #9
Source File: tensorcheck.py From in-silico-labeling with Apache License 2.0
def well_defined():
    """A decorator which checks function argument tensors.

    Checked tensors must have the same shape at graph runtime as they had at
    graph construction time.
    Checked tensors must contain only finite values.

    This calls either tf.verify_tensor_all_finite or lt.verify_tensor_all_finite
    on all input tf.Tensors and lt.LabeledTensors.

    Returns:
      A function to use as a decorator.
    """

    def check(f):
        """Check the inputs."""

        # TODO(ericmc): Should we also check kwds?
        @functools.wraps(f)
        def new_f(*args, **kwds):
            """A helper function."""
            new_args = []
            for a in args:
                float_types = [tf.float16, tf.float32, tf.float64]
                if isinstance(a, tf.Tensor):
                    new_a = shape_unlabeled(a)
                    if a.dtype in float_types:
                        new_a = tf.verify_tensor_all_finite(new_a, msg='')
                elif isinstance(a, lt.LabeledTensor):
                    new_a = shape(a)
                    if a.tensor.dtype in float_types:
                        new_a = lt.verify_tensor_all_finite(new_a, message='')
                else:
                    new_a = a
                new_args.append(new_a)

            return f(*new_args, **kwds)

        return new_f

    return check
Example #10
Source File: seq_batch.py From lang2program with Apache License 2.0
def embed(sequence_batch, embeds):
    mask = sequence_batch.mask
    embedded_values = tf.gather(embeds, sequence_batch.values)
    embedded_values = tf.verify_tensor_all_finite(embedded_values, 'embedded_values')

    # set all pad embeddings to zero
    broadcasted_mask = expand_dims_for_broadcast(mask, embedded_values)
    embedded_values *= broadcasted_mask

    return SequenceBatch(embedded_values, mask)
Example #11
Source File: parse_model.py From lang2program with Apache License 2.0
def __init__(self, rnn_states, type_embedder, name='DelexicalizedDynamicPredicateEmbedder'):
    """Construct DelexicalizedDynamicPredicateEmbedder.

    Args:
        rnn_states (SequenceBatch): of shape (num_contexts, seq_length, rnn_state_dim)
        type_embedder (TokenEmbedder)
        name (str)
    """
    self._type_embedder = type_embedder

    with tf.name_scope(name):
        # column indices of rnn_states (indexes time)
        self._col_indices = FeedSequenceBatch()  # (num_predicates, max_predicate_mentions)

        # row indices of rnn_states (indexes utterance)
        self._row_indices = tf.placeholder(dtype=tf.int32, shape=[None])  # (num_predicates,)
        row_indices_expanded = expand_dims_for_broadcast(self._row_indices, self._col_indices.values)

        # (num_predicates, max_predicate_mentions, rnn_state_dim)
        rnn_states_selected = SequenceBatch(
            gather_2d(rnn_states.values, row_indices_expanded, self._col_indices.values),
            self._col_indices.mask)

        # (num_predicates, rnn_state_dim)
        rnn_embeds = reduce_mean(rnn_states_selected, allow_empty=True)
        rnn_embeds = tf.verify_tensor_all_finite(rnn_embeds, "RNN-state-based embeddings")

        self._type_seq_embedder = MeanSequenceEmbedder(type_embedder.embeds, name='TypeEmbedder')
        self._embeds = tf.concat(1, [rnn_embeds, self._type_seq_embedder.embeds])
Example #12
Source File: seq_batch.py From lang2program with Apache License 2.0
def embed(sequence_batch, embeds):
    mask = sequence_batch.mask
    embedded_values = tf.gather(embeds, sequence_batch.values)
    embedded_values = tf.verify_tensor_all_finite(embedded_values, 'embedded_values')

    # set all pad embeddings to zero
    broadcasted_mask = expand_dims_for_broadcast(mask, embedded_values)
    embedded_values *= broadcasted_mask

    return SequenceBatch(embedded_values, mask)
Example #13
Source File: variational_inference.py From proximity_vi with MIT License
def build_summary_op(self):
    cfg = self.config
    self.saver = tf.train.Saver(max_to_keep=5)
    self.summary_writer = tf.summary.FileWriter(
        cfg['log/dir'], self.session.graph, flush_secs=2)
    # gate summary writing on the ELBO being finite
    assert_op = tf.verify_tensor_all_finite(self.elbo_sum, 'ELBO check')
    with tf.control_dependencies([assert_op]):
        self.summary_op = tf.summary.merge_all()
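This example gates the summary op on the finiteness check via tf.control_dependencies, so summaries are only written once the ELBO has been verified. A minimal standalone sketch of the same gating idiom, assuming TensorFlow 1.x (the placeholder and ops are illustrative, not from the project above):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])
checked = tf.verify_tensor_all_finite(x, 'x must be finite')
with tf.control_dependencies([checked]):
    # this op only runs after the finiteness check has executed
    y = 2.0 * tf.reduce_mean(x)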
Example #14
Source File: numerics_test.py From deep_image_model with Apache License 2.0
def testVerifyTensorAllFiniteSucceeds(self):
    x_shape = [5, 4]
    x = np.random.random_sample(x_shape).astype(np.float32)
    with self.test_session(use_gpu=True):
        t = tf.constant(x, shape=x_shape, dtype=tf.float32)
        t_verified = tf.verify_tensor_all_finite(t, "Input is not a number.")
        self.assertAllClose(x, t_verified.eval())
Example #15
Source File: metrics.py From voxelmorph with GNU General Public License v3.0
def loss(self, y_true, y_pred):
    """ categorical crossentropy loss """

    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    if self.use_float16:
        y_true = K.cast(y_true, 'float16')
        y_pred = K.cast(y_pred, 'float16')

    # scale and clip probabilities
    # this should not be necessary for softmax output.
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    y_pred = K.clip(y_pred, K.epsilon(), 1)

    # compute log probability
    log_post = K.log(y_pred)  # likelihood

    # loss
    loss = - y_true * log_post

    # weighted loss
    if self.weights is not None:
        loss *= self.weights
    if self.vox_weights is not None:
        loss *= self.vox_weights

    # take the total loss
    # loss = K.batch_flatten(loss)
    mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
    tf.verify_tensor_all_finite(mloss, 'Loss not finite')
    return mloss
Example #16
Source File: metrics.py From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):
    """ categorical crossentropy loss """

    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    if self.use_float16:
        y_true = K.cast(y_true, 'float16')
        y_pred = K.cast(y_pred, 'float16')

    # scale and clip probabilities
    # this should not be necessary for softmax output.
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    y_pred = K.clip(y_pred, K.epsilon(), 1)

    # compute log probability
    log_post = K.log(y_pred)  # likelihood

    # loss
    loss = - y_true * log_post

    # weighted loss
    if self.weights is not None:
        loss *= self.weights
    if self.vox_weights is not None:
        loss *= self.vox_weights

    # take the total loss
    # loss = K.batch_flatten(loss)
    mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
    tf.verify_tensor_all_finite(mloss, 'Loss not finite')
    return mloss
Example #17
Source File: core.py From Deep-Learning-with-TensorFlow-Second-Edition with MIT License
def init_target(self):
    with tf.name_scope('target') as scope:
        self.target = self.reduced_loss + self.reg * self.regularization
        self.checked_target = tf.verify_tensor_all_finite(
            self.target,
            msg='NaN or Inf in target value',
            name='target')
        tf.summary.scalar('target', self.checked_target)
Example #18
Source File: core.py From Deep-Learning-with-TensorFlow-Second-Edition with MIT License
def init_learnable_params(self):
    self.w = [None] * self.order
    for i in range(1, self.order + 1):
        r = self.rank
        if i == 1:
            r = 1
        rnd_weights = tf.random_uniform([self.n_features, r], -self.init_std, self.init_std)
        self.w[i - 1] = tf.verify_tensor_all_finite(
            tf.Variable(rnd_weights, trainable=True, name='embedding_' + str(i)),
            msg='NaN or Inf in w[{}].'.format(i - 1))
    self.b = tf.Variable(self.init_std, trainable=True, name='bias')
    tf.summary.scalar('bias', self.b)
Example #19
Source File: srez_model.py From srez with MIT License
def add_softmax(self):
    """Adds a softmax operation to this model"""
    with tf.variable_scope(self._get_layer_str()):
        this_input = tf.square(self.get_output())
        reduction_indices = list(range(1, len(this_input.get_shape())))
        acc = tf.reduce_sum(this_input, reduction_indices=reduction_indices, keep_dims=True)
        out = this_input / (acc + FLAGS.epsilon)
        #out = tf.verify_tensor_all_finite(out, "add_softmax failed; is sum equal to zero?")
        self.outputs.append(out)

    return self
Example #20
Source File: parse_model.py From lang2program with Apache License 2.0
def __init__(self, simple_scorer, attention_scorer, soft_copy_scorer):
    """
    Args:
        simple_scorer (SimplePredicateScorer)
        attention_scorer (AttentionPredicateScorer)
        soft_copy_scorer (SoftCopyPredicateScorer)
    """
    assert isinstance(simple_scorer, SimplePredicateScorer)
    assert isinstance(attention_scorer, AttentionPredicateScorer)
    assert isinstance(soft_copy_scorer, SoftCopyPredicateScorer)

    simple_scores = simple_scorer.scores  # (batch_size, num_candidates)
    attention_scores = attention_scorer.scores  # (batch_size, num_candidates)
    soft_copy_scores = soft_copy_scorer.scores  # (batch_size, num_candidates)

    # check that Tensors are finite
    def verify_finite_inside_mask(scores, msg):
        finite_scores = scores.with_pad_value(0).values
        assert_op = tf.verify_tensor_all_finite(finite_scores, msg)
        return assert_op

    with tf.control_dependencies([
        verify_finite_inside_mask(simple_scores, 'simple_scores'),
        verify_finite_inside_mask(attention_scores, 'attention_scores'),
        verify_finite_inside_mask(soft_copy_scores, 'soft copy scores'),
    ]):
        scores = SequenceBatch(
            simple_scores.values + attention_scores.values + soft_copy_scores.values,
            simple_scores.mask)

    subscores = SequenceBatch(
        tf.pack(
            [simple_scores.values, attention_scores.values, soft_copy_scores.values],
            axis=2),
        simple_scores.mask)

    scores = scores.with_pad_value(-float('inf'))
    probs = SequenceBatch(tf.nn.softmax(scores.values), scores.mask)

    self._scores = scores
    self._subscores = subscores
    self._probs = probs

    self._simple_scorer = simple_scorer
    self._attention_scorer = attention_scorer
    self._soft_copy_scorer = soft_copy_scorer
Example #21
Source File: loss.py From dca with Apache License 2.0
def loss(self, y_true, y_pred, mean=True):
    scale_factor = self.scale_factor
    eps = self.eps

    with tf.name_scope(self.scope):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32) * scale_factor

        if self.masking:
            nelem = _nelem(y_true)
            y_true = _nan2zero(y_true)

        # Clip theta
        theta = tf.minimum(self.theta, 1e6)

        t1 = tf.lgamma(theta + eps) + tf.lgamma(y_true + 1.0) - tf.lgamma(y_true + theta + eps)
        t2 = (theta + y_true) * tf.log(1.0 + (y_pred / (theta + eps))) + (y_true * (tf.log(theta + eps) - tf.log(y_pred + eps)))

        if self.debug:
            assert_ops = [
                tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
                tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
                tf.verify_tensor_all_finite(t2, 't2 has inf/nans')]

            tf.summary.histogram('t1', t1)
            tf.summary.histogram('t2', t2)

            with tf.control_dependencies(assert_ops):
                final = t1 + t2

        else:
            final = t1 + t2

        final = _nan2inf(final)

        if mean:
            if self.masking:
                final = tf.divide(tf.reduce_sum(final), nelem)
            else:
                final = tf.reduce_mean(final)

    return final
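As a closing note: tf.verify_tensor_all_finite is a TensorFlow 1.x API and is no longer exposed at the top level in TensorFlow 2.x; to my knowledge the equivalent there is tf.debugging.assert_all_finite (or tf.compat.v1.verify_tensor_all_finite). A short sketch under that assumption:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
# returns x unchanged; raises InvalidArgumentError if x held NaN/Inf
checked = tf.debugging.assert_all_finite(x, message='x contains NaN or Inf')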