Python tensorflow.python.ops.math_ops.to_int32() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.to_int32().
You may also want to check out all available functions and classes of the module tensorflow.python.ops.math_ops.
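Before the examples, one orienting note: math_ops.to_int32(x) is a thin wrapper around a cast to tf.int32, and it was later deprecated in favor of the explicit tf.cast(x, tf.int32). A minimal sketch of the call, assuming a TF 1.x graph-mode session; the tensor values are illustrative only:

import tensorflow as tf
from tensorflow.python.ops import math_ops

# Float labels, as they often arrive from an input pipeline.
labels = tf.constant([[0.0], [2.0], [1.0]])

int_labels = math_ops.to_int32(labels)   # the spelling used in the examples below
cast_labels = tf.cast(labels, tf.int32)  # the modern equivalent

with tf.Session() as sess:
  print(sess.run(int_labels))  # [[0] [2] [1]]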
Example #1
Source File: hybrid_model.py From auto-alt-text-lambda-api with MIT License

def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=array_ops.squeeze(math_ops.to_int32(labels)),
            logits=self.training_inference_graph(data)),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
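A note on the classification branch: sparse_softmax_cross_entropy_with_logits expects integer class ids of shape [batch_size], while labels frequently arrive as a float tensor of shape [batch_size, 1]; that is why the example both casts with to_int32 and squeezes. A standalone sketch of the same pattern, with made-up values:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 1.8, 0.4]])  # [batch, num_classes]
labels = tf.constant([[0.0], [1.0]])     # float, shape [batch, 1]

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.squeeze(tf.cast(labels, tf.int32)),  # int32, shape [batch]
        logits=logits))

with tf.Session() as sess:
  print(sess.run(loss))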
Example #2
Source File: hybrid_model.py From deep_image_model with Apache License 2.0

def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            self.training_inference_graph(data),
            array_ops.squeeze(math_ops.to_int32(labels))),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
Example #3
Source File: tensor_forest.py From deep_image_model with Apache License 2.0

def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #4
Source File: tensor_forest.py From deep-learning with MIT License

def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #5
Source File: tensor_forest.py From auto-alt-text-lambda-api with MIT License

def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #6
Source File: array_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License

def _GatherGrad(op, grad):
  """Gradient for Gather op."""
  # params can be large, so colocate the shape calculation with it.
  #
  # params can be very large for sparse model, array_ops.shape raises
  # exception on the Windows platform when any dimension is larger than
  # int32. params_shape is not used in optimizer apply_sparse gradients,
  # so it's fine to convert it back to int32 regardless of truncation.
  params = op.inputs[0]
  with ops.colocate_with(params):
    params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
    params_shape = math_ops.to_int32(params_shape)

  # Build appropriately shaped IndexedSlices
  indices = op.inputs[1]
  size = array_ops.expand_dims(array_ops.size(indices), 0)
  values_shape = array_ops.concat([size, params_shape[1:]], 0)
  values = array_ops.reshape(grad, values_shape)
  indices = array_ops.reshape(indices, size)
  return [ops.IndexedSlices(values, indices, params_shape), None]
Example #7
Source File: hybrid_model.py From keras-lambda with MIT License

def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=array_ops.squeeze(math_ops.to_int32(labels)),
            logits=self.training_inference_graph(data)),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
Example #8
Source File: tensor_forest.py From lambda-packs with MIT License

def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #9
Source File: tensor_forest.py From keras-lambda with MIT License

def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #10
Source File: hybrid_model.py From lambda-packs with MIT License

def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=array_ops.squeeze(math_ops.to_int32(labels)),
            logits=self.training_inference_graph(data)),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
Example #11
Source File: input.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License

def _select_which_to_enqueue(tensor_list, keep_input):
  """Select which examples to enqueue based on vector `keep_input`."""
  select_i = math_ops.to_int32(keep_input)
  tensor_list = [
      data_flow_ops.dynamic_partition(x, select_i, num_partitions=2)[1]
      for x in tensor_list]
  return tensor_list
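dynamic_partition splits a tensor into num_partitions pieces according to an integer partition vector; casting the boolean keep_input to int32 yields 0/1 partition ids, and indexing [1] keeps exactly the rows where keep_input is true. A toy illustration of that trick (the values are made up):

import tensorflow as tf

x = tf.constant([10, 20, 30, 40])
keep = tf.constant([True, False, True, False])

# Partition 0 collects dropped rows, partition 1 collects kept rows.
kept = tf.dynamic_partition(x, tf.cast(keep, tf.int32), num_partitions=2)[1]

with tf.Session() as sess:
  print(sess.run(kept))  # [10 30]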
Example #12
Source File: eval_metrics.py From keras-lambda with MIT License

def _top_k_generator(k):
  def _top_k(probabilities, targets):
    targets = math_ops.to_int32(targets)
    if targets.get_shape().ndims > 1:
      targets = array_ops.squeeze(targets, squeeze_dims=[1])
    return metric_ops.streaming_mean(nn.in_top_k(probabilities, targets, k))
  return _top_k
Example #13
Source File: tensor_forest.py From keras-lambda with MIT License

def one_hot_wrapper(num_classes, loss_fn):
  """Some loss functions take one-hot labels."""
  def _loss(probs, targets):
    if targets.get_shape().ndims > 1:
      targets = array_ops.squeeze(targets, squeeze_dims=[1])
    one_hot_labels = array_ops.one_hot(
        math_ops.to_int32(targets), num_classes,
        on_value=1., off_value=0., dtype=dtypes.float32)
    return loss_fn(probs, one_hot_labels)
  return _loss
Example #14
Source File: metric_loss_ops.py From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License

def _compute_zeroone_score(labels, predictions):
  zeroone_score = math_ops.to_float(
      math_ops.equal(
          math_ops.reduce_sum(
              math_ops.to_int32(math_ops.equal(labels, predictions))),
          array_ops.shape(labels)[0]))
  return zeroone_score
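The zero-one score is 1.0 only when every prediction matches its label: elementwise matches are cast to int32 and summed, and the sum is compared with the batch size. A numeric sketch with made-up values:

import tensorflow as tf

labels = tf.constant([1, 2, 3, 4])
predictions = tf.constant([1, 2, 0, 4])

matches = tf.reduce_sum(tf.cast(tf.equal(labels, predictions), tf.int32))
score = tf.cast(tf.equal(matches, tf.shape(labels)[0]), tf.float32)

with tf.Session() as sess:
  print(sess.run(score))  # 0.0 -- one of four elements differs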
Example #15
Source File: eval_metrics.py From keras-lambda with MIT License

def _softmax_entropy(probabilities, targets, weights=None):
  return metric_ops.streaming_mean(
      losses.sparse_softmax_cross_entropy(probabilities,
                                          math_ops.to_int32(targets)),
      weights=weights)
Example #16
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License

def ctc_batch_cost(y_true, y_pred, input_length, label_length):
  """Runs CTC loss algorithm on each batch element.

  Arguments:
      y_true: tensor `(samples, max_string_length)` containing the truth
          labels.
      y_pred: tensor `(samples, time_steps, num_categories)` containing the
          prediction, or output of the softmax.
      input_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_pred`.
      label_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_true`.

  Returns:
      Tensor with shape (samples, 1) containing the CTC loss of each element.
  """
  label_length = math_ops.to_int32(array_ops.squeeze(label_length))
  input_length = math_ops.to_int32(array_ops.squeeze(input_length))
  sparse_labels = math_ops.to_int32(
      ctc_label_dense_to_sparse(y_true, label_length))

  y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + 1e-8)

  return array_ops.expand_dims(
      ctc.ctc_loss(
          inputs=y_pred, labels=sparse_labels, sequence_length=input_length),
      1)
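The same routine is exposed publicly as tf.keras.backend.ctc_batch_cost. A rough usage sketch with dummy shapes (batch of 2, 10 time steps, 5 output categories including the CTC blank), assuming a TF 1.x session; all values are illustrative:

import numpy as np
import tensorflow as tf

batch, time_steps, num_categories, max_len = 2, 10, 5, 4

y_pred = tf.nn.softmax(tf.random_normal([batch, time_steps, num_categories]))
y_true = tf.constant(
    np.random.randint(0, num_categories - 1, size=(batch, max_len)),
    dtype=tf.int32)
input_length = tf.fill([batch, 1], time_steps)  # time steps per batch item
label_length = tf.fill([batch, 1], max_len)     # label length per batch item

loss = tf.keras.backend.ctc_batch_cost(y_true, y_pred,
                                       input_length, label_length)

with tf.Session() as sess:
  print(sess.run(loss).shape)  # (2, 1)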
Example #17
Source File: eval_metrics.py From deep_image_model with Apache License 2.0

def _softmax_entropy(probabilities, targets, weights=None):
  return metric_ops.streaming_mean(losses.sparse_softmax_cross_entropy(
      probabilities, math_ops.to_int32(targets)),
      weights=weights)
Example #18
Source File: eval_metrics.py From deep_image_model with Apache License 2.0

def _squeeze_and_onehot(targets, depth):
  targets = array_ops.squeeze(targets, squeeze_dims=[1])
  return array_ops.one_hot(math_ops.to_int32(targets), depth)
Example #19
Source File: tensor_forest.py From deep_image_model with Apache License 2.0

def one_hot_wrapper(num_classes, loss_fn):
  """Some loss functions take one-hot labels."""
  def _loss(probs, targets):
    one_hot_labels = array_ops.one_hot(
        math_ops.to_int32(targets), num_classes,
        on_value=1., off_value=0., dtype=dtypes.float32)
    return loss_fn(probs, one_hot_labels)
  return _loss
Example #20
Source File: topn.py From keras-lambda with MIT License

def get_best(self, n):
  """Return the indices and values of the n highest scores in the TopN."""

  def refresh_shortlist():
    """Update the shortlist with the highest scores in id_to_score."""
    new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
    smallest_new_score = math_ops.reduce_min(new_scores)
    new_length = math_ops.reduce_sum(
        math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
    u1 = self.sl_ids.assign(
        math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
    u2 = self.sl_scores.assign(
        array_ops.concat([[smallest_new_score], new_scores], 0))
    self.last_ops = [u1, u2]
    return control_flow_ops.group(u1, u2)

  # We only need to refresh the shortlist if n is greater than the
  # current shortlist size (which is stored in sl_ids[0]).
  with ops.control_dependencies(self.last_ops):
    cond_op = control_flow_ops.cond(n > self.sl_ids[0], refresh_shortlist,
                                    control_flow_ops.no_op)
    with ops.control_dependencies([cond_op]):
      topk_values, topk_indices = nn_ops.top_k(
          self.sl_scores,
          math_ops.minimum(n, math_ops.to_int32(self.sl_ids[0])))
      # topk_indices are the indices into the shortlist, we want to return
      # the indices into id_to_score
      gathered_indices = array_ops.gather(self.sl_ids, topk_indices)
      return gathered_indices, topk_values
Example #21
Source File: misc.py From keras-lambda with MIT License

def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with ops.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = math_ops.to_int32(array_ops.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = array_ops.reshape(math_ops.range(0, sparse_size, 1), [-1, 1])
    concated = array_ops.concat([indices, sparse_labels], 1)
    dense_result = sparse_ops.sparse_to_dense(
        concated, [sparse_size, num_classes], 1.0, 0.0)
    result = array_ops.reshape(dense_result, [height, width, num_classes])
    return result
Example #22
Source File: misc.py From auto-alt-text-lambda-api with MIT License

def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with ops.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = math_ops.to_int32(array_ops.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = array_ops.reshape(math_ops.range(0, sparse_size, 1), [-1, 1])
    concated = array_ops.concat([indices, sparse_labels], 1)
    dense_result = sparse_ops.sparse_to_dense(
        concated, [sparse_size, num_classes], 1.0, 0.0)
    result = array_ops.reshape(dense_result, [height, width, num_classes])
    return result
Example #23
Source File: eval_metrics.py From auto-alt-text-lambda-api with MIT License

def _softmax_entropy(probabilities, targets, weights=None):
  return metric_ops.streaming_mean(
      losses.sparse_softmax_cross_entropy(probabilities,
                                          math_ops.to_int32(targets)),
      weights=weights)
Example #24
Source File: eval_metrics.py From auto-alt-text-lambda-api with MIT License

def _top_k_generator(k):
  def _top_k(probabilities, targets):
    targets = math_ops.to_int32(targets)
    if targets.get_shape().ndims > 1:
      targets = array_ops.squeeze(targets, squeeze_dims=[1])
    return metric_ops.streaming_mean(nn.in_top_k(probabilities, targets, k))
  return _top_k
Example #25
Source File: topn.py From auto-alt-text-lambda-api with MIT License

def get_best(self, n):
  """Return the indices and values of the n highest scores in the TopN."""

  def refresh_shortlist():
    """Update the shortlist with the highest scores in id_to_score."""
    new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
    smallest_new_score = math_ops.reduce_min(new_scores)
    new_length = math_ops.reduce_sum(
        math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
    u1 = self.sl_ids.assign(
        math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
    u2 = self.sl_scores.assign(
        array_ops.concat([[smallest_new_score], new_scores], 0))
    self.last_ops = [u1, u2]
    return control_flow_ops.group(u1, u2)

  # We only need to refresh the shortlist if n is greater than the
  # current shortlist size (which is stored in sl_ids[0]).
  with ops.control_dependencies(self.last_ops):
    cond_op = control_flow_ops.cond(n > self.sl_ids[0], refresh_shortlist,
                                    control_flow_ops.no_op)
    with ops.control_dependencies([cond_op]):
      topk_values, topk_indices = nn_ops.top_k(
          self.sl_scores,
          math_ops.minimum(n, math_ops.to_int32(self.sl_ids[0])))
      # topk_indices are the indices into the shortlist, we want to return
      # the indices into id_to_score
      gathered_indices = array_ops.gather(self.sl_ids, topk_indices)
      return gathered_indices, topk_values
Example #26
Source File: tensor_forest.py From auto-alt-text-lambda-api with MIT License

def one_hot_wrapper(num_classes, loss_fn):
  """Some loss functions take one-hot labels."""
  def _loss(probs, targets):
    if targets.get_shape().ndims > 1:
      targets = array_ops.squeeze(targets, squeeze_dims=[1])
    one_hot_labels = array_ops.one_hot(
        math_ops.to_int32(targets), num_classes,
        on_value=1., off_value=0., dtype=dtypes.float32)
    return loss_fn(probs, one_hot_labels)
  return _loss
Example #27
Source File: head.py From lambda-packs with MIT License

def _class_labels_streaming_mean(labels, weights, class_id):
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(labels)),
          array_ops.ones_like(labels), array_ops.zeros_like(labels)),
      weights=weights)
Example #28
Source File: head.py From lambda-packs with MIT License

def _class_predictions_streaming_mean(predictions, weights, class_id):
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
          array_ops.ones_like(predictions), array_ops.zeros_like(predictions)),
      weights=weights)
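Both head.py helpers build a 0/1 indicator tensor via where(...) and then take its weighted streaming mean, i.e. the running fraction of examples equal to class_id. A rough equivalent using the public tf.metrics API, with illustrative values:

import tensorflow as tf

predictions = tf.constant([0, 2, 2, 1])
class_id = 2

# 0/1 indicator for "prediction == class_id", as in the example above.
indicator = tf.cast(
    tf.equal(tf.to_int32(class_id), tf.to_int32(predictions)), tf.float32)
mean, update_op = tf.metrics.mean(indicator)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(mean))  # 0.5 -- two of four predictions are class 2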
Example #29
Source File: misc.py From lambda-packs with MIT License

def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with ops.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = math_ops.to_int32(array_ops.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = array_ops.reshape(math_ops.range(0, sparse_size, 1), [-1, 1])
    concated = array_ops.concat([indices, sparse_labels], 1)
    dense_result = sparse_ops.sparse_to_dense(
        concated, [sparse_size, num_classes], 1.0, 0.0)
    result = array_ops.reshape(dense_result, [height, width, num_classes])
    return result
Example #30
Source File: eval_metrics.py From lambda-packs with MIT License

def _softmax_entropy(probabilities, targets, weights=None):
  return metric_ops.streaming_mean(
      losses.sparse_softmax_cross_entropy(probabilities,
                                          math_ops.to_int32(targets)),
      weights=weights)