Python tensorflow.python.ops.array_ops.squeeze() Examples
The following are 30 code examples of tensorflow.python.ops.array_ops.squeeze(), drawn from open-source projects.
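Before the examples, here is a minimal sketch of what squeeze does, written in the same private-module import style as the snippets below. The shapes are illustrative assumptions; note that squeeze_dims, which appears in many of the older snippets, is a deprecated alias for axis.

from tensorflow.python.ops import array_ops

# An assumed [2, 1, 3, 1] tensor with two size-1 dimensions.
x = array_ops.ones([2, 1, 3, 1])

# With no axis argument, every size-1 dimension is removed: shape [2, 3].
y = array_ops.squeeze(x)

# Naming axes removes only those dimensions: shape [2, 3, 1].
z = array_ops.squeeze(x, axis=[1])

# squeeze_dims is the deprecated spelling of the same argument:
# array_ops.squeeze(x, squeeze_dims=[1]) is equivalent to the line above.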
Example #1
Source File: head.py From lambda-packs with MIT License
def _softmax_cross_entropy_loss(labels, logits, weights=None):
  with ops.name_scope(
      None, "softmax_cross_entropy_loss", (logits, labels,)) as name:
    labels = ops.convert_to_tensor(labels)
    # Check that we got integer for classification.
    if not labels.dtype.is_integer:
      raise ValueError("Labels dtype should be integer "
                       "Instead got %s." % labels.dtype)

    # sparse_softmax_cross_entropy_with_logits requires [batch_size] labels.
    is_squeezed_labels = False
    # TODO(ptucker): This will break for dynamic shapes.
    if len(labels.get_shape()) == 2:
      labels = array_ops.squeeze(labels, squeeze_dims=(1,))
      is_squeezed_labels = True

    loss = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name=name)

    # Restore squeezed dimension, if necessary, so loss matches weights shape.
    if is_squeezed_labels:
      loss = array_ops.expand_dims(loss, axis=(1,))

    return _compute_weighted_loss(loss, weights)
Example #2
Source File: boolean_mask.py From auto-alt-text-lambda-api with MIT License
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to
      the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  input_tensor = input_tensors[0]
  mask = input_tensors[1]
  if mask.get_shape().ndims > 1:
    mask = array_ops.squeeze(mask)

  if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
    mask_fn = sparse_boolean_mask
  else:
    mask_fn = array_ops.boolean_mask

  # pylint: disable=not-callable
  return self.return_type(mask_fn(input_tensor, mask))
Example #3
Source File: pooling.py From lambda-packs with MIT License
def call(self, inputs):
  # There is no TF op for 1D pooling, hence we make the inputs 4D.
  if self.data_format == 'channels_last':
    inputs = array_ops.expand_dims(inputs, 2)
    pool_shape = (1,) + self.pool_size + (1, 1)
    strides = (1,) + self.strides + (1, 1)
    data_format = 'NHWC'
  else:
    inputs = array_ops.expand_dims(inputs, 1)
    pool_shape = (1, 1) + self.pool_size + (1,)
    strides = (1, 1) + self.strides + (1,)
    data_format = 'NCHW'

  outputs = self.pool_function(
      inputs,
      ksize=pool_shape,
      strides=strides,
      padding=self.padding.upper(),
      data_format=data_format)

  if self.data_format == 'channels_last':
    return array_ops.squeeze(outputs, 2)
  else:
    return array_ops.squeeze(outputs, 1)
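The expand-pool-squeeze round trip above is a common way to drive a 2D pooling op with 3D inputs. Below is a minimal standalone sketch of the same shape bookkeeping, with assumed sizes; nn_ops.max_pool stands in for self.pool_function.

from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops

# Assumed channels_last input: [batch=4, steps=8, channels=3].
inputs = array_ops.ones([4, 8, 3])

# Insert a dummy width dimension so the 2D op applies:
# [4, 8, 3] -> [4, 8, 1, 3].
inputs_4d = array_ops.expand_dims(inputs, 2)

# Pool over the time dimension only; the dummy dimension stays size 1.
pooled = nn_ops.max_pool(inputs_4d, ksize=[1, 2, 1, 1],
                         strides=[1, 2, 1, 1], padding='VALID',
                         data_format='NHWC')

# Drop the dummy dimension again: [4, 4, 1, 3] -> [4, 4, 3].
outputs = array_ops.squeeze(pooled, 2)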
Example #4
Source File: gmm_ops.py From lambda-packs with MIT License
def _define_partial_maximization_operation(self, shard_id, shard):
  """Computes the partial statistics of the means and covariances.

  Args:
    shard_id: current shard id.
    shard: current data shard, 1 X num_examples X dimensions.
  """
  # Soft assignment of each data point to each of the two clusters.
  self._points_in_k[shard_id] = math_ops.reduce_sum(
      self._w[shard_id], 0, keep_dims=True)
  # Partial means.
  w_mul_x = array_ops.expand_dims(
      math_ops.matmul(
          self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
      1)
  self._w_mul_x.append(w_mul_x)
  # Partial covariances.
  x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
  x_trans = array_ops.transpose(x, perm=[0, 2, 1])
  x_mul_w = array_ops.concat([
      array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
      for k in range(self._num_classes)
  ], 0)
  self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
Example #5
Source File: tensor_forest.py From auto-alt-text-lambda-api with MIT License
def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)
  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini
  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #6
Source File: gmm_ops.py From lambda-packs with MIT License
def _define_diag_covariance_probs(self, shard_id, shard):
  """Defines the diagonal covariance probabilities per example in a class.

  Args:
    shard_id: id of the current shard.
    shard: current data shard, 1 X num_examples X dimensions.

  Returns a matrix num_examples * num_classes.
  """
  # num_classes X 1
  # TODO(xavigonzalvo): look into alternatives to log for
  # reparametrization of variance parameters.
  det_expanded = math_ops.reduce_sum(
      math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
  diff = shard - self._means
  x2 = math_ops.square(diff)
  cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
  # num_classes X num_examples
  x2_cov = math_ops.matmul(x2, cov_expanded)
  x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
  self._probs[shard_id] = -0.5 * (
      math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
      array_ops.transpose(det_expanded) + x2_cov)
Example #7
Source File: gmm_ops.py From auto-alt-text-lambda-api with MIT License
def _define_partial_maximization_operation(self, shard_id, shard):
  """Computes the partial statistics of the means and covariances.

  Args:
    shard_id: current shard id.
    shard: current data shard, 1 X num_examples X dimensions.
  """
  # Soft assignment of each data point to each of the two clusters.
  self._points_in_k[shard_id] = math_ops.reduce_sum(
      self._w[shard_id], 0, keep_dims=True)
  # Partial means.
  w_mul_x = array_ops.expand_dims(
      math_ops.matmul(
          self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
      1)
  self._w_mul_x.append(w_mul_x)
  # Partial covariances.
  x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
  x_trans = array_ops.transpose(x, perm=[0, 2, 1])
  x_mul_w = array_ops.concat([
      array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
      for k in range(self._num_classes)
  ], 0)
  self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
Example #8
Source File: gmm_ops.py From auto-alt-text-lambda-api with MIT License
def _define_diag_covariance_probs(self, shard_id, shard):
  """Defines the diagonal covariance probabilities per example in a class.

  Args:
    shard_id: id of the current shard.
    shard: current data shard, 1 X num_examples X dimensions.

  Returns a matrix num_examples * num_classes.
  """
  # num_classes X 1
  # TODO(xavigonzalvo): look into alternatives to log for
  # reparametrization of variance parameters.
  det_expanded = math_ops.reduce_sum(
      math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
  diff = shard - self._means
  x2 = math_ops.square(diff)
  cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
  # num_classes X num_examples
  x2_cov = math_ops.matmul(x2, cov_expanded)
  x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
  self._probs[shard_id] = -0.5 * (
      math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
      array_ops.transpose(det_expanded) + x2_cov)
Example #9
Source File: hybrid_model.py From lambda-packs with MIT License
def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=array_ops.squeeze(math_ops.to_int32(labels)),
            logits=self.training_inference_graph(data)),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
Example #10
Source File: pooling.py From auto-alt-text-lambda-api with MIT License
def call(self, inputs):
  # There is no TF op for 1D pooling, hence we make the inputs 4D.
  if self.data_format == 'channels_last':
    inputs = array_ops.expand_dims(inputs, 2)
    pool_shape = (1,) + self.pool_size + (1, 1)
    strides = (1,) + self.strides + (1, 1)
    data_format = 'NHWC'
  else:
    inputs = array_ops.expand_dims(inputs, 1)
    pool_shape = (1, 1) + self.pool_size + (1,)
    strides = (1, 1) + self.strides + (1,)
    data_format = 'NCHW'

  outputs = self.pool_function(
      inputs,
      ksize=pool_shape,
      strides=strides,
      padding=self.padding.upper(),
      data_format=data_format)

  if self.data_format == 'channels_last':
    return array_ops.squeeze(outputs, 2)
  else:
    return array_ops.squeeze(outputs, 1)
Example #11
Source File: tensor_forest.py From lambda-packs with MIT License
def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)
  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini
  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #12
Source File: boolean_mask.py From lambda-packs with MIT License
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to
      the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  input_tensor = input_tensors[0]
  mask = input_tensors[1]
  if mask.get_shape().ndims > 1:
    mask = array_ops.squeeze(mask)

  if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
    mask_fn = sparse_boolean_mask
  else:
    mask_fn = array_ops.boolean_mask

  # pylint: disable=not-callable
  return self.return_type(mask_fn(input_tensor, mask))
Example #13
Source File: tensor_forest.py From auto-alt-text-lambda-api with MIT License
def one_hot_wrapper(num_classes, loss_fn):
  """Some loss functions take one-hot labels."""
  def _loss(probs, targets):
    if targets.get_shape().ndims > 1:
      targets = array_ops.squeeze(targets, squeeze_dims=[1])
    one_hot_labels = array_ops.one_hot(
        math_ops.to_int32(targets),
        num_classes,
        on_value=1.,
        off_value=0.,
        dtype=dtypes.float32)
    return loss_fn(probs, one_hot_labels)
  return _loss
Example #14
Source File: target_column.py From lambda-packs with MIT License
def _streaming_at_threshold(streaming_metrics_fn, threshold):

  def _streaming_metrics(predictions, labels, weights=None):
    precision_tensor, update_op = streaming_metrics_fn(
        predictions,
        labels=labels,
        thresholds=[threshold],
        weights=_float_weights_or_none(weights))
    return array_ops.squeeze(precision_tensor), update_op

  return _streaming_metrics
Example #15
Source File: crf.py From auto-alt-text-lambda-api with MIT License
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm
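The slice/squeeze pair at the start of crf_log_norm peels off the first time step and flattens it into an RNN initial state. A minimal sketch of just that shape manipulation, with assumed dimensions:

from tensorflow.python.ops import array_ops

# Assumed unary potentials: [batch=2, max_seq_len=6, num_tags=4].
inputs = array_ops.ones([2, 6, 4])

# Keep only the first time step: [2, 6, 4] -> [2, 1, 4].
first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])

# Drop the singleton time axis so it can serve as a [batch, num_tags]
# initial state: [2, 1, 4] -> [2, 4].
first_input = array_ops.squeeze(first_input, [1])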
Example #16
Source File: eval_metrics.py From auto-alt-text-lambda-api with MIT License
def _squeeze_and_onehot(targets, depth):
  targets = array_ops.squeeze(targets, squeeze_dims=[1])
  return array_ops.one_hot(math_ops.to_int32(targets), depth)
Example #17
Source File: fully_connected.py From auto-alt-text-lambda-api with MIT License
def inference_graph(self, data):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    # Compute activations for the neural network.
    nn_activations = layers.fully_connected(data, 1)

    # There is always one activation per instance by definition, so squeeze
    # away the extra dimension.
    return array_ops.squeeze(nn_activations, squeeze_dims=[1])
Example #18
Source File: head.py From auto-alt-text-lambda-api with MIT License
def _streaming_at_threshold(streaming_metrics_fn, threshold):

  def _streaming_metrics(predictions, labels, weights=None):
    precision_tensor, update_op = streaming_metrics_fn(
        predictions,
        labels=labels,
        thresholds=(threshold,),
        weights=_float_weights_or_none(weights))
    return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)

  return _streaming_metrics
Example #19
Source File: Generate.py From YouTubeCommenter with MIT License
def new_sparse_categorical_accuracy(y_true, y_pred):
  y_pred_rank = ops.convert_to_tensor(y_pred).get_shape().ndims
  y_true_rank = ops.convert_to_tensor(y_true).get_shape().ndims
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,).
  if (y_true_rank is not None) and (y_pred_rank is not None) and (
      len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
    y_true = array_ops.squeeze(y_true, [-1])
  y_pred = math_ops.argmax(y_pred, axis=-1)
  # If the predicted output and actual output types don't match, force cast
  # them to match.
  if K.dtype(y_pred) != K.dtype(y_true):
    y_pred = math_ops.cast(y_pred, K.dtype(y_true))
  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())

# Load the model
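The rank check above fires exactly when y_true carries a trailing length-1 class-index axis, e.g. [num_samples, 1] labels against [num_samples, num_classes] predictions. A small assumed-shape illustration:

from tensorflow.python.ops import array_ops

# Assumed shapes: sparse labels [32, 1], class scores [32, 10].
y_true = array_ops.ones([32, 1])
y_pred = array_ops.ones([32, 10])

# Both tensors are rank 2, so the accuracy above squeezes the label
# axis: [32, 1] -> [32], matching argmax(y_pred, axis=-1).
y_true = array_ops.squeeze(y_true, [-1])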
Example #20
Source File: tensor_forest.py From auto-alt-text-lambda-api with MIT License
def tree_initialization(self):
  def _init_tree():
    return state_ops.scatter_update(self.variables.tree, [0],
                                    [[-1, -1]]).op

  def _nothing():
    return control_flow_ops.no_op()

  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.squeeze(
              array_ops.strided_slice(self.variables.tree, [0, 0], [1, 1])),
          -2), _init_tree, _nothing)
Example #21
Source File: loss_ops.py From lambda-packs with MIT License
def compute_weighted_loss(losses, weights=1.0, scope=None):
  """Computes the weighted loss.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    scope: the scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If `weights` is `None` or the shape is not compatible with
      `losses`, or if the number of dimensions (rank) of either `losses` or
      `weights` is missing.
  """
  with ops.name_scope(scope, "weighted_loss", [losses, weights]):
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.to_float(losses)
    weights = math_ops.to_float(ops.convert_to_tensor(weights))

    if losses.get_shape().ndims is None:
      raise ValueError("losses.get_shape().ndims cannot be None")
    weights_shape = weights.get_shape()
    if weights_shape.ndims is None:
      raise ValueError("weights.get_shape().ndims cannot be None")

    if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
      weights = array_ops.squeeze(weights, [-1])

    total_loss = _scale_losses(losses, weights)
    num_present = _num_present(losses, weights)
    mean_loss = _safe_mean(total_loss, num_present)
    # convert the result back to the input type
    mean_loss = math_ops.cast(mean_loss, input_dtype)
    add_loss(mean_loss)
    return mean_loss
Example #22
Source File: linear_operator.py From lambda-packs with MIT License
def _solvevec(self, rhs, adjoint=False):
  """Default implementation of _solvevec."""
  rhs_mat = array_ops.expand_dims(rhs, axis=-1)
  solution_mat = self.solve(rhs_mat, adjoint=adjoint)
  return array_ops.squeeze(solution_mat, axis=-1)
Example #23
Source File: linear_operator.py From lambda-packs with MIT License
def _matvec(self, x, adjoint=False):
  x_mat = array_ops.expand_dims(x, axis=-1)
  y_mat = self.matmul(x_mat, adjoint=adjoint)
  return array_ops.squeeze(y_mat, axis=-1)
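Examples #22 and #23 are two directions of the same idiom: promote a batch of vectors to one-column matrices, apply a matrix op, then squeeze the result back to vectors. A minimal sketch with a plain matmul standing in for the LinearOperator methods:

from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

a = array_ops.ones([5, 5])  # an assumed square operator
x = array_ops.ones([5])     # a vector argument

# Treat x as a 5x1 matrix so matmul applies: [5] -> [5, 1].
x_mat = array_ops.expand_dims(x, axis=-1)
y_mat = math_ops.matmul(a, x_mat)  # [5, 1]

# Recover a vector by dropping the trailing size-1 axis: [5, 1] -> [5].
y = array_ops.squeeze(y_mat, axis=-1)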
Example #24
Source File: target_column.py From lambda-packs with MIT License
def _softmax_cross_entropy_loss(logits, target):
  # Check that we got integer for classification.
  if not target.dtype.is_integer:
    raise ValueError("Target's dtype should be integer "
                     "Instead got %s." % target.dtype)
  # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
  if len(target.get_shape()) == 2:
    target = array_ops.squeeze(target, squeeze_dims=[1])
  loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
      labels=target, logits=logits)
  return loss_vec
Example #25
Source File: target_column.py From lambda-packs with MIT License
def logits_to_predictions(self, logits, proba=False):
  if self.num_label_columns == 1:
    return array_ops.squeeze(logits, squeeze_dims=[1])
  return logits
Example #26
Source File: target_column.py From auto-alt-text-lambda-api with MIT License
def logits_to_predictions(self, logits, proba=False):
  if self.num_label_columns == 1:
    return array_ops.squeeze(logits, squeeze_dims=[1])
  return logits
Example #27
Source File: head.py From lambda-packs with MIT License
def _streaming_recall_at_threshold(predictions, labels, weights, threshold):
  precision_tensor, update_op = metrics_lib.streaming_recall_at_thresholds(
      predictions, labels=labels, thresholds=(threshold,),
      weights=_float_weights_or_none(weights))
  return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
Example #28
Source File: head.py From lambda-packs with MIT License
def _streaming_precision_at_threshold(predictions, labels, weights, threshold):
  precision_tensor, update_op = metrics_lib.streaming_precision_at_thresholds(
      predictions, labels=labels, thresholds=(threshold,),
      weights=_float_weights_or_none(weights))
  return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
Example #29
Source File: tensor_forest.py From auto-alt-text-lambda-api with MIT License
def get_stats(self, session):
  num_nodes = self.variables.end_of_tree.eval(session=session) - 1
  num_leaves = array_ops.where(
      math_ops.equal(array_ops.squeeze(array_ops.slice(
          self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
  ).eval(session=session).shape[0]
  return TreeStats(num_nodes, num_leaves)
Example #30
Source File: target_column.py From auto-alt-text-lambda-api with MIT License
def _softmax_cross_entropy_loss(logits, target):
  # Check that we got integer for classification.
  if not target.dtype.is_integer:
    raise ValueError("Target's dtype should be integer "
                     "Instead got %s." % target.dtype)
  # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
  if len(target.get_shape()) == 2:
    target = array_ops.squeeze(target, squeeze_dims=[1])
  loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
      labels=target, logits=logits)
  return loss_vec