Python tensorflow.compat.v1.reduce_max() Examples
The following are 29 code examples of tensorflow.compat.v1.reduce_max(), drawn from open-source projects. The original project and source file are named above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1, or try the search function.
Example #1
Source File: utils.py From lamb with Apache License 2.0
def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Converts a vector of lengths to a matrix of binary masks.

  E.g. [2, 4, 3] will become [[1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0]].

  Args:
    lengths: a d-dimensional vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional tensor
      indicating the maximum length of the masks. If not provided, the maximum
      length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the dtype of
      the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A d x max_length tensor of binary masks (int32).
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
  return tf.stop_gradient(cast_mask)
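For reference, a minimal usage sketch (not part of the original project; it assumes TF1 graph mode with a Session) reproducing the docstring example:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

lengths = tf.constant([2, 4, 3])
mask = mask_from_lengths(lengths)  # defined above
with tf.Session() as sess:
  print(sess.run(mask))
  # [[1 1 0 0]
  #  [1 1 1 1]
  #  [1 1 1 0]]

Here tf.reduce_max(lengths) infers max_length = 4, so each row is a binary mask of that width.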
Example #2
Source File: tensor_utils.py From language with Apache License 2.0
def batch_boolean_mask(mask):
  """Get indices of true values.

  Args:
    mask: [batch_size, num_values]

  Returns:
    true_indices: [batch_size, max_true]
    gathered_mask: [batch_size, max_true]
  """
  # [batch_size, num_values]
  mask = tf.to_int32(mask)

  # [batch_size]
  num_true = tf.reduce_sum(mask, 1)

  # []
  max_true = tf.reduce_max(num_true)

  # [batch_size, max_true]
  gathered_mask, true_indices = tf.nn.top_k(mask, max_true)
  gathered_mask = tf.cast(gathered_mask, tf.bool)

  return gathered_mask, true_indices
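A usage sketch (my own, not from the original project; TF1 Session assumed): tf.reduce_max over the per-row counts of true values determines the padded width of the result.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

mask = tf.constant([[True, False, True],
                    [True, False, False]])
gathered_mask, true_indices = batch_boolean_mask(mask)
with tf.Session() as sess:
  m, idx = sess.run([gathered_mask, true_indices])
  # m   -> [[ True  True]
  #         [ True False]]
  # idx -> [[0 2]
  #         [0 1]]   (entries where m is False are padding)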
Example #3
Source File: generator_utils.py From tensor2tensor with Apache License 2.0
def _scanning_pack(self, dataset):
  """Apply scan based pack to a dataset."""
  if self._chop_long_sequences:
    dataset = dataset.map(lambda x: (x[:self._packed_length],))
  else:
    dataset = dataset.filter(lambda *x: tf.reduce_max(  # pylint: disable=g-long-lambda
        tf.stack([tf.shape(i)[0] for i in x]), axis=0) <= self._packed_length)

  # In order to retrieve the sequences which are still in the queue when the
  # dataset is exhausted, we feed dummy sequences which are guaranteed to
  # displace the remaining elements.
  dataset = dataset.concatenate(
      tf.data.Dataset.range(self._queue_size).map(self._eviction_fn))

  initial_state = self._scan_initial_state()
  step_fn = functools.partial(
      tf.autograph.to_graph(_scan_step_fn),
      packed_length=self._packed_length,
      queue_size=self._queue_size,
      spacing=self._spacing,
      num_sequences=self._num_sequences,
      token_dtype=self._token_dtype)

  dataset = dataset.apply(tf.data.experimental.scan(initial_state, step_fn))

  is_real_sample = lambda valid_sample, _: valid_sample
  return dataset.filter(is_real_sample)
Example #4
Source File: bounds.py From interval-bound-propagation with Apache License 2.0
def apply_piecewise_monotonic_fn(self, wrapper, fn, boundaries, *args):
  valid_values = []
  for a in [self] + list(args):
    vs = []
    vs.append(a.lower)
    vs.append(a.upper)
    for b in boundaries:
      vs.append(
          tf.maximum(a.lower, tf.minimum(a.upper, b * tf.ones_like(a.lower))))
    valid_values.append(vs)
  outputs = []
  for inputs in itertools.product(*valid_values):
    outputs.append(fn(*inputs))
  outputs = tf.stack(outputs, axis=-1)
  return IntervalBounds(tf.reduce_min(outputs, axis=-1),
                        tf.reduce_max(outputs, axis=-1))
Example #5
Source File: shape_utils.py From models with Apache License 2.0
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes])
Example #6
Source File: bert_fewshot_classifier.py From language with Apache License 2.0
def extract_relation_representations(input_layer, input_ids, tokenizer):
  """Extracts relation representation from sentence sequence layer."""
  entity_representations = []
  entity_marker_ids = tokenizer.convert_tokens_to_ids(["[E1]", "[E2]"])
  for entity_marker_id in entity_marker_ids:
    mask = tf.to_float(tf.equal(input_ids, entity_marker_id))
    mask = tf.broadcast_to(tf.expand_dims(mask, -1), tf.shape(input_layer))
    entity_representation = tf.reduce_max(
        mask * input_layer, axis=1, keepdims=True)
    entity_representations.append(entity_representation)

  output_layer = tf.concat(entity_representations, axis=2)
  output_layer = tf.squeeze(output_layer, [1])
  tf.logging.info("entity marker pooling AFTER output shape %s",
                  output_layer.shape)
  return output_layer
Example #7
Source File: discretization.py From tensor2tensor with Apache License 2.0
def top_k_softmax(x, k):
  """Calculate softmax(x), select top-k and rescale to sum to 1.

  Args:
    x: Input to softmax over.
    k: Number of top-k to select.

  Returns:
    softmax(x) and maximum item.
  """
  x = tf.nn.softmax(x)
  top_x, _ = tf.nn.top_k(x, k=k + 1)
  min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True)
  x = tf.nn.relu((x - min_top) + 1e-12)
  x /= tf.reduce_sum(x, axis=-1, keep_dims=True)
  return x, tf.reduce_max(top_x, axis=-1)
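A small sketch of how this behaves (my own, not from tensor2tensor; TF1 Session assumed): the top-(k+1) softmax values supply a threshold, everything below it is zeroed by the ReLU, and the survivors are renormalized.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[1.0, 2.0, 3.0, 4.0]])
probs, max_prob = top_k_softmax(x, k=2)
with tf.Session() as sess:
  p, m = sess.run([probs, max_prob])
  # p keeps (approximately) only the top-2 softmax entries, rescaled to sum to 1
  # m is the largest softmax value, ~0.665 here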
Example #8
Source File: common_layers.py From tensor2tensor with Apache License 2.0
def top_1_tpu(inputs):
  """Finds max and argmax over the last dimension. Works well on TPU.

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
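The trick here is that multiplying an index range by the equality mask and taking a second tf.reduce_max recovers the argmax without calling tf.argmax. A minimal sketch (my own, not from tensor2tensor; TF1 Session assumed):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

inputs = tf.constant([[0.1, 0.9, 0.3],
                      [0.5, 0.2, 0.4]])
values, indices = top_1_tpu(inputs)
with tf.Session() as sess:
  print(sess.run([values, indices]))  # values -> [0.9, 0.5], indices -> [1, 0]

Note that ties resolve to the largest matching index, unlike tf.argmax, which returns the smallest.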
Example #9
Source File: keypoint_ops.py From models with Apache License 2.0
def keypoints_to_enclosing_bounding_boxes(keypoints):
  """Creates enclosing bounding boxes from keypoints.

  Args:
    keypoints: a [num_instances, num_keypoints, 2] float32 tensor with
      keypoints in [y, x] format.

  Returns:
    A [num_instances, 4] float32 tensor that tightly covers all the keypoints
    for each instance.
  """
  ymin = tf.math.reduce_min(keypoints[:, :, 0], axis=1)
  xmin = tf.math.reduce_min(keypoints[:, :, 1], axis=1)
  ymax = tf.math.reduce_max(keypoints[:, :, 0], axis=1)
  xmax = tf.math.reduce_max(keypoints[:, :, 1], axis=1)
  return tf.stack([ymin, xmin, ymax, xmax], axis=1)
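A quick check of the expected output (illustrative only, not from the original project; TF1 Session assumed):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

keypoints = tf.constant([[[0.2, 0.3], [0.1, 0.8], [0.4, 0.5]]], tf.float32)
boxes = keypoints_to_enclosing_bounding_boxes(keypoints)
with tf.Session() as sess:
  print(sess.run(boxes))  # [[0.1 0.3 0.4 0.8]] -- [ymin, xmin, ymax, xmax]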
Example #10
Source File: neural_assistant.py From tensor2tensor with Apache License 2.0
def compute_max_pool_embedding(input_embeddings, input_lengths):
  """Computes max pool embedding.

  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]

  Returns:
    max_pool_embedding: <tf.float32>[bs, emb_dim]
  """
  max_seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, max_seq_len]
  mask = 1.0 - tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32)
  # NOTE: the additive offset is -1e-6 in the original source; masked
  # max-pooling more commonly uses a large negative constant (e.g. -1e9) so
  # that padded positions can never win the max.
  mask = tf.squeeze(mask * (-1e-6), 1)
  mask = tf.expand_dims(mask, 2)
  # <tf.float32>[bs, emb_dim]
  max_pool_embedding = tf.reduce_max(input_embeddings + mask, 1)
  # <tf.float32>[bs, dim]
  return max_pool_embedding
Example #11
Source File: simplex_bounds.py From interval-bound-propagation with Apache License 2.0
def _simplex_bounds(mapped_vertices, mapped_centres, r, axis):
  """Calculates naive bounds on the given layer-mapped vertices.

  Args:
    mapped_vertices: Tensor of shape (num_vertices, *output_shape)
      or of shape (batch_size, num_vertices, *output_shape)
      containing the vertices in the layer's output space.
    mapped_centres: Tensor of shape (batch_size, *output_shape)
      containing the layer's nominal outputs.
    r: Scalar in [0, 1) specifying the radius (in vocab space) of the simplex.
    axis: Index of the `num_vertices` dimension of `mapped_vertices`.

  Returns:
    lb_out: Tensor of shape (batch_size, *output_shape) with lower bounds
      on the outputs of the affine layer.
    ub_out: Tensor of shape (batch_size, *output_shape) with upper bounds
      on the outputs of the affine layer.
  """
  # Use the negative of r, instead of the complement of r, as
  # we're shifting the input domain to be centred at the origin.
  lb_out = -r * mapped_centres + r * tf.reduce_min(mapped_vertices, axis=axis)
  ub_out = -r * mapped_centres + r * tf.reduce_max(mapped_vertices, axis=axis)

  return lb_out, ub_out
Example #12
Source File: preprocessor.py From models with Apache License 2.0
def one_hot_encoding(labels, num_classes=None):
  """One-hot encodes the multiclass labels.

  Example usage:
    labels = tf.constant([1, 4], dtype=tf.int32)
    one_hot = OneHotEncoding(labels, num_classes=5)
    one_hot.eval()    # evaluates to [0, 1, 0, 0, 1]

  Args:
    labels: A tensor of shape [None] corresponding to the labels.
    num_classes: Number of classes in the dataset.

  Returns:
    onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
      encoding of the labels.

  Raises:
    ValueError: if num_classes is not specified.
  """
  with tf.name_scope('OneHotEncoding', values=[labels]):
    if num_classes is None:
      raise ValueError('num_classes must be specified')

    labels = tf.one_hot(labels, num_classes, 1, 0)
    return tf.reduce_max(labels, 0)
Example #13
Source File: metrics.py From tensor2tensor with Apache License 2.0
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example #14
Source File: metrics.py From tensor2tensor with Apache License 2.0
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example #15
Source File: keypoint_ops.py From models with Apache License 2.0
def to_absolute_coordinates(keypoints, height, width,
                            check_range=True, scope=None):
  """Converts normalized keypoint coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum keypoint
  coordinate value is larger than 1.01 (in which case coordinates are already
  absolute).

  Args:
    keypoints: A tensor of shape [num_instances, num_keypoints, 2]
    height: Maximum value for y coordinate of absolute keypoint coordinates.
    width: Maximum value for x coordinate of absolute keypoint coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates
    in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input keypoints is correct.
    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
                             ['maximum keypoint coordinate value is larger '
                              'than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, height, width)
Example #16
Source File: rnn_benchmark.py From autograph with Apache License 2.0
def _benchmark_eager_dynamic_rnn(self, batch_size, max_seq_len):
  input_data, sequence_lengths = self._generate_fake_rnn_inputs(
      batch_size=batch_size, max_seq_len=max_seq_len)
  rnn_cell, initial_state = self._create_rnn_cell(batch_size=batch_size)

  def eager_dynamic_rnn(rnn_cell,
                        input_data,
                        initial_state,
                        sequence_length=None):
    """An eager version of dynamic_rnn."""
    # [batch, time, features] -> [time, batch, features]
    input_data = tf.transpose(input_data, [1, 0, 2])
    outputs = []
    state = initial_state
    if sequence_length is None:
      max_seq_len = input_data.shape[0]
    else:
      max_seq_len = tf.reduce_max(sequence_length)

    for i in range(max_seq_len):
      new_output, new_state = rnn_cell(input_data[i], state)
      output = tf.where(i < sequence_length, new_output,
                        tf.zeros(new_output.shape))
      state = tf.where(i < sequence_length, new_state, state)
      outputs.append(output)

    return tf.transpose(tf.stack(outputs), [1, 0, 2]), state

  def target():
    eager_dynamic_rnn(rnn_cell, input_data, initial_state, sequence_lengths)

  self.time_execution(
      ('Eager', batch_size, max_seq_len),
      target,
      iter_volume=batch_size,
      iter_unit='examples',
      extras={
          'max_seq_len': max_seq_len,
          'batch_size': batch_size,
      })
Example #17
Source File: box_list_ops.py From models with Apache License 2.0
def to_absolute_coordinates(boxlist,
                            height,
                            width,
                            check_range=True,
                            maximum_normalized_coordinate=1.1,
                            scope=None):
  """Converts normalized box coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum box
  coordinate value is larger than maximum_normalized_coordinate (in which case
  coordinates are already absolute).

  Args:
    boxlist: BoxList with coordinates in range [0, 1].
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.
    scope: name scope.

  Returns:
    boxlist with absolute coordinates in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input boxes is correct.
    if check_range:
      box_maximum = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(
          tf.greater_equal(maximum_normalized_coordinate, box_maximum),
          ['maximum box coordinate value is larger '
           'than %f: ' % maximum_normalized_coordinate, box_maximum])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, height, width)
Example #18
Source File: box_list_ops.py From models with Apache License 2.0
def prune_non_overlapping_boxes(
    boxlist1, boxlist2, min_overlap=0.0, scope=None):
  """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.

  For each box in boxlist1, we want its IOA to be more than minoverlap with
  at least one of the boxes in boxlist2. If it does not, we remove it.

  Args:
    boxlist1: BoxList holding N boxes.
    boxlist2: BoxList holding M boxes.
    min_overlap: Minimum required overlap between boxes, to count them as
      overlapping.
    scope: name scope.

  Returns:
    new_boxlist1: A pruned boxlist with size [N', 4].
    keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
      first input BoxList `boxlist1`.
  """
  with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
    ioa_ = ioa(boxlist2, boxlist1)  # [M, N] tensor
    ioa_ = tf.reduce_max(ioa_, reduction_indices=[0])  # [N] tensor
    keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
    keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1])
    new_boxlist1 = gather(boxlist1, keep_inds)
    return new_boxlist1, keep_inds
Example #19
Source File: box_list_ops_test.py From models with Apache License 2.0
def test_sample_boxes_by_jittering(self):

  def graph_fn():
    boxes = box_list.BoxList(
        tf.constant([[0.1, 0.1, 0.4, 0.4],
                     [0.1, 0.1, 0.5, 0.5],
                     [0.6, 0.6, 0.8, 0.8],
                     [0.2, 0.2, 0.3, 0.3]], tf.float32))
    sampled_boxes = box_list_ops.sample_boxes_by_jittering(
        boxlist=boxes, num_boxes_to_sample=10)
    iou = box_list_ops.iou(boxes, sampled_boxes)
    iou_max = tf.reduce_max(iou, axis=0)
    return sampled_boxes.get(), iou_max

  np_sampled_boxes, np_iou_max = self.execute(graph_fn, [])
  self.assertAllEqual(np_sampled_boxes.shape, [10, 4])
  self.assertAllGreater(np_iou_max, 0.3)
Example #20
Source File: box_list_ops.py From models with Apache License 2.0
def get_minimal_coverage_box(boxlist, default_box=None, scope=None):
  """Creates a single bounding box which covers all boxes in the boxlist.

  Args:
    boxlist: A Boxlist.
    default_box: A [1, 4] float32 tensor. If no boxes are present in
      `boxlist`, this default box will be returned. If None, will use a
      default box of [[0., 0., 1., 1.]].
    scope: Name scope.

  Returns:
    A [1, 4] float32 tensor with a bounding box that tightly covers all the
    boxes in the box list. If the boxlist does not contain any boxes, the
    default box is returned.
  """
  with tf.name_scope(scope, 'CreateCoverageBox'):
    num_boxes = boxlist.num_boxes()

    def coverage_box(bboxes):
      y_min, x_min, y_max, x_max = tf.split(
          value=bboxes, num_or_size_splits=4, axis=1)
      y_min_coverage = tf.reduce_min(y_min, axis=0)
      x_min_coverage = tf.reduce_min(x_min, axis=0)
      y_max_coverage = tf.reduce_max(y_max, axis=0)
      x_max_coverage = tf.reduce_max(x_max, axis=0)
      return tf.stack(
          [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
          axis=1)

    default_box = default_box or tf.constant([[0., 0., 1., 1.]])
    return tf.cond(
        tf.greater_equal(num_boxes, 1),
        true_fn=lambda: coverage_box(boxlist.get()),
        false_fn=lambda: default_box)
Example #21
Source File: model_utils.py From language with Apache License 2.0
def variable_summaries(var, scope=""):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope(scope):
    with tf.name_scope("summaries"):
      mean = tf.reduce_mean(var)
      tf.summary.scalar("mean", mean)
      with tf.name_scope("stddev"):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
      tf.summary.scalar("stddev", stddev)
      tf.summary.scalar("max", tf.reduce_max(var))
      tf.summary.scalar("min", tf.reduce_min(var))
      tf.summary.histogram("histogram", var)
Example #22
Source File: visualization.py From tensor2robot with Apache License 2.0
def get_softmax_viz(image, softmax, nrows=None):
  """Arrange softmax maps in a grid and superimpose them on the image."""
  softmax_shape = tf.shape(softmax)
  batch_size = softmax_shape[0]
  target_height = softmax_shape[1] * 2
  target_width = softmax_shape[2] * 2
  num_points = softmax_shape[3]

  if nrows is None:
    # Find a number of rows such that the arrangement is as square as possible.
    num_points_float = tf.cast(num_points, tf.float32)
    nfsqrt = tf.cast(tf.floor(tf.sqrt(num_points_float)), tf.int32)
    divs = tf.range(1, nfsqrt + 1)
    remainders = tf.mod(num_points_float, tf.cast(divs, tf.float32))
    divs = tf.gather(divs, tf.where(tf.equal(remainders, 0)))
    nrows = tf.reduce_max(divs)
  ncols = tf.cast(num_points / nrows, tf.int32)
  nrows = tf.cast(nrows, tf.int32)

  # Normalize per channel.
  img = softmax / tf.reduce_max(softmax, axis=[1, 2], keepdims=True)

  # Use softmax as hue and saturation and original image as value of HSV image.
  greyimg = tf.image.rgb_to_grayscale(image)
  greyimg = tf.image.resize_images(greyimg, [target_height, target_width])
  greyimg = tf.tile(greyimg, [1, 1, 1, num_points])
  greyimg = tf.reshape(
      greyimg, [batch_size, target_height, target_width, num_points, 1])
  img = tf.image.resize_images(img, [target_height, target_width])
  img = tf.reshape(
      img, [batch_size, target_height, target_width, num_points, 1])
  img = tf.concat([img / 2.0 + 0.5, img, greyimg * 0.7 + 0.3], axis=4)

  # Rearrange channels into a ncols x nrows grid.
  img = tf.reshape(
      img, [batch_size, target_height, target_width, nrows, ncols, 3])
  img = tf.transpose(img, [0, 3, 1, 4, 2, 5])
  img = tf.reshape(
      img, [batch_size, target_height * nrows, target_width * ncols, 3])
  img = tf.image.hsv_to_rgb(img)
  return img
Example #23
Source File: tensor_utils.py From language with Apache License 2.0
def linear_interpolation(t, minimum, maximum):
  t_min = tf.reduce_min(t)
  t_max = tf.reduce_max(t)
  return minimum + (t - t_min) * (maximum - minimum) / (t_max - t_min)
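Note that this rescales t so its minimum maps to `minimum` and its maximum to `maximum`; it produces NaNs when all elements of t are equal. A minimal sketch (illustrative, not from the original project; TF1 Session assumed):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

t = tf.constant([2.0, 4.0, 10.0])
scaled = linear_interpolation(t, minimum=0.0, maximum=1.0)
with tf.Session() as sess:
  print(sess.run(scaled))  # [0.   0.25 1.  ]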
Example #24
Source File: utils.py From lamb with Apache License 2.0
def compute_lengths(symbols_list, eos_symbol, name=None, dtype=tf.int64):
  """Computes sequence lengths given end-of-sequence symbol.

  Args:
    symbols_list: list of [batch_size] tensors of symbols (e.g. integers).
    eos_symbol: end of sequence symbol (e.g. integer).
    name: name for the name scope of this op.
    dtype: type of symbols, default: tf.int64.

  Returns:
    Tensor [batch_size] of lengths of sequences.
  """
  with tf.name_scope(name, 'compute_lengths'):
    max_len = len(symbols_list)
    eos_symbol_ = tf.constant(eos_symbol, dtype=dtype)
    # Array with max_len-time where we have EOS, 0 otherwise. Maximum of this
    # is the first EOS in that example.
    ends = [tf.constant(max_len - i, dtype=tf.int64) *
            tf.to_int64(tf.equal(s, eos_symbol_))
            for i, s in enumerate(symbols_list)]
    # Lengths of sequences, or max_len for sequences that didn't have EOS.
    # Note: examples that don't have EOS will have max value of 0 and value of
    # max_len+1 in lens_.
    lens_ = max_len + 1 - tf.reduce_max(tf.stack(ends, 1), axis=1)
    # For examples that didn't have EOS decrease max_len+1 to max_len as the
    # length.
    lens = tf.subtract(lens_, tf.to_int64(tf.equal(lens_, max_len + 1)))
    return tf.stop_gradient(tf.reshape(lens, [-1]))
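Here tf.reduce_max picks out the first EOS per example, encoded as the largest remaining-time value. A worked sketch (my own, not from lamb; TF1 Session assumed), with EOS symbol 0 over three timesteps:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

symbols_list = [tf.constant([5, 7], dtype=tf.int64),
                tf.constant([0, 8], dtype=tf.int64),
                tf.constant([0, 9], dtype=tf.int64)]
lengths = compute_lengths(symbols_list, eos_symbol=0)
with tf.Session() as sess:
  print(sess.run(lengths))
  # [2 3] -- the first sequence hits EOS at step 1 (length 2, counting the EOS);
  #          the second never hits EOS, so its length is max_len = 3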
Example #25
Source File: model.py From gpt2-estimator with MIT License
def softmax(x, axis=-1):
  x = x - tf.reduce_max(x, axis=axis, keepdims=True)
  ex = tf.exp(x)
  return ex / tf.reduce_sum(ex, axis=axis, keepdims=True)
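Subtracting tf.reduce_max before exponentiating is the standard numerical-stability trick: it leaves the result mathematically unchanged but keeps tf.exp from overflowing. A quick demonstration (illustrative, not from gpt2-estimator; TF1 Session assumed):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.constant([[1000.0, 1001.0, 1002.0]])  # naive exp() would overflow float32
with tf.Session() as sess:
  print(sess.run(softmax(logits)))  # [[0.09003057 0.24472848 0.66524094]]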
Example #26
Source File: distri.py From nni with MIT License
def kl(self, other):
  """Computes KL(self || other) for categorical distributions given logits."""
  a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
  a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True)
  ea0 = tf.exp(a0)
  ea1 = tf.exp(a1)
  z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
  z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True)
  p0 = ea0 / z0
  return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1)
Example #27
Source File: distri.py From nni with MIT License
def entropy(self):
  """Computes the entropy of the categorical distribution given logits."""
  a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
  ea0 = tf.exp(a0)
  z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
  p0 = ea0 / z0
  return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)
Example #28
Source File: embedding_util.py From language with Apache License 2.0
def get_nearest_neighbour(source, reference):
  """Get the nearest neighbour for every vector in source from reference."""
  normed_reference = tf.nn.l2_normalize(reference, axis=-1)
  normed_source = tf.nn.l2_normalize(source, axis=-1)

  cosine_sim = tf.matmul(normed_source, normed_reference, transpose_b=True)

  # Calculate the nearest neighbours and their cosine similarity.
  nearest_neighbour = tf.argmax(cosine_sim, axis=-1)
  nearest_neighbour_sim = tf.reduce_max(cosine_sim, axis=-1)

  return nearest_neighbour, nearest_neighbour_sim
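A minimal sketch (illustrative only, not from the original project; TF1 Session assumed) where each source row's nearest reference row under cosine similarity is obvious by construction:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

source = tf.constant([[1.0, 0.0], [0.0, 2.0]])
reference = tf.constant([[0.0, 1.0], [3.0, 0.0]])
nn, sim = get_nearest_neighbour(source, reference)
with tf.Session() as sess:
  print(sess.run([nn, sim]))  # nn -> [1, 0], sim -> [1., 1.]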
Example #29
Source File: utils.py From mesh with Apache License 2.0
def metric_max(values, name=None, **kwargs):
  del kwargs
  with tf.variable_scope(name, "metric_max", [values]):
    accum = tf.get_variable(
        "accum", shape=[], dtype=tf.float32,
        trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES],
        initializer=tf.zeros_initializer())
    update_op = tf.assign(
        accum, tf.maximum(accum, tf.reduce_max(tf.cast(values, tf.float32))))
    return accum, update_op
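Used as a streaming metric, the accumulator keeps a running maximum across batches; since the variable lives in LOCAL_VARIABLES, it is initialized by tf.local_variables_initializer(). A usage sketch (my own, not from mesh; TF1 Session assumed):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

values = tf.placeholder(tf.float32, shape=[None])
metric, update_op = metric_max(values)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op, feed_dict={values: [1.0, 3.0]})
  sess.run(update_op, feed_dict={values: [2.0]})
  print(sess.run(metric))  # 3.0 -- the max over everything seen so far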