Python tensorflow.squeeze() Examples
The following are 30 code examples of tensorflow.squeeze(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
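Before the project examples, here is a minimal, self-contained sketch of what tf.squeeze() itself does (illustrative only, not taken from any of the projects below): it removes dimensions of size 1 from a tensor's shape, either all of them or only the axes you list.

import tensorflow as tf

x = tf.zeros([2, 1, 3, 1])      # shape (2, 1, 3, 1)
a = tf.squeeze(x)               # remove every size-1 dim -> shape (2, 3)
b = tf.squeeze(x, axis=[1])     # remove only axis 1      -> shape (2, 3, 1)
c = tf.squeeze(x, axis=[1, 3])  # remove axes 1 and 3     -> shape (2, 3)
# Asking to squeeze an axis whose size is not 1 raises an error.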
Example #1
Source File: tf_atari_wrappers.py From fine-lm with MIT License | 6 votes |
def simulate(self, action):
  with tf.name_scope("environment/simulate"):  # Do we need this?
    initializer = (tf.zeros(self.old_shape, dtype=tf.float32),
                   tf.fill((len(self),), 0.0),
                   tf.fill((len(self),), False))

    def not_done_step(a, _):
      reward, done = self._batch_env.simulate(action)
      with tf.control_dependencies([reward, done]):
        r0 = self._batch_env.observ + 0
        r1 = tf.add(a[1], reward)
        r2 = tf.logical_or(a[2], done)
        return (r0, r1, r2)

    simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                           initializer=initializer, parallel_iterations=1,
                           infer_shape=False)
    observations, rewards, dones = simulate_ret
    split_observations = tf.split(observations, self.skip, axis=0)
    split_observations = [tf.squeeze(o, axis=0) for o in split_observations]
    observation = tf.concat(split_observations, axis=-1)
    with tf.control_dependencies([self._observ.assign(observation)]):
      return tf.identity(rewards[-1, ...]), tf.identity(dones[-1, ...])
Example #2
Source File: discriminator.py From SSGAN-Tensorflow with MIT License | 6 votes |
def __call__(self, input):
    with tf.variable_scope(self.name, reuse=self._reuse):
        if not self._reuse:
            print('\033[93m'+self.name+'\033[0m')
        _ = input
        num_channel = [32, 64, 128, 256, 256, 512]
        num_layer = np.ceil(np.log2(min(_.shape.as_list()[1:3]))).astype(np.int)
        for i in range(num_layer):
            ch = num_channel[i] if i < len(num_channel) else 512
            _ = conv2d(_, ch, self._is_train, info=not self._reuse,
                       norm=self._norm_type, name='conv{}'.format(i+1))
        _ = conv2d(_, int(num_channel[i]/4), self._is_train, k=1, s=1,
                   info=not self._reuse, norm='None', name='conv{}'.format(i+2))
        _ = conv2d(_, self._num_class+1, self._is_train, k=1, s=1,
                   info=not self._reuse, activation_fn=None, norm='None',
                   name='conv{}'.format(i+3))
        _ = tf.squeeze(_)
        if not self._reuse:
            log.info('discriminator output {}'.format(_.shape.as_list()))
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        return tf.nn.sigmoid(_), _
Example #3
Source File: autoencoders.py From fine-lm with MIT License | 6 votes |
def decode(self, bottleneck):
  """Auto-decode from the bottleneck and return the result."""
  # Get the shape from bottleneck and num channels.
  shape = common_layers.shape_list(bottleneck)
  try:
    num_channels = self.hparams.problem.num_channels
  except AttributeError:
    num_channels = 1
  dummy_targets = tf.zeros(shape[:-1] + [num_channels])
  # Set the bottleneck to decode.
  if len(shape) > 4:
    bottleneck = tf.squeeze(bottleneck, axis=[1])
  bottleneck = 2 * bottleneck - 1  # Be -1/1 instead of 0/1.
  self._cur_bottleneck_tensor = bottleneck
  # Run decoding.
  res = self.infer({"targets": dummy_targets})
  self._cur_bottleneck_tensor = None
  return res
Example #4
Source File: attention_lm.py From fine-lm with MIT License | 6 votes |
def body(self, features):
  # Remove dropout if not training
  hparams = self._hparams
  targets = features["targets"]
  targets = tf.squeeze(targets, 2)

  (decoder_input, decoder_self_attention_bias) = attention_lm_prepare_decoder(
      targets, hparams)

  decoder_input = tf.nn.dropout(decoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  decoder_output = attention_lm_decoder(decoder_input,
                                        decoder_self_attention_bias, hparams)
  decoder_output = tf.expand_dims(decoder_output, 2)

  return decoder_output
Example #5
Source File: resnet.py From fine-lm with MIT License | 6 votes |
def infer(self,
          features=None,
          decode_length=50,
          beam_size=1,
          top_beams=1,
          alpha=0.0,
          use_tpu=False):
  """Predict."""
  del decode_length, beam_size, top_beams, alpha, use_tpu
  assert features is not None
  logits, _ = self(features)  # pylint: disable=not-callable
  assert len(logits.get_shape()) == 5
  logits = tf.squeeze(logits, [1, 2, 3])
  log_probs = common_layers.log_prob_from_logits(logits)
  predictions, scores = common_layers.argmax_with_score(log_probs)
  return {
      "outputs": predictions,
      "scores": scores,
  }
Example #6
Source File: bleu_hook.py From fine-lm with MIT License | 6 votes |
def bleu_score(predictions, labels, **unused_kwargs):
  """BLEU score computation between labels and predictions.

  An approximate BLEU scoring method since we do not glue word pieces or
  decode the ids and tokenize the output. By default, we use ngram order of 4
  and use brevity penalty. Also, this does not have beam search.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    bleu: int, approx bleu score
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])

  bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)
  return bleu, tf.constant(1.0)
Example #7
Source File: transformer_test.py From fine-lm with MIT License | 6 votes |
def _create_greedy_infer_model(self):
  """Creates model for greedy inference testing.

  Returns:
    model: A t2t model.
    features: An map of string to tensor.
  """
  model, features = get_model(transformer.transformer_small())

  out_logits, _ = model(features)
  out_logits = tf.squeeze(out_logits, axis=[2, 3])
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
      labels=tf.reshape(features["targets"], [-1]))
  loss = tf.reduce_mean(loss)
  apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)

  with self.test_session():
    tf.global_variables_initializer().run()
    for _ in range(100):
      apply_grad.run()

  model.set_mode(tf.estimator.ModeKeys.PREDICT)

  return model, features
Example #8
Source File: transformer_test.py From fine-lm with MIT License | 6 votes |
def testGreedyTPUSlowVsFast(self):
  if not tf_version_has_inplace_ops():
    return

  decode_length = 3

  model, features = self._create_greedy_infer_model()

  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    slow_result = model._slow_greedy_infer_tpu(
        features, decode_length)["outputs"]
    slow_result = tf.squeeze(slow_result, axis=[2, 3])

    fast_result = model._greedy_infer(
        features, decode_length, use_tpu=True)["outputs"]

  with self.test_session():
    slow_res = slow_result.eval()
    fast_res = fast_result.eval()

  self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length))
  self.assertAllClose(fast_res, slow_res)
Example #9
Source File: image_processing.py From DOTA_models with Apache License 2.0 | 6 votes |
def eval_image(image, height, width, scope=None):
  """Prepare one image for evaluation.

  Args:
    image: 3-D float Tensor
    height: integer
    width: integer
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(values=[image, height, width], name=scope,
                     default_name='eval_image'):
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)

    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [height, width],
                                     align_corners=False)
    image = tf.squeeze(image, [0])
    return image
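The expand_dims / resize / squeeze pattern above appears because tf.image.resize_bilinear expects a 4-D batch of images. A minimal standalone sketch of the same idea (hypothetical tensor names, not taken from the project above):

img = tf.zeros([224, 224, 3])                             # a single 3-D image
batched = tf.expand_dims(img, 0)                          # -> [1, 224, 224, 3]
resized = tf.image.resize_bilinear(batched, [128, 128])   # resize needs a 4-D input
img_out = tf.squeeze(resized, [0])                        # drop batch dim -> [128, 128, 3]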
Example #10
Source File: modalities.py From fine-lm with MIT License | 6 votes |
def bottom_simple(self, x, name, reuse):
  with tf.variable_scope(name, reuse=reuse):
    # Ensure the inputs are 3-D
    if len(x.get_shape()) == 4:
      x = tf.squeeze(x, axis=3)
    while len(x.get_shape()) < 3:
      x = tf.expand_dims(x, axis=-1)

    var = self._get_weights()
    x = common_layers.dropout_no_scaling(
        x, 1.0 - self._model_hparams.symbol_dropout)
    ret = common_layers.gather(var, x)
    if self._model_hparams.multiply_embedding_mode == "sqrt_depth":
      ret *= self._body_input_depth**0.5
    ret *= tf.expand_dims(tf.to_float(tf.not_equal(x, 0)), -1)
    return ret
Example #11
Source File: modalities.py From fine-lm with MIT License | 6 votes |
def loss(self, top_out, targets):
  """Compute the CTC loss."""
  logits = top_out
  with tf.name_scope("ctc_loss", values=[logits, targets]):
    # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
    targets_shape = targets.get_shape().as_list()
    assert len(targets_shape) == 4
    assert targets_shape[2] == 1
    assert targets_shape[3] == 1
    targets = tf.squeeze(targets, axis=[2, 3])
    logits = tf.squeeze(logits, axis=[2, 3])
    targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
    targets_lengths = tf.reduce_sum(targets_mask, axis=1)
    sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
        targets, targets_lengths)
    xent = tf.nn.ctc_loss(
        sparse_targets,
        logits,
        targets_lengths,
        time_major=False,
        preprocess_collapse_repeated=False,
        ctc_merge_repeated=False)
    weights = self.targets_weights_fn(targets)  # pylint: disable=not-callable
    return tf.reduce_sum(xent), tf.reduce_sum(weights)
Example #12
Source File: modalities.py From fine-lm with MIT License | 6 votes |
def get_channel_embeddings(self, io_depth, targets, hidden_size,
                           name="channel"):
  """Get separate embedding for each of the channels."""
  targets_split = tf.split(targets, io_depth, axis=3)
  rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name,
                                      [256 * io_depth, hidden_size])
  rgb_embedding_var = tf.identity(rgb_embedding_var)
  rgb_embedding_var *= float(hidden_size)**0.5
  channel_target_embs = []
  for i in range(io_depth):
    # Adding the channel offsets to get the right embedding since the
    # embedding tensor has shape 256 * io_depth, hidden_size
    target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256
    target_embs = common_layers.gather(rgb_embedding_var, target_ids)
    channel_target_embs.append(target_embs)

  return tf.concat(channel_target_embs, axis=-1)
Example #13
Source File: rouge.py From fine-lm with MIT License | 6 votes |
def rouge_l_fscore(predictions, labels, **unused_kwargs):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0)
Example #14
Source File: rouge.py From fine-lm with MIT License | 6 votes |
def rouge_2_fscore(predictions, labels, **unused_kwargs):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge2_fscore: approx rouge-2 f1 score.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32)
  return rouge_2_f_score, tf.constant(1.0)
Example #15
Source File: density_model.py From cs294-112_hws with MIT License | 6 votes |
def get_prob(self, state):
    """
        ### PROBLEM 3
        ### YOUR CODE HERE

        args:
            state: np array (batch_size, ob_dim)

        TODO:
            likelihood: evaluate the discriminator D(x,x) on the same input
            prob: compute the probability density of x from the discriminator
                likelihood (see homework doc)
    """
    likelihood = self.get_likelihood(state, state)
    # avoid divide by 0 and log(0)
    likelihood = np.clip(np.squeeze(likelihood), 1e-5, 1-1e-5)
    prob = (1 - likelihood) / likelihood
    return prob
Example #16
Source File: vgg_preprocessing.py From DOTA_models with Apache License 2.0 | 6 votes |
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                           align_corners=False)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
Example #17
Source File: metrics.py From fine-lm with MIT License | 6 votes |
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example #18
Source File: metrics.py From fine-lm with MIT License | 6 votes |
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example #19
Source File: common_layers.py From fine-lm with MIT License | 6 votes |
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
Example #20
Source File: common_layers.py From fine-lm with MIT License | 5 votes |
def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):
  """Make x n-d with squeeze and expand_dims."""
  if len(x.shape) > n:
    while len(x.shape) != n:
      x = tf.squeeze(x, [squeeze_dim])
  else:
    while len(x.shape) != n:
      x = tf.expand_dims(x, expand_dim)
  return x
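A rough illustration of how a helper like this behaves, tracking shapes only (this usage sketch assumes the function above is in scope and is not part of the original project):

x = tf.zeros([8, 16, 1, 1])     # 4-D input
y = expand_squeeze_to_nd(x, 3)  # squeezes axis 2 once      -> shape [8, 16, 1]
z = expand_squeeze_to_nd(x, 5)  # expands at axis -1 once   -> shape [8, 16, 1, 1, 1]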
Example #21
Source File: common_layers.py From fine-lm with MIT License | 5 votes |
def sepconv_relu_sepconv(inputs,
                         filter_size,
                         output_size,
                         first_kernel_size=(1, 1),
                         second_kernel_size=(1, 1),
                         padding="LEFT",
                         nonpadding_mask=None,
                         dropout=0.0,
                         name=None):
  """Hidden layer with RELU activation followed by linear projection."""
  with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
    if inputs.get_shape().ndims == 3:
      is_3d = True
      inputs = tf.expand_dims(inputs, 2)
    else:
      is_3d = False
    h = separable_conv(
        inputs,
        filter_size,
        first_kernel_size,
        activation=tf.nn.relu,
        padding=padding,
        name="conv1")
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
    ret = separable_conv(
        h, output_size, second_kernel_size, padding=padding, name="conv2")
    if is_3d:
      ret = tf.squeeze(ret, 2)
    return ret


# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv
Example #22
Source File: common_layers.py From fine-lm with MIT License | 5 votes |
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
  return tf.squeeze(
      conv(
          tf.expand_dims(inputs, 2),
          filters, (kernel_size, 1),
          dilation_rate=(dilation_rate, 1),
          **kwargs), 2)
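The same expand/squeeze trick is commonly used to express a 1-D convolution in terms of a 2-D one. A minimal standalone sketch of that idea using tf.layers.conv2d (hypothetical names, not the project's own `conv` helper):

seq = tf.zeros([4, 100, 64])                # [batch, length, channels]
seq_4d = tf.expand_dims(seq, 2)             # [batch, length, 1, channels]
out_4d = tf.layers.conv2d(seq_4d, filters=128,
                          kernel_size=(3, 1), padding="same")
out = tf.squeeze(out_4d, 2)                 # back to [batch, length, 128]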
Example #23
Source File: common_layers.py From fine-lm with MIT License | 5 votes |
def embedding(x,
              vocab_size,
              dense_size,
              name=None,
              reuse=None,
              multiplier=1.0,
              symbol_dropout_rate=0.0,
              embedding_var=None,
              dtype=tf.float32):
  """Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
  with tf.variable_scope(
      name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype):
    if embedding_var is None:
      embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
    # On the backwards pass, we want to convert the gradient from
    # an indexed-slices to a regular tensor before sending it back to the
    # parameter server. This avoids excess computation on the parameter server.
    if not tf.contrib.eager.in_eager_mode():
      embedding_var = convert_gradient_to_tensor(embedding_var)
    x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)
    emb_x = gather(embedding_var, x, dtype)
    if multiplier != 1.0:
      emb_x *= multiplier
    static_shape = emb_x.shape.as_list()
    if len(static_shape) < 5:
      return emb_x
    assert len(static_shape) == 5
    # If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.
    return tf.squeeze(emb_x, 3)
Example #24
Source File: transformer_test.py From fine-lm with MIT License | 5 votes |
def testSlowVsFast(self):
  model, features = get_model(transformer.transformer_small())

  decode_length = 3

  out_logits, _ = model(features)
  out_logits = tf.squeeze(out_logits, axis=[2, 3])
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
      labels=tf.reshape(features["targets"], [-1]))
  loss = tf.reduce_mean(loss)
  apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)

  with self.test_session():
    tf.global_variables_initializer().run()
    for _ in range(100):
      apply_grad.run()

  model.set_mode(tf.estimator.ModeKeys.PREDICT)

  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    greedy_result = model._slow_greedy_infer(
        features, decode_length)["outputs"]
    greedy_result = tf.squeeze(greedy_result, axis=[2, 3])

    fast_result = model._greedy_infer(features, decode_length)["outputs"]

  with self.test_session():
    greedy_res = greedy_result.eval()
    fast_res = fast_result.eval()

  self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length))
  self.assertAllClose(greedy_res, fast_res)
Example #25
Source File: metrics.py From fine-lm with MIT License | 5 votes |
def image_rmse(predictions, labels, weights_fn=common_layers.weights_all):
  """RMSE but will argmax if last dim is not 1."""
  if common_layers.shape_list(predictions)[-1] == 1:
    predictions = tf.squeeze(predictions, axis=[-1])
  else:
    predictions = tf.argmax(predictions, axis=-1)
  return padded_rmse(predictions, labels, weights_fn)
Example #26
Source File: transformer_vae.py From fine-lm with MIT License | 5 votes |
def attend(x, source, hparams, name):
  """Self-attention layer with source as memory antecedent."""
  with tf.variable_scope(name):
    x = tf.squeeze(x, axis=2)
    if len(source.get_shape()) > 3:
      source = tf.squeeze(source, axis=2)
    source = common_attention.add_timing_signal_1d(source)
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams), source, None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout)
    res = common_layers.layer_postprocess(x, y, hparams)
    return tf.expand_dims(res, axis=2)
Example #27
Source File: metrics.py From fine-lm with MIT License | 5 votes |
def abs_error(predictions, labels, weights_fn=None):
  """Computes mean(abs(preds-target))."""
  del weights_fn  # Unused
  targets = tf.squeeze(labels, axis=[2, 3])
  batch_abs_error = tf.abs(predictions - targets)
  den = tf.ones(tf.shape(batch_abs_error), dtype=tf.float32)
  return (batch_abs_error, den)
Example #28
Source File: transformer.py From fine-lm with MIT License | 5 votes |
def infer(self,
          features=None,
          decode_length=50,
          beam_size=1,
          top_beams=1,
          alpha=0.0,
          use_tpu=False):
  """Returns the targets and their log probabilities."""
  del decode_length, beam_size, top_beams, alpha, use_tpu
  assert features is not None

  # Run the model
  self.hparams.force_full_predict = True
  with tf.variable_scope(self.name):
    logits, _ = self.model_fn(features)
  assert len(logits.shape) == 5  # [batch, time, 1, 1, vocab]
  logits = tf.squeeze(logits, [2, 3])

  # Compute the log probabilities
  log_probs = common_layers.log_prob_from_logits(logits)

  targets = features["targets"]
  assert len(targets.shape) == 4  # [batch, time, 1, 1]
  targets = tf.squeeze(targets, [2, 3])

  # Slice out the log_probs of the targets
  log_probs = common_layers.index_last_dim_with_indices(log_probs, targets)

  # Sum over time to get the log_prob of the sequence
  scores = tf.reduce_sum(log_probs, axis=1)

  return {"outputs": targets, "scores": scores}
Example #29
Source File: metrics.py From fine-lm with MIT License | 5 votes |
def rounding_sequence_accuracy(predictions,
                               labels,
                               weights_fn=common_layers.weights_nonzero):
  """Sequence accuracy for L1/L2 losses: round down the predictions to ints."""
  outputs = tf.squeeze(tf.to_int32(predictions), axis=-1)
  weights = weights_fn(labels)
  labels = tf.to_int32(labels)
  not_correct = tf.to_float(tf.not_equal(outputs, labels)) * weights
  axis = list(range(1, len(outputs.get_shape())))
  correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
  return correct_seq, tf.constant(1.0)
Example #30
Source File: metrics.py From fine-lm with MIT License | 5 votes |
def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all):
  """Create metrics accumulators and averager for Eager mode.

  Args:
    metric_names: list<str> from Metrics enum
    weights_fn: function that takes labels and returns a weights mask. Defaults
      to weights of all 1, i.e. common_layers.weights_all. Use
      common_layers.weights_nonzero if labels have 0-padding.

  Returns:
    (accum_fn(predictions, targets) => None,
     result_fn() => dict<str metric_name, float avg_val>
  """
  metric_fns = dict(
      [(name, METRICS_FNS[name]) for name in metric_names])
  tfe_metrics = dict()

  for name in metric_names:
    tfe_metrics[name] = tfe.metrics.Mean(name=name)

  def metric_accum(predictions, targets):
    for name, metric_fn in metric_fns.items():
      val, weight = metric_fn(predictions, targets,
                              weights_fn=weights_fn)
      tfe_metrics[name](np.squeeze(val), np.squeeze(weight))

  def metric_means():
    avgs = {}
    for name in metric_names:
      avgs[name] = tfe_metrics[name].result().numpy()
    return avgs

  return metric_accum, metric_means


# Metrics are functions that take predictions and labels and return
# a tensor of metrics and a tensor of weights.
# If the function has "features" as an argument, it will receive the whole
# features dict as well.
# The results are passed to tf.metrics.mean to accumulate properly.