Python tensorflow.zeros_like() Examples
The following are 30 code examples of tensorflow.zeros_like(), collected from open-source projects. The source file, originating project, and license are noted above each example. You may also want to check out all available functions and classes of the tensorflow module, or try the search function.
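Before the examples, a quick refresher on the API: tf.zeros_like(tensor) returns a tensor of zeros with the same shape and dtype as its input, and an optional dtype argument overrides the element type. Below is a minimal, illustrative sketch in the TF 1.x style used by the examples on this page; the tensor names are made up for the demonstration.

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

# Same shape and dtype as x, filled with zeros.
zeros = tf.zeros_like(x)

# Same shape, but with the dtype overridden to int32.
int_zeros = tf.zeros_like(x, dtype=tf.int32)

with tf.Session() as sess:
    print(sess.run(zeros))      # [[0. 0.] [0. 0.]]
    print(sess.run(int_zeros))  # [[0 0] [0 0]]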
Example #1
Source File: normalize.py From soccer-matlab with BSD 2-Clause "Simplified" License
def __init__(
    self, template, center=True, scale=True, clip=10, name='normalize'):
  """Normalize tensors based on streaming estimates of mean and variance.

  Centering the value, scaling it by the standard deviation, and clipping
  outlier values are optional.

  Args:
    template: Example tensor providing shape and dtype of the value to track.
    center: Python boolean indicating whether to subtract mean from values.
    scale: Python boolean indicating whether to scale values by stddev.
    clip: If and when to clip normalized values.
    name: Parent scope of operations provided by this class.
  """
  self._center = center
  self._scale = scale
  self._clip = clip
  self._name = name
  with tf.name_scope(name):
    self._count = tf.Variable(0, False)
    self._mean = tf.Variable(tf.zeros_like(template), False)
    self._var_sum = tf.Variable(tf.zeros_like(template), False)
Example #2
Source File: progressive.py From DOTA_models with Apache License 2.0
def _Apply(self, x):
  assert self._current_layer < self._layer_count

  # Layer state is set to 0 when there is no previous iteration.
  if self._layer_state is None:
    self._layer_state = tf.zeros_like(x, dtype=tf.float32)

  # Code estimation using both:
  # - the state from the previous iteration/layer,
  # - the binary codes that are before in raster scan order.
  estimated_codes = self._brnn_predictors[self._current_layer](
      x, self._layer_state)

  # Compute the updated layer state.
  h = self._state_blocks[self._current_layer](x)
  self._layer_state = self._layer_rnn(h)

  self._current_layer += 1
  return estimated_codes
Example #3
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0
def build_pgd_attack(self, eps):
    victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)

    def one_step_attack(image, grad):
        """
        core components of this attack are:
        (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
        (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
        (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
        """
        orig_image = image
        image = self.structure(image)
        image = (image - 127.5) / 128.0
        image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
        prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        embeddings = tf.reshape(embeddings[0], [512, 1])
        objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))  # to be maximized

        noise, = tf.gradients(objective, orig_image)
        noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
        noise = 0.9 * grad + noise

        adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0, lower_bound, upper_bound)
        return adv, noise

    input = tf.to_float(self.image_batch)
    lower_bound = tf.clip_by_value(input - eps, 0, 255.)
    upper_bound = tf.clip_by_value(input + eps, 0, 255.)

    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        adv, _ = tf.while_loop(
            lambda _, __: True, one_step_attack,
            (input, tf.zeros_like(input)),
            back_prop=False,
            maximum_iterations=100,
            parallel_iterations=1)
    self.adv_image = adv
    return adv
Example #4
Source File: normalize.py From soccer-matlab with BSD 2-Clause "Simplified" License
def __init__(
    self, template, center=True, scale=True, clip=10, name='normalize'):
  """Normalize tensors based on streaming estimates of mean and variance.

  Centering the value, scaling it by the standard deviation, and clipping
  outlier values are optional.

  Args:
    template: Example tensor providing shape and dtype of the value to track.
    center: Python boolean indicating whether to subtract mean from values.
    scale: Python boolean indicating whether to scale values by stddev.
    clip: If and when to clip normalized values.
    name: Parent scope of operations provided by this class.
  """
  self._center = center
  self._scale = scale
  self._clip = clip
  self._name = name
  with tf.name_scope(name):
    self._count = tf.Variable(0, False)
    self._mean = tf.Variable(tf.zeros_like(template), False)
    self._var_sum = tf.Variable(tf.zeros_like(template), False)
Example #5
Source File: layer.py From 3DGCN with MIT License
def call(self, inputs, mask=None):
    # Import graph tensors
    # atoms = (samples, max_atoms, atom_feat)
    # distances = (samples, max_atoms, max_atoms, coor_dims)
    atoms, distances = inputs

    # Get parameters
    max_atoms = int(atoms.shape[1])
    atom_feat = int(atoms.shape[-1])
    coor_dims = int(distances.shape[-1])

    # Generate vector features filled with zeros
    vector_features = tf.zeros_like(atoms)
    vector_features = tf.reshape(vector_features, [-1, max_atoms, 1, atom_feat])
    vector_features = tf.tile(vector_features, [1, 1, coor_dims, 1])

    return [atoms, vector_features]
Example #6
Source File: categorical_calibration_layer.py From lattice with Apache License 2.0
def call(self, inputs):
  """Standard Keras call() method."""
  if inputs.dtype not in [tf.uint8, tf.int32, tf.int64]:
    inputs = tf.cast(inputs, dtype=tf.int32)

  if self.default_input_value is not None:
    default_input_value_tensor = tf.constant(
        int(self.default_input_value),
        dtype=inputs.dtype,
        name=DEFAULT_INPUT_VALUE_NAME)
    replacement = tf.zeros_like(inputs) + (self.num_buckets - 1)
    inputs = tf.where(
        tf.equal(inputs, default_input_value_tensor), replacement, inputs)

  # We can't use tf.gather_nd(self.kernel, inputs) as it doesn't support
  # constraints (constraint functions are not supported for IndexedSlices).
  # Instead we use matrix multiplication by one-hot encoding of the index.
  if self.units == 1:
    # This can be slightly faster as it uses matmul.
    return tf.matmul(
        tf.one_hot(tf.squeeze(inputs, axis=[-1]), depth=self.num_buckets),
        self.kernel)
  return tf.reduce_sum(
      tf.one_hot(inputs, axis=1, depth=self.num_buckets) * self.kernel, axis=1)
Example #7
Source File: preprocessor_test.py From DOTA_models with Apache License 2.0
def testRandomFlipBoxes(self):
  boxes = self.createTestBoxes()

  # Case where the boxes are flipped.
  boxes_expected1 = self.expectedBoxesAfterMirroring()

  # Case where the boxes are not flipped.
  boxes_expected2 = boxes

  # After elementwise multiplication, the result should be all-zero since one
  # of them is all-zero.
  boxes_diff = tf.multiply(
      tf.squared_difference(boxes, boxes_expected1),
      tf.squared_difference(boxes, boxes_expected2))
  expected_result = tf.zeros_like(boxes_diff)

  with self.test_session() as sess:
    (boxes_diff, expected_result) = sess.run([boxes_diff, expected_result])
    self.assertAllEqual(boxes_diff, expected_result)
Example #8
Source File: box_list_ops.py From DOTA_models with Apache License 2.0
def matched_iou(boxlist1, boxlist2, scope=None):
  """Compute intersection-over-union between corresponding boxes in boxlists.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding N boxes
    scope: name scope.

  Returns:
    a tensor with shape [N] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'MatchedIOU'):
    intersections = matched_intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = areas1 + areas2 - intersections
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections), tf.truediv(intersections, unions))
Example #9
Source File: box_list_ops.py From DOTA_models with Apache License 2.0
def iou(boxlist1, boxlist2, scope=None):
  """Computes pairwise intersection-over-union between box collections.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'IOU'):
    intersections = intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = (
        tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections), tf.truediv(intersections, unions))
Example #10
Source File: model.py From DOTA_models with Apache License 2.0
def compute_first_or_last(self, select, first=True):
  # Perform first or last operation on row select with probabilistic row
  # selection.
  answer = tf.zeros_like(select)
  running_sum = tf.zeros([self.batch_size, 1], self.data_type)
  for i in range(self.max_elements):
    if (first):
      current = tf.slice(select, [0, i], [self.batch_size, 1])
    else:
      current = tf.slice(select, [0, self.max_elements - 1 - i],
                         [self.batch_size, 1])
    curr_prob = current * (1 - running_sum)
    curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
    running_sum += curr_prob
    temp_ans = []
    curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
    for i_ans in range(self.max_elements):
      if (not (first) and i_ans == self.max_elements - 1 - i):
        temp_ans.append(curr_prob)
      elif (first and i_ans == i):
        temp_ans.append(curr_prob)
      else:
        temp_ans.append(tf.zeros_like(curr_prob))
    temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
    answer += temp_ans
  return answer
Example #11
Source File: ssd_meta_arch.py From vehicle_counting_tensorflow with MIT License
def _minibatch_subsample_fn(self, inputs):
  """Randomly samples anchors for one image.

  Args:
    inputs: a list of 2 inputs. First one is a tensor of shape
      [num_anchors, num_classes] indicating targets assigned to each anchor.
      Second one is a tensor of shape [num_anchors] indicating the class
      weight of each anchor.

  Returns:
    batch_sampled_indicator: bool tensor of shape [num_anchors] indicating
      whether the anchor should be selected for loss computation.
  """
  cls_targets, cls_weights = inputs
  if self._add_background_class:
    # Set background_class bits to 0 so that the positives_indicator
    # computation would not consider background class.
    background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1]))
    regular_class = tf.slice(cls_targets, [0, 1], [-1, -1])
    cls_targets = tf.concat([background_class, regular_class], 1)
  positives_indicator = tf.reduce_sum(cls_targets, axis=1)
  return self._random_example_sampler.subsample(
      tf.cast(cls_weights, tf.bool),
      batch_size=None,
      labels=tf.cast(positives_indicator, tf.bool))
Example #12
Source File: model.py From TransferRL with MIT License
def _coverage_loss(attn_dists, padding_mask):
  """Calculates the coverage loss from the attention distributions.

  Args:
    attn_dists: The attention distributions for each decoder timestep.
      A list length max_dec_steps containing shape (batch_size, attn_length)
    padding_mask: shape (batch_size, max_dec_steps).

  Returns:
    coverage_loss: scalar
  """
  coverage = tf.zeros_like(attn_dists[0])  # shape (batch_size, attn_length). Initial coverage is zero.
  covlosses = []  # Coverage loss per decoder timestep. Will be list length max_dec_steps containing shape (batch_size).
  for a in attn_dists:
    covloss = tf.reduce_sum(tf.minimum(a, coverage), [1])  # calculate the coverage loss for this step
    covlosses.append(covloss)
    coverage += a  # update the coverage vector
  coverage_loss = _mask_and_avg(covlosses, padding_mask)
  return coverage_loss
Example #13
Source File: utility.py From soccer-matlab with BSD 2-Clause "Simplified" License
def reinit_nested_vars(variables, indices=None):
  """Reset all variables in a nested tuple to zeros.

  Args:
    variables: Nested tuple or list of variables.
    indices: Indices along the first dimension to reset, defaults to all.

  Returns:
    Operation.
  """
  if isinstance(variables, (tuple, list)):
    return tf.group(*[
        reinit_nested_vars(variable, indices) for variable in variables])
  if indices is None:
    return variables.assign(tf.zeros_like(variables))
  else:
    zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list())
    return tf.scatter_update(variables, indices, zeros)
Example #14
Source File: preprocessor_test.py From object_detector_app with MIT License
def testRandomFlipBoxes(self):
  boxes = self.createTestBoxes()

  # Case where the boxes are flipped.
  boxes_expected1 = self.expectedBoxesAfterMirroring()

  # Case where the boxes are not flipped.
  boxes_expected2 = boxes

  # After elementwise multiplication, the result should be all-zero since one
  # of them is all-zero.
  boxes_diff = tf.multiply(
      tf.squared_difference(boxes, boxes_expected1),
      tf.squared_difference(boxes, boxes_expected2))
  expected_result = tf.zeros_like(boxes_diff)

  with self.test_session() as sess:
    (boxes_diff, expected_result) = sess.run([boxes_diff, expected_result])
    self.assertAllEqual(boxes_diff, expected_result)
Example #15
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License
def reset(self, indices=None):
  """Reset the batch of environments.

  Args:
    indices: The batch indices of the environments to reset; defaults to all.

  Returns:
    Batch tensor of the new observations.
  """
  if indices is None:
    indices = tf.range(len(self._batch_env))
  observ_dtype = self._parse_dtype(self._batch_env.observation_space)
  observ = tf.py_func(
      self._batch_env.reset, [indices], observ_dtype, name='reset')
  observ = tf.check_numerics(observ, 'observ')
  reward = tf.zeros_like(indices, tf.float32)
  done = tf.zeros_like(indices, tf.bool)
  with tf.control_dependencies([
      tf.scatter_update(self._observ, indices, observ),
      tf.scatter_update(self._reward, indices, reward),
      tf.scatter_update(self._done, indices, done)]):
    return tf.identity(observ)
Example #16
Source File: region_similarity_calculator.py From vehicle_counting_tensorflow with MIT License
def _compare(self, boxlist1, boxlist2):
  """Compute pairwise IOU similarity between the two BoxLists and score.

  Args:
    boxlist1: BoxList holding N boxes. Must have a score field.
    boxlist2: BoxList holding M boxes.

  Returns:
    A tensor with shape [N, M] representing scores thresholded by pairwise
    iou scores.
  """
  ious = box_list_ops.iou(boxlist1, boxlist2)
  scores = boxlist1.get_field(fields.BoxListFields.scores)
  scores = tf.expand_dims(scores, axis=1)
  row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]])
  thresholded_ious = tf.where(ious > self._iou_threshold,
                              row_replicated_scores, tf.zeros_like(ious))
  return thresholded_ious
Example #17
Source File: box_list_ops.py From object_detector_app with MIT License
def matched_iou(boxlist1, boxlist2, scope=None):
  """Compute intersection-over-union between corresponding boxes in boxlists.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding N boxes
    scope: name scope.

  Returns:
    a tensor with shape [N] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'MatchedIOU'):
    intersections = matched_intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = areas1 + areas2 - intersections
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections), tf.truediv(intersections, unions))
Example #18
Source File: box_list_ops.py From object_detector_app with MIT License
def iou(boxlist1, boxlist2, scope=None):
  """Computes pairwise intersection-over-union between box collections.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'IOU'):
    intersections = intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = (
        tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections), tf.truediv(intersections, unions))
Example #19
Source File: utility.py From soccer-matlab with BSD 2-Clause "Simplified" License
def reinit_nested_vars(variables, indices=None):
  """Reset all variables in a nested tuple to zeros.

  Args:
    variables: Nested tuple or list of variables.
    indices: Batch indices to reset, defaults to all.

  Returns:
    Operation.
  """
  if isinstance(variables, (tuple, list)):
    return tf.group(*[
        reinit_nested_vars(variable, indices) for variable in variables])
  if indices is None:
    return variables.assign(tf.zeros_like(variables))
  else:
    zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list())
    return tf.scatter_update(variables, indices, zeros)
Example #20
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License
def reset(self, indices=None):
  """Reset the batch of environments.

  Args:
    indices: The batch indices of the environments to reset; defaults to all.

  Returns:
    Batch tensor of the new observations.
  """
  if indices is None:
    indices = tf.range(len(self._batch_env))
  observ_dtype = self._parse_dtype(self._batch_env.observation_space)
  observ = tf.py_func(
      self._batch_env.reset, [indices], observ_dtype, name='reset')
  observ = tf.check_numerics(observ, 'observ')
  reward = tf.zeros_like(indices, tf.float32)
  done = tf.zeros_like(indices, tf.bool)
  with tf.control_dependencies([
      tf.scatter_update(self._observ, indices, observ),
      tf.scatter_update(self._reward, indices, reward),
      tf.scatter_update(self._done, indices, done)]):
    return tf.identity(observ)
Example #21
Source File: bidirectional.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer
    of the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        # review input - Both original and reversed
        self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])
Example #22
Source File: stacked_bidirectional.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer
    of the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        # review input - Both original and reversed
        self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])
Example #23
Source File: simple.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer
    of the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])
Example #24
Source File: stacked_simple.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer
    of the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])
Example #25
Source File: autoencoders.py From fine-lm with MIT License
def bottleneck(self, x):  # pylint: disable=arguments-differ
  hparams = self.hparams
  if hparams.unordered:
    return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
  noise = hparams.bottleneck_noise
  hparams.bottleneck_noise = 0.0  # We'll add noise below.
  x, loss = discretization.parametrized_bottleneck(x, hparams)
  hparams.bottleneck_noise = noise
  if hparams.mode == tf.estimator.ModeKeys.TRAIN:
    # We want a number p such that p^bottleneck_bits = 1 - noise.
    # So log(p) * bottleneck_bits = log(noise)
    log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
    # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
    noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
    # Having the no-noise mask, we can make noise just uniformly at random.
    ordered_noise = tf.random_uniform(tf.shape(x))
    # We want our noise to be 1s at the start and random {-1, 1} bits later.
    ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
    # Now we flip the bits of x on the noisy positions (ordered and normal).
    x *= 2.0 * ordered_noise - 1
  return x, loss
Example #26
Source File: ppo.py From fine-lm with MIT License
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  """Generalized advantage estimator."""
  # Slight weirdness below: we set the last reward to 0.
  # This makes the advantage 0 in the last timestep.
  reward = tf.concat([reward[:-1, :], value[-1:, :]], axis=0)
  next_value = tf.concat([value[1:, :], tf.zeros_like(value[-1:, :])], axis=0)
  next_not_done = 1 - tf.cast(
      tf.concat([done[1:, :], tf.zeros_like(done[-1:, :])], axis=0),
      tf.float32)
  delta = reward + gae_gamma * next_value * next_not_done - value

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])

  return tf.check_numerics(return_, "return")
Example #27
Source File: tf_atari_wrappers.py From fine-lm with MIT License
def simulate(self, action):
  with tf.name_scope("environment/simulate"):
    # Do we need this?
    initializer = (tf.zeros_like(self._observ),
                   tf.fill((len(self),), 0.0),
                   tf.fill((len(self),), False))

    def not_done_step(a, _):
      reward, done = self._batch_env.simulate(action)
      with tf.control_dependencies([reward, done]):
        # TODO(piotrmilos): possibly ignore envs with done
        r0 = tf.maximum(a[0], self._batch_env.observ)
        r1 = tf.add(a[1], reward)
        r2 = tf.logical_or(a[2], done)
        return (r0, r1, r2)

    simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                           initializer=initializer, parallel_iterations=1,
                           infer_shape=False)
    simulate_ret = [ret[-1, ...] for ret in simulate_ret]

    with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
      return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2])
Example #28
Source File: autoencoders.py From fine-lm with MIT License
def sample(self, features=None):
  del features
  hp = self.hparams
  div_x = 2**hp.num_hidden_layers
  div_y = 1 if self.is1d else 2**hp.num_hidden_layers
  size = [
      hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
      hp.bottleneck_bits
  ]
  rand = tf.random_uniform(size)
  res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
  # If you want to set some first bits to a fixed value, do this:
  # fixed = tf.zeros_like(rand) - 1.0
  # nbits = 3
  # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
  return res
Example #29
Source File: metrics.py From fine-lm with MIT License
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
Example #30
Source File: metrics.py From fine-lm with MIT License
def image_summary(predictions, targets, hparams):
  """Reshapes predictions and passes it to tensorboard.

  Args:
    predictions: The predicted image (logits).
    targets: The ground truth.
    hparams: model hparams.

  Returns:
    summary_proto: containing the summary images.
    weights: A Tensor of zeros of the same shape as predictions.
  """
  del hparams
  results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
  gold = tf.cast(targets, tf.uint8)
  summary1 = tf.summary.image("prediction", results, max_outputs=2)
  summary2 = tf.summary.image("data", gold, max_outputs=2)
  summary = tf.summary.merge([summary1, summary2])
  return summary, tf.zeros_like(predictions)