Python tensorflow.less() Examples
The following are 30 code examples of tensorflow.less(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the tensorflow module.
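Before the project examples, here is a minimal, self-contained sketch of what tf.less computes: an elementwise x < y comparison that returns a boolean tensor and broadcasts like other binary ops. This sketch assumes TensorFlow 2.x eager execution, and the input values are illustrative only.

import tensorflow as tf

x = tf.constant([1, 4, 5])
y = tf.constant([3, 3, 3])

# Elementwise x < y; returns a boolean tensor of the same shape.
print(tf.less(x, y))   # [ True False False]

# Broadcasting against a scalar works the same way.
print(tf.less(x, 3))   # [ True False False]

# The boolean mask from tf.less is often combined with tf.where to select values.
print(tf.where(tf.less(x, y), x, y))   # [1 3 3]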
Example #1
Source File: hmc.py From zhusuan with MIT License | 7 votes |
def _leapfrog(self, q, p, step_size, get_gradient, mass):
    def loop_cond(i, q, p):
        return i < self.n_leapfrogs + 1

    def loop_body(i, q, p):
        step_size1 = tf.cond(i > 0,
                             lambda: step_size,
                             lambda: tf.constant(0.0, dtype=tf.float32))
        step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                            tf.less(0, i)),
                             lambda: step_size,
                             lambda: step_size / 2)
        q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                   lambda q: get_gradient(q), mass)
        return [i + 1, q, p]

    i = tf.constant(0)
    _, q, p = tf.while_loop(loop_cond,
                            loop_body,
                            [i, q, p],
                            back_prop=False,
                            parallel_iterations=1)
    return q, p
Example #2
Source File: discretization.py From fine-lm with MIT License | 7 votes |
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise,
                        discretize_warmup_steps, mode,
                        isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
    d = common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,
                          mode == tf.estimator.ModeKeys.TRAIN,
                          max_prob=isemhash_mix_prob)
    return d, 0.0
Example #3
Source File: jumpNormalAlgorithms.py From decompose with MIT License | 6 votes |
def mode(cls, parameters: Dict[str, Tensor]) -> Tensor:
    mu = parameters["mu"]
    tau = parameters["tau"]
    nu = parameters["nu"]
    beta = parameters["beta"]
    lam = 1./beta
    mode = tf.zeros_like(mu) * tf.zeros_like(mu)
    mode = tf.where(tf.logical_and(tf.greater(nu, mu),
                                   tf.less(mu + lam/tau, nu)),
                    mu + lam/tau, mode)
    mode = tf.where(tf.logical_and(tf.greater(nu, mu),
                                   tf.greater_equal(mu + lam/tau, nu)),
                    nu, mode)
    mode = tf.where(tf.logical_and(tf.less_equal(nu, mu),
                                   tf.greater(mu - lam/tau, nu)),
                    mu - lam/tau, mode)
    mode = tf.where(tf.logical_and(tf.less_equal(nu, mu),
                                   tf.less_equal(mu - lam/tau, nu)),
                    nu, mode)
    return(mode)
Example #4
Source File: common_attention.py From fine-lm with MIT License | 6 votes |
def add_positional_embedding(x, max_length, name, positions=None):
  """Add positional embedding.

  Args:
    x: a Tensor with shape [batch, length, depth]
    max_length: an integer. static maximum size of any dimension.
    name: a name for this layer.
    positions: an optional tensor with shape [batch, length]

  Returns:
    a Tensor the same shape as x.
  """
  _, length, depth = common_layers.shape_list(x)
  var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
  if positions is None:
    sliced = tf.cond(
        tf.less(length, max_length),
        lambda: tf.slice(var, [0, 0], [length, -1]),
        lambda: tf.pad(var, [[0, length - max_length], [0, 0]]))
    return x + tf.expand_dims(sliced, 0)
  else:
    return x + tf.gather(var, tf.to_int32(positions))
Example #5
Source File: hmc.py From zhusuan with MIT License | 6 votes |
def _adapt_mass(self, t, num_chain_dims):
    ewmv = ExponentialWeightedMovingVariance(
        self.mass_decay, self.data_shapes, num_chain_dims)
    new_mass = tf.cond(self.adapt_mass,
                       lambda: ewmv.get_updated_precision(self.q),
                       lambda: ewmv.precision())
    if not isinstance(new_mass, list):
        new_mass = [new_mass]

    # print('New mass is = {}'.format(new_mass))
    # TODO incorrect shape?
    # print('New mass={}'.format(new_mass))
    # print('q={}, NMS={}'.format(self.q[0].get_shape(),
    #                             new_mass[0].get_shape()))
    with tf.control_dependencies(new_mass):
        current_mass = tf.cond(
            tf.less(tf.cast(t, tf.int32), self.mass_collect_iters),
            lambda: [tf.ones(shape) for shape in self.data_shapes],
            lambda: new_mass)
    if not isinstance(current_mass, list):
        current_mass = [current_mass]
    return current_mass
Example #6
Source File: neural_gpu.py From fine-lm with MIT License | 6 votes |
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size, name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality, we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True)
Example #7
Source File: test_utils.py From model-optimization with Apache License 2.0 | 6 votes |
def get_tensor_with_random_shape(expected_num_elements=10,
                                 source_fn=tf.random.uniform):
  """Returns a 1-D `Tensor` with random shape.

  The `Tensor` is created by creating a `Tensor` with `2*expected_num_elements`
  elements and including each element in the returned `Tensor` with
  probability `0.5`. Thus, the returned `Tensor` has an unknown and
  non-deterministic shape.

  Args:
    expected_num_elements: The number of elements the returned `Tensor` should
      have on expectation.
    source_fn: A Python callable that generates values for the returned
      `Tensor`.

  Returns:
    A 1-D `Tensor` with random shape.
  """
  return tf.squeeze(
      tf.gather(
          source_fn([2 * expected_num_elements]),
          tf.where(
              tf.less(tf.random.uniform([2 * expected_num_elements]), 0.5))),
      1)
Example #8
Source File: autoencoders.py From fine-lm with MIT License | 6 votes |
def sample(self, features=None):
  del features
  hp = self.hparams
  div_x = 2**hp.num_hidden_layers
  div_y = 1 if self.is1d else 2**hp.num_hidden_layers
  size = [
      hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
      hp.bottleneck_bits
  ]
  rand = tf.random_uniform(size)
  res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
  # If you want to set some first bits to a fixed value, do this:
  # fixed = tf.zeros_like(rand) - 1.0
  # nbits = 3
  # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
  return res
Example #9
Source File: autoencoders.py From fine-lm with MIT License | 6 votes |
def bottleneck(self, x):  # pylint: disable=arguments-differ
  hparams = self.hparams
  if hparams.unordered:
    return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
  noise = hparams.bottleneck_noise
  hparams.bottleneck_noise = 0.0  # We'll add noise below.
  x, loss = discretization.parametrized_bottleneck(x, hparams)
  hparams.bottleneck_noise = noise
  if hparams.mode == tf.estimator.ModeKeys.TRAIN:
    # We want a number p such that p^bottleneck_bits = 1 - noise.
    # So log(p) * bottleneck_bits = log(noise)
    log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
    # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
    noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
    # Having the no-noise mask, we can make noise just uniformly at random.
    ordered_noise = tf.random_uniform(tf.shape(x))
    # We want our noise to be 1s at the start and random {-1, 1} bits later.
    ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
    # Now we flip the bits of x on the noisy positions (ordered and normal).
    x *= 2.0 * ordered_noise - 1
  return x, loss
Example #10
Source File: dsn.py From DOTA_models with Apache License 2.0 | 6 votes |
def dsn_loss_coefficient(params):
  """The global_step-dependent weight that specifies when to kick in DSN losses.

  Args:
    params: A dictionary of parameters. Expecting 'domain_separation_startpoint'

  Returns:
    A weight that effectively enables or disables the DSN-related losses,
    i.e. similarity, difference, and reconstruction losses.
  """
  return tf.where(
      tf.less(slim.get_or_create_global_step(),
              params['domain_separation_startpoint']), 1e-10, 1.0)


################################################################################
# MODEL CREATION
################################################################################
Example #11
Source File: neural_gpu.py From DOTA_models with Apache License 2.0 | 6 votes |
def memory_run(step, nmaps, mem_size, batch_size, vocab_size,
               global_step, do_training, update_mem, decay_factor, num_gpus,
               target_emb_weights, output_w, gpu_targets_tn, it):
  """Run memory."""
  q = step[:, 0, it, :]
  mlabels = gpu_targets_tn[:, it, 0]
  res, mask, mem_loss = memory_call(
      q, mlabels, nmaps, mem_size, vocab_size, num_gpus, update_mem)
  res = tf.gather(target_emb_weights, res) * tf.expand_dims(mask[:, 0], 1)

  # Mix gold and original in the first steps, 20% later.
  gold = tf.nn.dropout(tf.gather(target_emb_weights, mlabels), 0.7)
  use_gold = 1.0 - tf.cast(global_step, tf.float32) / (1000. * decay_factor)
  use_gold = tf.maximum(use_gold, 0.2) * do_training
  mem = tf.cond(tf.less(tf.random_uniform([]), use_gold),
                lambda: use_gold * gold + (1.0 - use_gold) * res,
                lambda: res)
  mem = tf.reshape(mem, [-1, 1, 1, nmaps])
  return mem, mem_loss, update_mem
Example #12
Source File: losses.py From DOTA_models with Apache License 2.0 | 6 votes |
def _compute_loss(self, prediction_tensor, target_tensor, weights):
  """Compute loss function.

  Args:
    prediction_tensor: A float tensor of shape [batch_size, num_anchors,
      code_size] representing the (encoded) predicted locations of objects.
    target_tensor: A float tensor of shape [batch_size, num_anchors,
      code_size] representing the regression targets
    weights: a float tensor of shape [batch_size, num_anchors]

  Returns:
    loss: a (scalar) tensor representing the value of the loss function
  """
  diff = prediction_tensor - target_tensor
  abs_diff = tf.abs(diff)
  abs_diff_lt_1 = tf.less(abs_diff, 1)
  anchorwise_smooth_l1norm = tf.reduce_sum(
      tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
      2) * weights
  if self._anchorwise_output:
    return anchorwise_smooth_l1norm
  return tf.reduce_sum(anchorwise_smooth_l1norm)
Example #13
Source File: ssd_augmenter.py From lambda-deep-learning-demo with Apache License 2.0 | 6 votes |
def random_flip_left_right(image, bboxes, seed=None):
  """Random flip left-right of an image and its bounding boxes."""
  def flip_bboxes(bboxes):
    """Flip bounding boxes coordinates."""
    bboxes = tf.stack([1.0 - bboxes[:, 2], bboxes[:, 1],
                       1.0 - bboxes[:, 0], bboxes[:, 3]], axis=-1)
    return bboxes

  # Random flip. Tensorflow implementation.
  with tf.name_scope('random_flip_left_right'):
    image = tf.convert_to_tensor(image, name='image')
    uniform_random = tf.random.uniform([], 0, 1.0, seed=seed)
    mirror_cond = tf.less(uniform_random, .5)

    # Flip image.
    image = tf.cond(mirror_cond,
                    lambda: tf.image.flip_left_right(image),
                    lambda: image)

    # Flip bboxes.
    bboxes = tf.cond(mirror_cond,
                     lambda: flip_bboxes(bboxes),
                     lambda: bboxes)
    return image, bboxes
Example #14
Source File: graph_builder.py From DOTA_models with Apache License 2.0 | 6 votes |
def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch,
                        lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
  return tf.train.exponential_decay(
      base_rate,
      step_var,
      hyperparams.decay_steps,
      hyperparams.decay_base,
      staircase=hyperparams.decay_staircase)
Example #15
Source File: ngctc_loss.py From taco with GNU General Public License v3.0 | 6 votes |
def ngctc_decode(term_probs, targets, seq_len, tar_len):
    max_seq_len = tf.to_int32(tf.reduce_max(seq_len))
    bs = tf.to_int32(tf.shape(term_probs)[0])
    # loss = 0.
    cond = lambda j, loss: tf.less(j, bs)
    j = tf.constant(0, dtype=tf.int32)
    decoded = tf.zeros([1, max_seq_len], dtype=tf.int32)

    def body(j, decoded):
        idx = tf.expand_dims(targets[j, :tar_len[j]], 1)
        st = tf.transpose(term_probs[j], (1, 0))
        st = tf.transpose(tf.gather_nd(st, idx), (1, 0))
        length = tf.to_int32(seq_len[j])
        # get essentially the probability of being at each node
        alphas = forward_ngctc(st, length, inference=True)
        # decode that by taking the argmax for each column of alphas
        dec = tf.to_int32(tf.argmax(alphas, axis=1))
        dec = tf.concat([dec, tf.zeros([max_seq_len - length], dtype=tf.int32)], axis=0)
        decoded = tf.concat([decoded, [dec]], axis=0)
        return tf.add(j, 1), decoded

    out = tf.while_loop(cond, body, loop_vars=[j, decoded],
                        shape_invariants=[tf.TensorShape(None),
                                          tf.TensorShape([None, None])])
    return out[1]
Example #16
Source File: memory.py From DOTA_models with Apache License 2.0 | 6 votes |
def get_hash_slots(self, query):
  """Gets hashed-to buckets for batch of queries.

  Args:
    query: 2-d Tensor of query vectors.

  Returns:
    A list of hashed-to buckets for each hash function.
  """
  binary_hash = [
      tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
      for i in xrange(self.num_libraries)]
  hash_slot_idxs = [
      tf.reduce_sum(
          tf.to_int32(binary_hash[i]) *
          tf.constant([[2 ** i for i in xrange(self.num_hashes)]],
                      dtype=tf.int32), 1)
      for i in xrange(self.num_libraries)]
  return hash_slot_idxs
Example #17
Source File: ops.py From spektral with MIT License | 6 votes |
def repeat(x, repeats):
    """
    Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D
    tensors).
    :param x: rank 1 Tensor;
    :param repeats: rank 1 Tensor with same shape as x, the number of
    repetitions for each element;
    :return: rank 1 Tensor, of shape `(sum(repeats), )`.
    """
    x = tf.expand_dims(x, 1)
    max_repeats = tf.reduce_max(repeats)
    tile_repeats = [1, max_repeats]
    arr_tiled = tf.tile(x, tile_repeats)
    mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1))
    result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1])
    return result
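For clarity, a minimal usage sketch of the function above (assuming TensorFlow 2.x eager execution and that repeat is in scope; the input values are illustrative only), showing the np.repeat-like behavior described in the docstring:

x = tf.constant([3, 5, 7])
repeats = tf.constant([2, 0, 3])

# Each element of x is repeated repeats[i] times; elements with 0 repeats are dropped.
print(repeat(x, repeats))  # tf.Tensor([3 3 7 7 7], shape=(5,), dtype=int32)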
Example #18
Source File: losses.py From object_detector_app with MIT License | 6 votes |
def _compute_loss(self, prediction_tensor, target_tensor, weights):
  """Compute loss function.

  Args:
    prediction_tensor: A float tensor of shape [batch_size, num_anchors,
      code_size] representing the (encoded) predicted locations of objects.
    target_tensor: A float tensor of shape [batch_size, num_anchors,
      code_size] representing the regression targets
    weights: a float tensor of shape [batch_size, num_anchors]

  Returns:
    loss: a (scalar) tensor representing the value of the loss function
  """
  diff = prediction_tensor - target_tensor
  abs_diff = tf.abs(diff)
  abs_diff_lt_1 = tf.less(abs_diff, 1)
  anchorwise_smooth_l1norm = tf.reduce_sum(
      tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
      2) * weights
  if self._anchorwise_output:
    return anchorwise_smooth_l1norm
  return tf.reduce_sum(anchorwise_smooth_l1norm)
Example #19
Source File: postU.py From decompose with MIT License | 5 votes |
def update(self, U: List[Tensor], X: Tensor, transform: bool) -> Tuple[Tensor]:
    f, K = self.__f, self.__K
    U = copy(U)  # copy the list since we change it below

    # update hyper parameters
    if not transform:
        self.prior.update(data=tf.transpose(U[f]))
    else:
        self.prior.fitLatents(data=tf.transpose(U[f]))

    # prepare update of the f-th factor
    prepVars = self.__likelihood.prepVars(f=f, U=U, X=X)

    # update the filters of the f-th factor
    def cond(k, U):
        return(tf.less(k, K))

    def body(k, U):
        U = self.updateK(k, prepVars, U)
        return(k+1, U)

    k = tf.constant(0)
    loop_vars = [k, U]
    _, U = tf.while_loop(cond, body, loop_vars)
    return(U[f])
Example #20
Source File: univariate.py From zhusuan with MIT License | 5 votes |
def _sample(self, n_samples):
    p = tf.sigmoid(self.logits)
    shape = tf.concat([[n_samples], self.batch_shape], 0)
    alpha = tf.random_uniform(
        shape, minval=0, maxval=1, dtype=self.param_dtype)
    samples = tf.cast(tf.less(alpha, p), dtype=self.dtype)
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(
        tf.TensorShape([static_n_samples]).concatenate(
            self.get_batch_shape()))
    return samples
Example #21
Source File: tac_loss.py From taco with GNU General Public License v3.0 | 5 votes |
def tac_decode(action_probs, term_probs, targets, seq_len, tar_len):
    # For now a non batch version.
    # T length of trajectory. D size of dictionary. l length of label. B batch_size
    # actions_prob_tensors.shape [B,max(seq_len),D]
    # stop_tensors.shape [B,max(seq_len),D,2]
    #
    # targets.shape [B,max(tar_len)]  # zero padded label sequences.
    # seq_len the actual length of each sequence.
    # tar_len the actual length of each target sequence
    # because the loss was only implemented per example, the batch version is
    # simply in a loop rather than a matrix.
    max_seq_len = tf.to_int32(tf.reduce_max(seq_len))
    bs = tf.to_int32(tf.shape(action_probs)[0])
    # loss = 0.
    cond = lambda j, loss: tf.less(j, bs)
    j = tf.constant(0, dtype=tf.int32)
    decoded = tf.zeros([1, max_seq_len], dtype=tf.int32)

    def body(j, decoded):
        idx = tf.expand_dims(targets[j, :tar_len[j]], 1)
        ac = tf.transpose(tf.gather_nd(tf.transpose(action_probs[j]), idx))
        st = tf.transpose(term_probs[j], (1, 0, 2))
        st = tf.transpose(tf.gather_nd(st, idx), (1, 0, 2))
        length = tf.to_int32(seq_len[j])
        # get essentially the probability of being at each node
        alphas = forward_tac_tf(ac, st, length, inference=True)
        # decode that by taking the argmax for each column of alphas
        dec = tf.to_int32(tf.argmax(alphas, axis=1))
        dec = tf.concat([dec, tf.zeros([max_seq_len - length], dtype=tf.int32)], axis=0)
        decoded = tf.concat([decoded, [dec]], axis=0)
        return tf.add(j, 1), decoded

    out = tf.while_loop(cond, body, loop_vars=[j, decoded],
                        shape_invariants=[tf.TensorShape(None),
                                          tf.TensorShape([None, None])])
    return out[1]
Example #22
Source File: tac_loss.py From taco with GNU General Public License v3.0 | 5 votes |
def tac_loss(action_probs, term_probs, targets, seq_len, tar_len, safe=False):
    # For now a non batch version.
    # T length of trajectory. D size of dictionary. l length of label. B batch_size
    # actions_prob_tensors.shape [B,max(seq_len),D]
    # stop_tensors.shape [B,max(seq_len),D,2]
    #
    # targets.shape [B,max(tar_len)]  # zero padded label sequences.
    # seq_len the actual length of each sequence.
    # tar_len the actual length of each target sequence
    # because the loss was only implemented per example, the batch version is
    # simply in a loop rather than a matrix.
    bs = tf.to_int32(tf.shape(action_probs)[0])
    # loss = 0.
    cond = lambda j, loss: tf.less(j, bs)
    j = tf.constant(0, dtype=tf.int32)
    loss = tf.constant(0, dtype=tf.float64)

    def body(j, loss):
        idx = tf.expand_dims(targets[j, :tar_len[j]], 1)
        ac = tf.transpose(tf.gather_nd(tf.transpose(action_probs[j]), idx))
        st = tf.transpose(term_probs[j], (1, 0, 2))
        st = tf.transpose(tf.gather_nd(st, idx), (1, 0, 2))
        length = seq_len[j]
        if safe:
            # negative log likelihood
            loss += -forward_tac_log(ac, st, length) / tf.to_double(bs)
        else:
            # negative log likelihood for whole batch
            loss += -tf.reduce_sum(tf.log(forward_tac_tf(ac, st, length)) / tf.to_double(bs))
        return tf.add(j, 1), loss  # average loss over batches

    out = tf.while_loop(cond, body, loop_vars=[j, loss])
    return out[1]
Example #23
Source File: hmc.py From zhusuan with MIT License | 5 votes |
def _init_step_size(self, q, p, mass, get_gradient, get_log_posterior):
    factor = 1.5

    def loop_cond(step_size, last_acceptance_rate, cond):
        return cond

    def loop_body(step_size, last_acceptance_rate, cond):
        # Calculate acceptance_rate
        new_q, new_p = leapfrog_integrator(
            q, p, tf.constant(0.0), step_size / 2, get_gradient, mass)
        new_q, new_p = leapfrog_integrator(
            new_q, new_p, step_size, step_size / 2, get_gradient, mass)
        __, _, _, _, acceptance_rate = get_acceptance_rate(
            q, p, new_q, new_p, get_log_posterior, mass, self.data_axes)

        acceptance_rate = tf.reduce_mean(acceptance_rate)

        # Change step size and stopping criteria
        new_step_size = tf.cond(
            tf.less(acceptance_rate, self.target_acceptance_rate),
            lambda: step_size * (1.0 / factor),
            lambda: step_size * factor)

        cond = tf.logical_not(tf.logical_xor(
            tf.less(last_acceptance_rate, self.target_acceptance_rate),
            tf.less(acceptance_rate, self.target_acceptance_rate)))
        return [new_step_size, acceptance_rate, cond]

    new_step_size, _, _ = tf.while_loop(
        loop_cond,
        loop_body,
        [self.step_size, tf.constant(1.0), tf.constant(True)])
    return new_step_size
Example #24
Source File: utils.py From zhusuan with MIT License | 5 votes |
def __lt__(self, other):
    return tf.less(self, other)
Example #25
Source File: LookupConvolution2d.py From tf-lcnn with GNU General Public License v3.0 | 5 votes |
def build(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis].value is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis].value
    kernel_shape = self.kernel_size + (input_dim, self.filters)

    # dense kernel
    self.kernel_pre = self.add_variable(name='kernel_pre',
                                        shape=kernel_shape,
                                        initializer=self.kernel_initializer,
                                        regularizer=self.kernel_regularizer,
                                        trainable=True,
                                        dtype=self.dtype)
    conv_th = tf.ones_like(self.kernel_pre) * self.sparse_th
    conv_zero = tf.zeros_like(self.kernel_pre)
    cond = tf.less(tf.abs(self.kernel_pre), conv_th)
    self.kernel = tf.where(cond, conv_zero, self.kernel_pre, name='kernel')

    if self.use_bias:
        self.bias = self.add_variable(name='bias',
                                      shape=(self.filters,),
                                      initializer=self.bias_initializer,
                                      regularizer=self.bias_regularizer,
                                      trainable=True,
                                      dtype=self.dtype)
    else:
        self.bias = None
    self.input_spec = base.InputSpec(ndim=self.rank + 2,
                                     axes={channel_axis: input_dim})
    self.built = True
Example #26
Source File: input_fn.py From imitation-learning with MIT License | 5 votes |
def _rand_coarse_pixelwise_dropout(img):
    coin = tf.less(tf.random_uniform([], 0.0, 1.0), 0.5)
    p_pixel_drop = tf.random_uniform([], 0, 0.1)
    p_height = tf.random_uniform([], 0.08, 0.2)
    p_width = tf.random_uniform([], 0.08, 0.2)
    new_img = tf.cond(
        coin,
        lambda: img_aug.coarse_pixelwise_dropout(img, p_height, p_width, p_pixel_drop, True),
        lambda: img_aug.coarse_pixelwise_dropout(img, p_height, p_width, p_pixel_drop, False),
    )
    return new_img
Example #27
Source File: balanced_positive_negative_sampler.py From vehicle_counting_tensorflow with MIT License | 5 votes |
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                   num_end_samples, total_num_samples):
  """Slices num_start_samples and last num_end_samples from input_tensor.

  Args:
    input_tensor: An int32 tensor of shape [N] to be sliced.
    num_start_samples: Number of examples to be sliced from the beginning
      of the input tensor.
    num_end_samples: Number of examples to be sliced from the end of the
      input tensor.
    total_num_samples: Sum of num_start_samples and num_end_samples. This
      should be a scalar.

  Returns:
    A tensor containing the first num_start_samples and last num_end_samples
    from input_tensor.
  """
  input_length = tf.shape(input_tensor)[0]
  start_positions = tf.less(tf.range(input_length), num_start_samples)
  end_positions = tf.greater_equal(
      tf.range(input_length), input_length - num_end_samples)
  selected_positions = tf.logical_or(start_positions, end_positions)
  selected_positions = tf.cast(selected_positions, tf.float32)
  indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                  selected_positions)
  one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                total_num_samples,
                                dtype=tf.float32)
  return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                              one_hot_selector, axes=[0, 0]), tf.int32)
Example #28
Source File: box_list_ops.py From vehicle_counting_tensorflow with MIT License | 5 votes |
def prune_non_overlapping_boxes(
    boxlist1, boxlist2, min_overlap=0.0, scope=None):
  """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.

  For each box in boxlist1, we want its IOA to be more than minoverlap with
  at least one of the boxes in boxlist2. If it does not, we remove it.

  Args:
    boxlist1: BoxList holding N boxes.
    boxlist2: BoxList holding M boxes.
    min_overlap: Minimum required overlap between boxes, to count them as
      overlapping.
    scope: name scope.

  Returns:
    new_boxlist1: A pruned boxlist with size [N', 4].
    keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
      first input BoxList `boxlist1`.
  """
  with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
    ioa_ = ioa(boxlist2, boxlist1)  # [M, N] tensor
    ioa_ = tf.reduce_max(ioa_, reduction_indices=[0])  # [N] tensor
    keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
    keep_inds = tf.squeeze(tf.where(keep_bool), squeeze_dims=[1])
    new_boxlist1 = gather(boxlist1, keep_inds)
    return new_boxlist1, keep_inds
Example #29
Source File: box_list_ops.py From vehicle_counting_tensorflow with MIT License | 5 votes |
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the
  given window. See also clip_to_window which only prunes bounding boxes that
  fall completely outside the window, and clips any bounding boxes that
  partially overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding
      boxes in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))),
        [-1])
    return gather(boxlist, valid_indices), valid_indices
Example #30
Source File: localizer.py From cnn-levelset with MIT License | 5 votes |
def smooth_l1(x):
    x = tf.abs(x)
    x = tf.select(
        tf.less(x, 1),
        tf.mul(tf.square(x), 0.5),
        tf.sub(x, 0.5)
    )
    x = tf.reshape(x, shape=[-1, 4])
    x = tf.reduce_sum(x, 1)
    return x
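Note that this last example uses pre-1.0 TensorFlow op names (tf.select, tf.mul, tf.sub) that were later renamed. The following is a minimal sketch of the same smooth-L1 computation using the current op names (tf.where and plain arithmetic), offered as an assumption about how one might port it rather than as the project's own code:

import tensorflow as tf

def smooth_l1_modern(x):
    # Elementwise smooth L1: 0.5 * x^2 where |x| < 1, |x| - 0.5 otherwise.
    x = tf.abs(x)
    x = tf.where(tf.less(x, 1.0), 0.5 * tf.square(x), x - 0.5)
    # Sum the four box-coordinate terms per example, as in the original.
    x = tf.reshape(x, shape=[-1, 4])
    return tf.reduce_sum(x, axis=1)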