Python tensorflow.while_loop() Examples
The following are 25 code examples of tensorflow.while_loop(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
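Before the examples, a quick orientation: tf.while_loop(cond, body, loop_vars) repeatedly applies body to the loop variables while cond returns true, and returns their final values. The toy sketch below is not taken from any of the projects on this page; it just sums the integers 0 through 9 in TF 1.x graph mode, which all of the examples use.

import tensorflow as tf

i0 = tf.constant(0)
total0 = tf.constant(0)

# cond and body both receive the current values of the loop variables.
cond = lambda i, total: tf.less(i, 10)
body = lambda i, total: (i + 1, total + i)

_, total = tf.while_loop(cond, body, [i0, total0])

with tf.Session() as sess:
    print(sess.run(total))  # 45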
Example #1
Source File: hmc.py From zhusuan with MIT License | 7 votes |
def _leapfrog(self, q, p, step_size, get_gradient, mass):
    def loop_cond(i, q, p):
        return i < self.n_leapfrogs + 1

    def loop_body(i, q, p):
        step_size1 = tf.cond(i > 0,
                             lambda: step_size,
                             lambda: tf.constant(0.0, dtype=tf.float32))
        step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                            tf.less(0, i)),
                             lambda: step_size,
                             lambda: step_size / 2)
        q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                   lambda q: get_gradient(q), mass)
        return [i + 1, q, p]

    i = tf.constant(0)
    _, q, p = tf.while_loop(loop_cond,
                            loop_body,
                            [i, q, p],
                            back_prop=False,
                            parallel_iterations=1)
    return q, p
Example #2
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0 | 6 votes |
def build_pgd_attack(self, eps):
    victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)

    def one_step_attack(image, grad):
        """
        core components of this attack are:
        (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
        (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
        (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
        """
        orig_image = image
        image = self.structure(image)
        image = (image - 127.5) / 128.0
        image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
        prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        embeddings = tf.reshape(embeddings[0], [512, 1])
        objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))  # to be maximized

        noise, = tf.gradients(objective, orig_image)
        noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
        noise = 0.9 * grad + noise

        adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0, lower_bound, upper_bound)
        return adv, noise

    input = tf.to_float(self.image_batch)
    lower_bound = tf.clip_by_value(input - eps, 0, 255.)
    upper_bound = tf.clip_by_value(input + eps, 0, 255.)

    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        adv, _ = tf.while_loop(
            lambda _, __: True, one_step_attack,
            (input, tf.zeros_like(input)),
            back_prop=False,
            maximum_iterations=100,
            parallel_iterations=1)
    self.adv_image = adv
    return adv
Example #3
Source File: generate.py From Chinese-Poetry-Generation with MIT License | 6 votes |
def _reshape_decoder_outputs(self):
    """ Reshape decoder_outputs into shape [?, _NUM_UNITS]. """
    def concat_output_slices(idx, val):
        output_slice = tf.slice(
            input_=self.decoder_outputs,
            begin=[idx, 0, 0],
            size=[1, self.decoder_input_length[idx], _NUM_UNITS])
        return tf.add(idx, 1), \
            tf.concat([val, tf.squeeze(output_slice, axis=0)], axis=0)

    tf_i = tf.constant(0)
    tf_v = tf.zeros(shape=[0, _NUM_UNITS], dtype=tf.float32)
    _, reshaped_outputs = tf.while_loop(
        cond=lambda i, v: i < _BATCH_SIZE,
        body=concat_output_slices,
        loop_vars=[tf_i, tf_v],
        shape_invariants=[tf.TensorShape([]),
                          tf.TensorShape([None, _NUM_UNITS])])
    tf.TensorShape([None, _NUM_UNITS]). \
        assert_same_rank(reshaped_outputs.shape)
    return reshaped_outputs
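The shape_invariants argument above is required because val grows along its first axis on every iteration, so its static shape differs between iterations. A minimal self-contained sketch of the same growing-accumulator pattern (names are illustrative, not from the source):

import tensorflow as tf

i0 = tf.constant(0)
acc0 = tf.zeros([0, 4])  # accumulator that grows along axis 0

def body(i, acc):
    new_row = tf.fill([1, 4], tf.cast(i, tf.float32))
    return i + 1, tf.concat([acc, new_row], axis=0)

_, rows = tf.while_loop(
    lambda i, acc: i < 3, body, [i0, acc0],
    # Declare that acc's leading dimension may change between iterations.
    shape_invariants=[tf.TensorShape([]), tf.TensorShape([None, 4])])
# rows has shape [3, 4] at run time.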
Example #4
Source File: compiler_test.py From federated with Apache License 2.0 | 6 votes |
def test_import_tf_comp_with_while_loop(self):

    @computations.tf_computation(tf.float32)
    def comp(x):
        # An example of a loop with variables that computes 2^x by counting from
        # x down to 0, and doubling the result in each iteration.
        a = tf.Variable(0.0)
        b = tf.Variable(1.0)
        with tf.control_dependencies([a.initializer, b.initializer]):
            with tf.control_dependencies([a.assign(x)]):
                cond_fn = lambda a, b: a > 0
                body_fn = lambda a, b: (a - 1.0, b * 2.0)
                return tf.while_loop(cond_fn, body_fn, (a, b))[1]

    module, mlir = self._import_compile_and_return_module_and_mlir(comp)

    # Not checking the full MLIR in the long generated body, just that we can
    # successfully ingest TF code containing a while loop here, end-to-end. We
    # need some form of looping support in lieu of `tf.data.Dataset.reduce()`.
    self._assert_mlir_contains_pattern(
        mlir, ['func @fn(%arg0: tensor<f32>) -> tensor<f32>'])

    result = runtime.compile_and_run_on_args(
        module, backend_info.VULKAN_SPIRV, np.float32(5.0))
    self.assertEqual(result, 32.0)
Example #5
Source File: ngctc_loss.py From taco with GNU General Public License v3.0 | 6 votes |
def ngctc_decode(term_probs, targets, seq_len, tar_len):
    max_seq_len = tf.to_int32(tf.reduce_max(seq_len))
    bs = tf.to_int32(tf.shape(term_probs)[0])
    # loss = 0.
    cond = lambda j, loss: tf.less(j, bs)
    j = tf.constant(0, dtype=tf.int32)
    decoded = tf.zeros([1, max_seq_len], dtype=tf.int32)

    def body(j, decoded):
        idx = tf.expand_dims(targets[j, :tar_len[j]], 1)
        st = tf.transpose(term_probs[j], (1, 0))
        st = tf.transpose(tf.gather_nd(st, idx), (1, 0))
        length = tf.to_int32(seq_len[j])
        # get essentially the probability of being at each node
        alphas = forward_ngctc(st, length, inference=True)
        # decode by taking the argmax for each column of alphas
        dec = tf.to_int32(tf.argmax(alphas, axis=1))
        dec = tf.concat([dec, tf.zeros([max_seq_len - length], dtype=tf.int32)], axis=0)
        decoded = tf.concat([decoded, [dec]], axis=0)
        return tf.add(j, 1), decoded

    out = tf.while_loop(cond, body, loop_vars=[j, decoded],
                        shape_invariants=[tf.TensorShape(None),
                                          tf.TensorShape([None, None])])
    return out[1]
Example #6
Source File: math_helpers.py From graphics with Apache License 2.0 | 6 votes |
def double_factorial(n):
    """Computes the double factorial of `n`.

    Note:
        In the following, A1 to An are optional batch dimensions.

    Args:
        n: A tensor of shape `[A1, ..., An]` containing positive integer values.

    Returns:
        A tensor of shape `[A1, ..., An]` containing the double factorial of `n`.
    """
    n = tf.convert_to_tensor(value=n)

    two = tf.ones_like(n) * 2
    result = tf.ones_like(n)
    _, result, _ = tf.while_loop(
        cond=_double_factorial_loop_condition,
        body=_double_factorial_loop_body,
        loop_vars=[n, result, two])
    return result
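The loop condition and body are helpers defined elsewhere in the module. A plausible reconstruction, offered only as a hypothetical illustration of how such helpers could look: multiply result by every element of n still greater than one, then step n down by two, stopping once no element exceeds one.

import tensorflow as tf

def _double_factorial_loop_condition(n, result, two):
    # Keep iterating while any element of n is still greater than one.
    return tf.reduce_any(tf.greater(n, 1))

def _double_factorial_loop_body(n, result, two):
    # Multiply result by n wherever n > 1, then decrement n by two.
    result = tf.where(tf.greater(n, 1), result * n, result)
    return n - two, result, two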
Example #7
Source File: beam_search.py From models with Apache License 2.0 | 6 votes |
def search(self, initial_ids, initial_cache):
    """Beam search for sequences with highest scores."""
    state, state_shapes = self._create_initial_state(initial_ids, initial_cache)

    finished_state = tf.while_loop(
        cond=self._continue_search, body=self._search_step,
        loop_vars=[state], shape_invariants=[state_shapes],
        parallel_iterations=1, back_prop=False)
    finished_state = finished_state[0]

    alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
    alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
    finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
    finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
    finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]

    # Account for corner case where there are no finished sequences for a
    # particular batch item. In that case, return alive sequences for that
    # batch item.
    finished_seq = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_seq,
        alive_seq)
    finished_scores = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_scores,
        alive_log_probs)
    return finished_seq, finished_scores
Example #8
Source File: tensor_ops.py From hart with GNU General Public License v3.0 | 6 votes |
def broadcast_against(tensor, against_expr):
    """Adds trailing dimensions to mask to enable broadcasting against data

    :param tensor: tensor to be broadcasted
    :param against_expr: tensor will be broadcasted against it
    :return: mask expr with tf.rank(mask) == tf.rank(data)
    """
    def cond(data, tensor):
        return tf.less(tf.rank(tensor), tf.rank(data))

    def body(data, tensor):
        return data, tf.expand_dims(tensor, -1)

    shape_invariants = [against_expr.get_shape(), tf.TensorShape(None)]
    _, tensor = tf.while_loop(cond, body, [against_expr, tensor], shape_invariants)
    return tensor
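A usage sketch with hypothetical shapes: expanding a lower-rank mask until it can be multiplied against a feature map.

import tensorflow as tf

data = tf.random_normal([4, 8, 8, 3])  # rank-4 feature map
mask = tf.ones([4, 8, 8])              # rank-3 mask

mask = broadcast_against(mask, data)   # trailing dims added until ranks match
masked = data * mask                   # now broadcasts over the channel dimension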
Example #9
Source File: token_generator_hmm.py From BERT with Apache License 2.0 | 5 votes |
def dynamic_span_mask_v1(batch_size, seq_len, hmm_tran_prob):
    state = tf.zeros((batch_size, 1), dtype=tf.int32)
    tran_size = bert_utils.get_shape_list(hmm_tran_prob, expected_rank=[2])
    init_state_prob = tf.random_uniform([batch_size, tran_size[0]],
                                        minval=0.0,
                                        maxval=10.0,
                                        dtype=tf.float32)
    valid_init_state_mask = tf.expand_dims(
        tf.cast(tf.not_equal(hmm_tran_prob, 0)[:, 0], tf.float32), axis=0)
    init_state_prob *= valid_init_state_mask
    init_state = tf.multinomial(tf.log(init_state_prob) + 1e-10,
                                num_samples=1,
                                output_dtype=tf.int32)  # batch x 1
    print(batch_size, seq_len)

    def hmm_recurrence(i, cur_state, state):
        current_prob = tf.gather_nd(hmm_tran_prob, cur_state)
        print("===prob shape==", current_prob.get_shape())
        next_state = tf.multinomial(tf.log(current_prob + 1e-10),
                                    num_samples=1,
                                    output_dtype=tf.int32)
        state = tf.concat([state, next_state], axis=-1)
        print("state shape==", state.get_shape())
        # state = state.write(i, next_state)  # indices, [batch_size]
        return i + 1, next_state, state

    _, _, state = tf.while_loop(
        cond=lambda i, _1, _2: i < seq_len,
        body=hmm_recurrence,
        loop_vars=(1, init_state, state),
        parallel_iterations=1,
        back_prop=False,
        shape_invariants=(tf.TensorShape(None),
                          tf.TensorShape([None, None]),
                          tf.TensorShape([None, None])))
    span_mask = tf.cast(tf.not_equal(state, 0), tf.int32)
    return state, span_mask
Example #10
Source File: eval_runner.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=10):
    """Wraps the ops generated by `op_fn` in tf.while_loop."""

    def computation(i):
        ops = op_fn()
        if not isinstance(ops, list):
            ops = [ops]
        with tf.control_dependencies(ops):
            return i + 1

    return tf.while_loop(
        lambda i: tf.less(i, n),
        computation, [tf.constant(0)],
        parallel_iterations=parallel_iterations)
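This wrapper is typically used to amortize session.run overhead by iterating an op in-graph rather than calling run n times. A hypothetical usage sketch (the counter variable is illustrative, not from the source):

import tensorflow as tf

counter = tf.get_variable('counter', initializer=0)

def op_fn():
    # Any op (or list of ops) with a side effect, built once inside the body.
    return counter.assign_add(1)

loop = wrap_computation_in_while_loop(op_fn, n=1000)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(loop)            # executes the increment 1000 times in-graph
    print(sess.run(counter))  # 1000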
Example #11
Source File: utils.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def wrap_computation_in_while_loop(op_fn, n, host_name):
    """Wraps the ops generated by `op_fn` in tf.while_loop."""

    def computation(i):
        ops = op_fn()
        if not isinstance(ops, list):
            ops = [ops]
        with tf.control_dependencies(ops):
            return i + 1

    with tf.device(device_for_host(host_name)):
        return tf.while_loop(
            lambda i: tf.less(i, n), computation, [tf.constant(0)],
            parallel_iterations=1)
Example #12
Source File: yolo_v1.py From blueoil with Apache License 2.0 | 5 votes |
def _gt_boxes_to_cell(self, gt_boxes_list):
    """Check gt_boxes are not dummy, create cell_gt_boxes and object_mask from the gt_boxes.

    Args:
        gt_boxes_list: Tensor [batch_size, max_num_boxes, 4(center_x, center_y, w, h)]

    Return:
        cell_gt_boxes: Tensor [batch_size, cell_size, cell_size, 4(center_x, center_y, w, h)].
            copied from the non-dummy gt box coordinates to the corresponding cell.
        object_masks: Tensor [batch_size, cell_size, cell_size, 1].
            cells that contain a gt box are 1, the rest are 0.
    """
    cell_gt_boxes = []
    object_masks = []
    for batch_index in range(self.batch_size):
        i = tf.constant(0)
        gt_boxes = gt_boxes_list[batch_index, :, :]
        cell_gt_box = tf.zeros([self.cell_size, self.cell_size, 5])
        object_mask = tf.zeros([self.cell_size, self.cell_size, 1])

        _, _, result_cell_gt_box, result_object_mask = tf.while_loop(
            self._gt_boxes_to_cell_loop_cond,
            self._gt_boxes_to_cell_loop_body,
            [i, gt_boxes, cell_gt_box, object_mask])

        cell_gt_boxes.append(result_cell_gt_box)
        object_masks.append(result_object_mask)

    cell_gt_boxes = tf.stack(cell_gt_boxes)
    object_masks = tf.stack(object_masks)

    return cell_gt_boxes, object_masks
Example #13
Source File: common_layers.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def should_generate_summaries():
    """Is this an appropriate context to generate summaries.

    Returns:
        a boolean
    """
    name_scope = tf.contrib.framework.get_name_scope()
    if name_scope and "while/" in name_scope:
        # Summaries don't work well within tf.while_loop()
        return False
    if tf.get_variable_scope().reuse:
        # Avoid generating separate summaries for different data shards
        return False
    return True
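A short usage sketch (the helper name is hypothetical): guard summary creation so it silently becomes a no-op inside while-loop bodies and reused variable scopes.

def summarize_mean(name, tensor):
    # Skipped inside a tf.while_loop body or a reused variable scope.
    if should_generate_summaries():
        tf.summary.scalar(name, tf.reduce_mean(tensor))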
Example #14
Source File: ncf_main.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=10):
    """Wraps the ops generated by `op_fn` in tf.while_loop."""

    def computation(i):
        ops = op_fn()
        if not isinstance(ops, list):
            ops = [ops]
        with tf.control_dependencies(ops):
            return i + 1

    return tf.while_loop(
        lambda i: tf.less(i, n),
        computation, [tf.constant(0)],
        parallel_iterations=parallel_iterations)
Example #15
Source File: low_level_runner.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def wrap_computation_in_while_loop(op_fn, n, host_name):
    """Wraps the ops generated by `op_fn` in tf.while_loop."""

    def computation(i):
        ops = op_fn()
        if not isinstance(ops, list):
            ops = [ops]
        with tf.control_dependencies(ops):
            return i + 1

    with tf.device(device_for_host(host_name)):
        return tf.while_loop(
            lambda i: tf.less(i, n), computation, [tf.constant(0)],
            parallel_iterations=1)
Example #16
Source File: utils.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=1):
    """Wraps the ops generated by `op_fn` in tf.while_loop."""

    def computation(i):
        ops = op_fn()
        if not isinstance(ops, list):
            ops = [ops]
        with tf.control_dependencies(ops):
            return i + 1

    return tf.while_loop(
        lambda i: tf.less(i, n),
        computation, [tf.constant(0)],
        parallel_iterations=parallel_iterations)
Example #17
Source File: attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def attack(self, x, y):
    """
    This method creates a symbolic graph that, given an input image,
    first randomly perturbs the image. The perturbation is bounded to an
    epsilon ball. Then multiple steps of gradient descent are performed to
    increase the probability of a target label or decrease the probability
    of the ground-truth label.

    :param x: A tensor with the input image.
    """
    import tensorflow as tf
    from cleverhans.utils_tf import clip_eta

    if self.rand_init:
        eta = tf.random_uniform(
            tf.shape(x), -self.eps, self.eps, dtype=self.tf_dtype)
        eta = clip_eta(eta, self.ord, self.eps)
    else:
        eta = tf.zeros_like(x)

    def cond(i, _):
        return tf.less(i, self.nb_iter)

    def body(i, e):
        new_eta = self.attack_single_step(x, e, y)
        return i + 1, new_eta

    _, eta = tf.while_loop(cond, body, [tf.zeros([]), eta], back_prop=True)

    adv_x = x + eta
    if self.clip_min is not None and self.clip_max is not None:
        adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

    return adv_x
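A minimal self-contained sketch of the same iterated sign-gradient pattern inside tf.while_loop; the logits_fn model function and all constants are assumptions for illustration, not cleverhans API.

import tensorflow as tf

def pgd_sketch(x, y, logits_fn, eps=0.3, step_size=0.01, nb_iter=40):
    """Iterated sign-gradient steps, projected onto the eps-ball around x."""
    def cond(i, _):
        return tf.less(i, nb_iter)

    def body(i, adv):
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits_fn(adv))
        grad, = tf.gradients(tf.reduce_sum(loss), adv)
        adv = adv + step_size * tf.sign(grad)
        # Project back onto the L-infinity ball of radius eps around x.
        adv = tf.clip_by_value(adv, x - eps, x + eps)
        return i + 1, adv

    # back_prop=False here: the attack output itself is not differentiated.
    _, adv_x = tf.while_loop(cond, body, [tf.constant(0), x], back_prop=False)
    return adv_x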
Example #18
Source File: layers.py From mist-rnns with Apache License 2.0 | 5 votes |
def _compute_states(self):
    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
        return tf.less(t, self.length)

    def body(t, h, h_ta):
        x = x_ta.read(t)
        num_units, input_size = self.num_hidden_units, self.input_size

        with tf.variable_scope('gru'):
            r = tf.nn.sigmoid(self._linear(h, x, num_units, scope='r'))
            h_pre_act = r * h
            h_tilde = self.activation(
                self._linear(h_pre_act, x, num_units, scope='h'))

            z = tf.nn.sigmoid(self._linear(
                h, x, num_units, shift=self.optional_bias_shift, scope='z'))
            h_new = z * h + (1 - z) * h_tilde

        h_ta_new = h_ta.write(t, h_new)
        return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
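The pattern above — unstack the inputs into a TensorArray, read one step per iteration, write one output per iteration, then stack at the end — is the standard way to scan over time inside tf.while_loop. A minimal self-contained sketch that computes per-step running sums (names illustrative):

import tensorflow as tf

def running_sum(inputs):
    """inputs: [time, batch, features] -> per-step cumulative sums, same shape."""
    length = tf.shape(inputs)[0]
    x_ta = tf.TensorArray(tf.float32, size=length).unstack(inputs)
    y_ta = tf.TensorArray(tf.float32, size=length)

    def cond(t, acc, y_ta):
        return tf.less(t, length)

    def body(t, acc, y_ta):
        acc = acc + x_ta.read(t)            # update the carried state
        return t + 1, acc, y_ta.write(t, acc)

    acc0 = tf.zeros_like(inputs[0])
    _, _, y_ta = tf.while_loop(cond, body, [tf.constant(0), acc0, y_ta])
    return y_ta.stack()                     # back to [time, batch, features]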
Example #19
Source File: token_generator_hmm.py From BERT with Apache License 2.0 | 5 votes |
def dynamic_span_mask_v2(batch_size, seq_len, hmm_tran_prob):
    state = tf.zeros((batch_size, seq_len), dtype=tf.int32)
    tran_size = bert_utils.get_shape_list(hmm_tran_prob, expected_rank=[2])
    init_state_prob = tf.random_uniform([batch_size, tran_size[0]],
                                        minval=0.0,
                                        maxval=10.0,
                                        dtype=tf.float32)
    valid_init_state_mask = tf.expand_dims(
        tf.cast(tf.not_equal(hmm_tran_prob, 0)[:, 0], tf.float32), axis=0)
    init_state_prob *= valid_init_state_mask
    init_state = tf.multinomial(tf.log(init_state_prob) + 1e-10,
                                num_samples=1,
                                output_dtype=tf.int32)  # batch x 1

    def hmm_recurrence(i, cur_state, state):
        current_prob = tf.gather_nd(hmm_tran_prob, cur_state)
        next_state = tf.multinomial(tf.log(current_prob + 1e-10),
                                    num_samples=1,
                                    output_dtype=tf.int32)
        mask = tf.expand_dims(tf.one_hot(i, seq_len), axis=0)
        state = state + tf.cast(mask, tf.int32) * next_state
        return i + 1, next_state, state

    _, _, state = tf.while_loop(
        cond=lambda i, _1, _2: i < seq_len,
        body=hmm_recurrence,
        loop_vars=(1, init_state, state),
        back_prop=False)
    span_mask = tf.cast(tf.not_equal(state, 0), tf.int32)
    return state, span_mask
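Compared with dynamic_span_mask_v1 earlier on this page, which grows state with tf.concat and therefore needs shape_invariants, this version preallocates state at its final [batch, seq_len] shape and writes column i through a one-hot mask, so every loop variable keeps a fixed shape. The write trick in isolation:

# Write next_state (shape [batch, 1]) into column i of state
# (shape [batch, seq_len]) without changing any loop-variable shapes:
mask = tf.expand_dims(tf.one_hot(i, seq_len), axis=0)  # [1, seq_len]
state = state + tf.cast(mask, tf.int32) * next_state   # broadcasts to [batch, seq_len]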
Example #20
Source File: layers.py From mist-rnns with Apache License 2.0 | 5 votes |
def _compute_states(self):
    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)
    c_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, c, h, c_ta, h_ta):
        return tf.less(t, self.length)

    def body(t, c, h, c_ta, h_ta):
        x = x_ta.read(t)
        num_units, input_size = self.num_hidden_units, self.input_size

        with tf.variable_scope('lstm'):
            c_tilde = self.activation(self._linear(h, x, num_units, scope='c'))
            i = tf.nn.sigmoid(self._linear(h, x, num_units, scope='i'))
            f = tf.nn.sigmoid(self._linear(
                h, x, num_units, shift=self.optional_bias_shift, scope='f'))
            o = tf.nn.sigmoid(self._linear(h, x, num_units, scope='o'))
            c_new = i * c_tilde + f * c
            h_new = o * self.activation(c_new)

        c_ta_new = c_ta.write(t, c_new)
        h_ta_new = h_ta.write(t, h_new)
        return t + 1, c_new, h_new, c_ta_new, h_ta_new

    t = tf.constant(0)
    c, h = tf.split(tf.squeeze(self.initial_states, [1]), 2, axis=1)
    _, _, _, c_ta, h_ta = tf.while_loop(cond, body, [t, c, h, c_ta, h_ta])

    outputs = tf.transpose(h_ta.stack(), [1, 0, 2], name='outputs')
    cells = tf.transpose(c_ta.stack(), [1, 0, 2])
    states = tf.concat([cells, outputs], axis=2, name='states')
    return outputs, states
Example #21
Source File: layers.py From mist-rnns with Apache License 2.0 | 5 votes |
def _compute_states(self):
    """ Compute hidden states.

    Returns:
        A tuple, (outputs, states).
    """
    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
        return tf.less(t, self.length)

    def body(t, h, h_ta):
        x = x_ta.read(t)
        num_units, input_size = self.num_hidden_units, self.input_size

        with tf.variable_scope('simple_rnn'):
            h_new = self.activation(self._linear(h, x, num_units, scope='simple_rnn'))

        h_ta_new = h_ta.write(t, h_new)
        return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
Example #22
Source File: ops.py From COCO-GAN with MIT License | 5 votes |
def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
    # Usually num_iters = 1 will be enough
    W_shape = W.shape.as_list()
    W_reshaped = tf.reshape(W, [-1, W_shape[-1]])
    if u is None:
        u = tf.get_variable("u", [1, W_shape[-1]],
                            initializer=tf.truncated_normal_initializer(),
                            trainable=False)

    def power_iteration(i, u_i, v_i):
        v_ip1 = _l2normalize(tf.matmul(u_i, tf.transpose(W_reshaped)))
        u_ip1 = _l2normalize(tf.matmul(v_ip1, W_reshaped))
        return i + 1, u_ip1, v_ip1

    _, u_final, v_final = tf.while_loop(
        cond=lambda i, _1, _2: i < num_iters,
        body=power_iteration,
        loop_vars=(tf.constant(0, dtype=tf.int32),
                   u,
                   tf.zeros(dtype=tf.float32, shape=[1, W_reshaped.shape.as_list()[0]])))

    if update_collection is None:
        warnings.warn('Setting update_collection to None will update u at every W '
                      'execution. This may be undesirable. Please consider using an '
                      'update collection instead.')
        sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
        # sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
        W_bar = W_reshaped / sigma
        with tf.control_dependencies([u.assign(u_final)]):
            W_bar = tf.reshape(W_bar, W_shape)
    else:
        sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
        # sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
        W_bar = W_reshaped / sigma
        W_bar = tf.reshape(W_bar, W_shape)
        # Put NO_OPS to not update any collection. This is useful for the second call
        # of discriminator if the update_op has already been collected on the first call.
        if update_collection != NO_OPS:
            tf.add_to_collection(update_collection, u.assign(u_final))

    if with_sigma:
        return W_bar, sigma
    else:
        return W_bar
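A hypothetical usage sketch: normalize a weight before a matmul and collect the power-iteration update so it can be run once per training step. The input tensor and the collection name are assumptions, not from the source.

import tensorflow as tf

x = tf.random_normal([32, 256])             # hypothetical input batch
w = tf.get_variable('kernel', [256, 128])
w_bar = spectral_normed_weight(w, update_collection='sn_updates')
y = tf.matmul(x, w_bar)

# Run the collected u-update once per training step, e.g. grouped with the train op:
update_u = tf.group(*tf.get_collection('sn_updates'))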
Example #23
Source File: spherical_harmonics.py From graphics with Apache License 2.0 | 5 votes |
def _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1):
    n = m + 2
    x, n, l, m, pmm, pmm1 = tf.while_loop(
        cond=_evaluate_legendre_polynomial_loop_cond,
        body=_evaluate_legendre_polynomial_loop_body,
        loop_vars=[x, n, l, m, pmm, pmm1])
    return pmm1
Example #24
Source File: common_layers.py From BERT with Apache License 2.0 | 5 votes |
def should_generate_summaries():
    """Is this an appropriate context to generate summaries.

    Returns:
        a boolean
    """
    name_scope = tf.contrib.framework.get_name_scope()
    if name_scope and "while/" in name_scope:
        # Summaries don't work well within tf.while_loop()
        return False
    if tf.get_variable_scope().reuse:
        # Avoid generating separate summaries for different data shards
        return False
    return True
Example #25
Source File: net.py From CVTron with Apache License 2.0 | 5 votes |
def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables

    Args:
        predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
            ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
        labels: 3-D tensor of [batch_size, max_objects, 5]
        objects_num: 1-D tensor [batch_size]
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
        predict = predicts[i, :, :, :]
        label = labels[i, :, :]
        object_num = objects_num[i]
        results = tf.ones([7, 7, 2])
        tuple_results = tf.while_loop(
            self.cond1, self.body1,
            [tf.constant(0), object_num,
             [class_loss, object_loss, noobject_loss, coord_loss],
             predict, label, results])
        for j in range(4):
            loss[j] = loss[j] + tuple_results[2][j]
        results = tuple_results[5]

    tf.add_to_collection(
        'losses', (loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)

    tf.summary.scalar('class_loss', loss[0] / self.batch_size)
    tf.summary.scalar('object_loss', loss[1] / self.batch_size)
    tf.summary.scalar('noobject_loss', loss[2] / self.batch_size)
    tf.summary.scalar('coord_loss', loss[3] / self.batch_size)
    tf.summary.scalar('weight_loss',
                      tf.add_n(tf.get_collection('losses'))
                      - (loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), results