Python tensorflow.while() Examples
The following are 3 code examples of tensorflow.while(). In the TensorFlow API this construct is exposed as tf.while_loop(), which is what all three examples actually call. Each example notes the original project and source file it comes from.
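Before the examples, a minimal sketch of the pattern they all build on: tf.while_loop(cond, body, loop_vars) re-applies body while cond holds, threading the loop variables through each iteration. This sketch assumes TF1-style graph execution via tf.compat.v1; all names and values are illustrative.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

i = tf.constant(0)
total = tf.constant(0)
cond = lambda i, total: tf.less(i, 5)       # loop while i < 5
body = lambda i, total: (i + 1, total + i)  # advance counter, accumulate
final_i, final_total = tf.while_loop(cond, body, [i, total])

with tf.Session() as sess:
    print(sess.run([final_i, final_total]))  # [5, 10]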
Example #1
Source File: native_module_test.py From hub with Apache License 2.0
def testUseWithinWhileLoop(self):
  with tf.Graph().as_default():
    spec = hub.create_module_spec(double_module_fn)
    m = hub.Module(spec)
    i = tf.constant(0)
    x = tf.constant(10.0)
    p = tf_v1.placeholder(dtype=tf.int32)
    c = lambda i, x: tf.less(i, p)
    b = lambda i, x: (tf.add(i, 1), m(x))
    oi, ox = tf.while_loop(c, b, [i, x])  # ox = v**p * x
    v = m.variables[0]
    dodv = tf.gradients(ox, v)[0]  # d ox / dv = p*v**(p-1) * x
    dodx = tf.gradients(ox, x)[0]  # d ox / dx = v**p
    with tf_v1.Session() as sess:
      sess.run(tf_v1.global_variables_initializer())
      self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 1}), [1, 20])
      self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 2}), [2, 40])
      self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 4}), [4, 160])
      # Gradients also use the control flow structures set up earlier.
      # Also check they are working properly.
      self.assertAllEqual(sess.run([dodv, dodx], feed_dict={p: 1}), [10, 2])
      self.assertAllEqual(sess.run([dodv, dodx], feed_dict={p: 2}), [40, 4])
      self.assertAllEqual(sess.run([dodv, dodx], feed_dict={p: 4}), [320, 16])

# tf.map_fn() is merely a wrapper around tf.while(), but just to be sure...
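The truncated comment at the end points at tf.map_fn(), which is built on top of tf.while_loop. A quick illustration of that wrapper, separate from the test above (values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

xs = tf.constant([1.0, 2.0, 3.0])
# tf.map_fn lowers to a while loop that applies fn to one element per iteration.
squares = tf.map_fn(lambda x: x * x, xs)

with tf.Session() as sess:
    print(sess.run(squares))  # [1. 4. 9.]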
Example #2
Source File: layers.py From fold with Apache License 2.0
def __init__(self, input_type=None, output_type=None, name_or_scope=None):
  """Creates the layer.

  Args:
    input_type: A type.
    output_type: A type.
    name_or_scope: A string or variable scope. If a string, a new variable
      scope will be created by calling
      [`create_variable_scope`](#create_variable_scope), with defaults
      inherited from the current variable scope. If no caching device is set,
      it will be set to `lambda op: op.device`. This is because `tf.while` can
      be very inefficient if the variables it uses are not cached locally.
  """
  if name_or_scope is None:
    name_or_scope = type(self).__name__
  if isinstance(name_or_scope, tf.VariableScope):
    self._vscope = name_or_scope
    name = str(self._vscope.name)
  elif isinstance(name_or_scope, six.string_types):
    self._vscope = create_variable_scope(name_or_scope)
    name = name_or_scope
  else:
    raise TypeError('name_or_scope must be a tf.VariableScope or a string: '
                    '%s' % (name_or_scope,))
  if self._vscope.caching_device is None:
    self._vscope.set_caching_device(lambda op: op.device)
  super(Layer, self).__init__(input_type, output_type, name)
  if not hasattr(self, '_constructor_name'):
    self._constructor_name = '__.%s' % self.__class__.__name__
  if not hasattr(self, '_constructor_args'):
    self._constructor_args = None
  if not hasattr(self, '_constructor_kwargs'):
    self._constructor_kwargs = None
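The caching-device line is the tf.while-relevant part: without a caching device, a variable read inside a while loop can be re-fetched from its home device on every iteration. A minimal sketch of the same trick outside the Layer class, assuming TF1 variable scopes (scope and variable names are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Cache each variable on the device of the op that reads it, so the
# while loop below does not re-fetch it on every iteration.
with tf.variable_scope('scaler', caching_device=lambda op: op.device):
    w = tf.get_variable('w', shape=[], initializer=tf.ones_initializer())

cond = lambda i, x: tf.less(i, 10)
body = lambda i, x: (i + 1, x * w)
_, scaled = tf.while_loop(cond, body, [tf.constant(0), tf.constant(2.0)])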
Example #3
Source File: elpips.py From elpips with BSD 2-Clause "Simplified" License
def forward(self, image, reference):
    '''Evaluates distances between images in 'image' and 'reference' (data in
    NHWC order). Returns an N-element distance vector.

    If 'image' is a tuple, evaluates all the images in the tuple with the same
    input transformations and dropout as 'reference'. A different set of input
    transformations for each would result in unnecessary uncertainty in
    determining which of the images is closest to the reference.
    The returned value is a tuple of N-element distance vectors.'''
    if isinstance(image, list):
        raise Exception('Parameter \'image\' must be a tensor or a tuple of tensors.')

    image_in = as_tuple(image)

    def cond(i, loss_sum):
        return tf.less(i, tf.cast(self.config.average_over, tf.int32))

    def body(i, loss_sum):
        ensemble = self.sample_ensemble(self.config)

        ensemble_X = for_each(image_in, lambda X: apply_ensemble(self.config, ensemble, X))
        ensemble_X = for_each(ensemble_X, lambda X: 2.0 * X - 1.0)

        ensemble_R = apply_ensemble(self.config, ensemble, reference)
        ensemble_R = 2.0 * ensemble_R - 1.0

        loss = self.network.forward(ensemble_X, ensemble_R)

        loss_sum += tf.stack(loss, axis=0)
        loss_sum.set_shape([len(image_in), self.config.batch_size])
        return i + 1, loss_sum

    if isinstance(self.config.average_over, numbers.Number) and self.config.average_over == 1:
        # Skip tf.while for trivial single iterations.
        _, loss_sum = body(0, tf.zeros([len(image_in), self.config.batch_size],
                                       dtype=self.config.dtype))
    else:
        # Run multiple times for any other average_over count.
        _, loss_sum = tf.while_loop(cond, body,
                                    (0, tf.zeros([len(image_in), self.config.batch_size],
                                                 dtype=self.config.dtype)),
                                    back_prop=self.back_prop)

    loss_sum /= tf.cast(self.config.average_over, self.config.dtype)

    if isinstance(image, tuple):
        return tuple(loss_sum[i, :] for i in range(len(image)))
    else:
        return tf.reshape(loss_sum, [self.config.batch_size])
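The shape of this method is a reusable pattern: accumulate a per-batch loss inside the loop body, then divide by the iteration count afterwards, passing back_prop=False when gradients through the loop are not needed. A stripped-down sketch with the network call replaced by a random stand-in (average_over, batch_size, and the stand-in loss are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

average_over = 8
batch_size = 4

def cond(i, loss_sum):
    return tf.less(i, average_over)

def body(i, loss_sum):
    # Stand-in for one stochastic evaluation (e.g. one sampled ensemble).
    sample_loss = tf.random.uniform([batch_size])
    return i + 1, loss_sum + sample_loss

_, loss_sum = tf.while_loop(
    cond, body, (tf.constant(0), tf.zeros([batch_size])),
    back_prop=False)  # no gradients through the loop, as when only evaluating
mean_loss = loss_sum / float(average_over)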