Python tensorflow.contrib.layers.python.layers.utils.smart_cond() Examples
The following are 12 code examples of tensorflow.contrib.layers.python.layers.utils.smart_cond(), drawn from open-source projects. Each example notes its source file, originating project, and license. You may also want to check out the other available functions and classes of the module tensorflow.contrib.layers.python.layers.utils.
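All twelve examples exercise the same contract: smart_cond evaluates its predicate at graph-construction time when the value is statically known and calls the chosen branch function directly in Python; otherwise it builds a tf.cond op and defers the choice to run time. The sketch below illustrates that contract; it is an illustration, not the library source.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import utils

def smart_cond_sketch(pred, fn1, fn2, name=None):
  """Illustrative sketch of the smart_cond contract.

  If `pred` is a Python bool or a tensor whose value is statically
  known, call `fn1` or `fn2` directly; otherwise build a `tf.cond`
  op that selects a branch at run time.
  """
  pred_value = utils.constant_value(pred)  # None if not statically known
  if pred_value is not None:
    return fn1() if pred_value else fn2()
  return tf.cond(pred, fn1, fn2, name=name)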
Example #1
Source File: utils_test.py From deep_image_model with Apache License 2.0
def test_value(self):
  fn1 = lambda: 'fn1'
  fn2 = lambda: 'fn2'
  expected = lambda v: 'fn1' if v else 'fn2'
  for v in [True, False, 1, 0]:
    o = utils.smart_cond(tf.constant(v), fn1, fn2)
    self.assertEqual(o, expected(v))
Example #2
Source File: utils_test.py From deep_image_model with Apache License 2.0
def test_constant(self):
  fn1 = lambda: tf.constant('fn1')
  fn2 = lambda: tf.constant('fn2')
  expected = lambda v: b'fn1' if v else b'fn2'
  for v in [True, False, 1, 0]:
    o = utils.smart_cond(tf.constant(v), fn1, fn2)
    with self.test_session():
      self.assertEqual(o.eval(), expected(v))
Example #3
Source File: utils_test.py From deep_image_model with Apache License 2.0
def test_variable(self):
  fn1 = lambda: tf.Variable('fn1')
  fn2 = lambda: tf.Variable('fn2')
  expected = lambda v: b'fn1' if v else b'fn2'
  for v in [True, False, 1, 0]:
    o = utils.smart_cond(tf.constant(v), fn1, fn2)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertEqual(o.eval(), expected(v))
Example #4
Source File: utils_test.py From deep_image_model with Apache License 2.0
def test_tensors(self):
  fn1 = lambda: tf.constant(0) - tf.constant(1)
  fn2 = lambda: tf.constant(0) - tf.constant(2)
  expected = lambda v: -1 if v else -2
  for v in [True, False, 1, 0]:
    o = utils.smart_cond(tf.constant(v), fn1, fn2)
    with self.test_session():
      self.assertEqual(o.eval(), expected(v))
Example #5
Source File: utils_test.py From deep_image_model with Apache License 2.0
def test_value(self):
  fn1 = lambda: tf.convert_to_tensor('fn1')
  fn2 = lambda: tf.convert_to_tensor('fn2')
  expected = lambda v: b'fn1' if v else b'fn2'
  p = tf.placeholder(tf.bool, [])
  for v in [True, False, 1, 0]:
    o = utils.smart_cond(p, fn1, fn2)
    with self.test_session():
      self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
Example #6
Source File: utils_test.py From deep_image_model with Apache License 2.0
def test_variable(self):
  fn1 = lambda: tf.Variable('fn1')
  fn2 = lambda: tf.Variable('fn2')
  expected = lambda v: b'fn1' if v else b'fn2'
  p = tf.placeholder(tf.bool, [])
  for v in [True, False, 1, 0]:
    o = utils.smart_cond(p, fn1, fn2)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
Example #7
Source File: utils_test.py From deep_image_model with Apache License 2.0
def test_tensors(self):
  fn1 = lambda: tf.constant(0) - tf.constant(1)
  fn2 = lambda: tf.constant(0) - tf.constant(2)
  expected = lambda v: -1 if v else -2
  p = tf.placeholder(tf.bool, [])
  for v in [True, False, 1, 0]:
    o = utils.smart_cond(p, fn1, fn2)
    with self.test_session():
      self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
Example #8
Source File: layers.py From deep_image_model with Apache License 2.0
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the
  expected sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
    id_fn = lambda: array_ops.identity(inputs)
    outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
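Because `is_training` is routed through smart_cond, passing a Python bool wires exactly one branch into the graph, while passing a bool tensor defers the choice to run time. A usage sketch of that difference, assuming the stock tf.contrib.layers.dropout (which has the same signature as the copy above):

import tensorflow as tf

x = tf.ones([4, 10])

# Python bool: smart_cond resolves statically, so the graph contains only
# the identity branch and no cond op is created.
eval_out = tf.contrib.layers.dropout(x, keep_prob=0.5, is_training=False)

# Bool placeholder: the value is unknown at graph construction, so
# smart_cond builds a tf.cond and one graph serves both modes.
is_training = tf.placeholder(tf.bool, [], name='is_training')
out = tf.contrib.layers.dropout(x, keep_prob=0.5, is_training=is_training)

with tf.Session() as sess:
  print(sess.run(out, feed_dict={is_training: True}))   # elements dropped/scaled
  print(sess.run(out, feed_dict={is_training: False}))  # input passed through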
Example #9
Source File: batch_norm.py From chemopt with MIT License
def _build_update_ops_variance(self, mean, variance, is_training):
  def build_update_ops():
    update_mean_op = moving_averages.assign_moving_average(
        variable=self._moving_mean,
        value=mean,
        decay=self._decay_rate,
        name="update_moving_mean").op
    update_variance_op = moving_averages.assign_moving_average(
        variable=self._moving_variance,
        value=variance,
        decay=self._decay_rate,
        name="update_moving_variance").op
    return update_mean_op, update_variance_op

  def build_no_ops():
    return (tf.no_op(), tf.no_op())

  # Only make the ops if we know that `is_training=True`, or the
  # value of `is_training` is unknown.
  is_training_const = utils.constant_value(is_training)
  if is_training_const is None or is_training_const:
    update_mean_op, update_variance_op = utils.smart_cond(
        is_training,
        build_update_ops,
        build_no_ops,
    )
    # Every new connection creates a new op which adds its contribution
    # to the running average when ran.
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_variance_op)
Example #10
Source File: batch_norm.py From chemopt with MIT License
def _build_update_ops_second_moment(self, mean, second_moment, is_training):
  def build_update_ops():
    update_mean_op = moving_averages.assign_moving_average(
        variable=self._moving_mean,
        value=mean,
        decay=self._decay_rate,
        name="update_moving_mean").op
    update_second_moment_op = moving_averages.assign_moving_average(
        variable=self._moving_second_moment,
        value=second_moment,
        decay=self._decay_rate,
        name="update_moving_second_moment").op
    return update_mean_op, update_second_moment_op

  def build_no_ops():
    return (tf.no_op(), tf.no_op())

  is_training_const = utils.constant_value(is_training)
  if is_training_const is None or is_training_const:
    update_mean_op, update_second_moment_op = utils.smart_cond(
        is_training,
        build_update_ops,
        build_no_ops,
    )
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_second_moment_op)
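Both update builders register their moving-average assignments in the tf.GraphKeys.UPDATE_OPS collection rather than wiring them into the forward pass, so a training loop must run them explicitly. A minimal sketch of the conventional TF1 pattern for doing so (the loss here is a stand-in, not taken from chemopt):

import tensorflow as tf

# Stand-in for a real model loss whose graph populated UPDATE_OPS.
x = tf.get_variable('x', shape=[], initializer=tf.ones_initializer())
loss = tf.square(x)
optimizer = tf.train.GradientDescentOptimizer(0.1)

# Group the train step with any pending moving-average updates so the
# batch-norm statistics advance once per training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = optimizer.minimize(loss)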
Example #11
Source File: batch_norm.py From chemopt with MIT License
def _build_statistics_variance(self, input_batch,
                               reduction_indices, use_batch_stats):
  self._moving_mean = tf.get_variable(
      "moving_mean",
      shape=self._mean_shape,
      collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                   tf.GraphKeys.VARIABLES],
      initializer=tf.zeros_initializer,
      trainable=False)

  self._moving_variance = tf.get_variable(
      "moving_variance",
      shape=self._mean_shape,
      collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                   tf.GraphKeys.VARIABLES],
      initializer=tf.ones_initializer(),
      trainable=False)

  def build_batch_stats():
    """Builds the batch statistics calculation ops."""
    shift = tf.add(self._moving_mean, 0)
    counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
        input_batch,
        reduction_indices,
        keep_dims=True,
        shift=shift,
        name="batch_norm_ss")

    mean, variance = tf.nn.normalize_moments(counts,
                                             shifted_sum_x,
                                             shifted_sum_x2,
                                             shift,
                                             name="normalize_moments")
    return mean, variance

  def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
    )

  mean, variance = utils.smart_cond(
      use_batch_stats,
      build_batch_stats,
      build_moving_stats,
  )
  return mean, variance
Example #12
Source File: batch_norm.py From chemopt with MIT License
def _build_statistics_second_moment(self, input_batch,
                                    reduction_indices, use_batch_stats):
  self._moving_mean = tf.get_variable(
      "moving_mean",
      shape=self._mean_shape,
      collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                   tf.GraphKeys.VARIABLES],
      initializer=tf.zeros_initializer,
      trainable=False)

  self._moving_second_moment = tf.get_variable(
      "moving_second_moment",
      shape=self._mean_shape,
      collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                   tf.GraphKeys.VARIABLES],
      initializer=tf.ones_initializer(),
      trainable=False)

  self._moving_variance = tf.sub(self._moving_second_moment,
                                 tf.square(self._moving_mean),
                                 name="moving_variance")

  def build_batch_stats():
    shift = tf.add(self._moving_mean, 0)
    counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
        input_batch,
        reduction_indices,
        keep_dims=True,
        shift=shift,
        name="batch_norm_ss")

    mean, variance = tf.nn.normalize_moments(counts,
                                             shifted_sum_x,
                                             shifted_sum_x2,
                                             shift,
                                             name="normalize_moments")
    second_moment = variance + tf.square(mean)
    return mean, variance, second_moment

  def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
        tf.identity(self._moving_second_moment),
    )

  mean, variance, second_moment = utils.smart_cond(
      use_batch_stats,
      build_batch_stats,
      build_moving_stats,
  )
  return mean, variance, second_moment