Python tensorflow.python.ops.variable_scope.get_local_variable() Examples
The following are 6 code examples of tensorflow.python.ops.variable_scope.get_local_variable(), drawn from open-source projects; the source file and license for each are noted above its code.
You may also want to check out all available functions and classes of the tensorflow.python.ops.variable_scope module.
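Before diving into the examples, a quick orientation: get_local_variable() behaves like get_variable() except that the resulting variable is non-trainable and is placed in the GraphKeys.LOCAL_VARIABLES collection, so it is initialized by tf.local_variables_initializer() rather than the global initializer. A minimal sketch, assuming TF 1.x graph mode (not taken from any of the projects below):

import tensorflow as tf
from tensorflow.python.ops import variable_scope

# A scalar local variable: non-trainable, registered in LOCAL_VARIABLES.
counter = variable_scope.get_local_variable(
    "counter", shape=[], initializer=tf.zeros_initializer())

with tf.Session() as sess:
  # Local variables are NOT covered by tf.global_variables_initializer().
  sess.run(tf.local_variables_initializer())
  print(sess.run(counter))  # 0.0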
Example #1
Source File: variable_scope_test.py From deep_image_model with Apache License 2.0
def testGetLocalVar(self):
  with self.test_session():
    # Check that local variable respects naming.
    with tf.variable_scope("outer") as outer:
      with tf.variable_scope(outer, "default", []):
        local_var = variable_scope.get_local_variable(
            "w", [], collections=["foo"])
        self.assertEqual(local_var.name, "outer/w:0")

    # Since the variable is local, it should be in the local variable
    # collection but not the trainable collection.
    self.assertIn(local_var,
                  tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES))
    self.assertIn(local_var, tf.get_collection("foo"))
    self.assertNotIn(
        local_var, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))

    # Check that local variable respects `reuse`.
    with tf.variable_scope(outer, "default", reuse=True):
      self.assertEqual(variable_scope.get_local_variable("w", []).name,
                       "outer/w:0")
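The reuse check at the end of this test matters in practice: two calls inside the same scope can share a single local variable. A minimal sketch of that pattern using tf.AUTO_REUSE (available in newer TF 1.x releases; the helper name bump and its scope are illustrative, not from the test):

def bump():
  # AUTO_REUSE creates "stats/calls" on the first call, reuses it afterwards.
  with tf.variable_scope("stats", reuse=tf.AUTO_REUSE):
    calls = variable_scope.get_local_variable(
        "calls", shape=[], dtype=tf.int32,
        initializer=tf.zeros_initializer())
  return calls.assign_add(1)

op1, op2 = bump(), bump()  # both update the same underlying local variable
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  print(sess.run(op1), sess.run(op2))  # 1 2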
Example #2
Source File: ddpg.py From chi with MIT License
def ornstein_uhlenbeck_noise(a, t_decay=100000):
  # Temporally correlated exploration noise for the action tensor `a`,
  # with the noise state kept in local (per-worker) variables.
  noise_var = get_local_variable("nm", initializer=tf.zeros(a.get_shape()[1:]))
  ou_theta = get_local_variable("ou_theta", initializer=0.2)
  ou_sigma = get_local_variable("ou_sigma", initializer=0.15)
  # ou_theta = tf.Print(ou_theta, [noise_var], 'noise: ', first_n=2000)
  # Anneal the noise scale over t_decay steps.
  ou_sigma = tf.train.exponential_decay(ou_sigma, tt.function.step(),
                                        t_decay, 1e-6)
  # One Ornstein-Uhlenbeck step: decay toward zero plus a Gaussian kick.
  n = noise_var.assign_sub(ou_theta * noise_var -
                           tf.random_normal(a.get_shape()[1:], stddev=ou_sigma))
  return a + n
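For readers unfamiliar with the process: the assign_sub line is one step of a discretized Ornstein-Uhlenbeck process, n_{t+1} = n_t - theta * n_t + N(0, sigma), i.e. exploration noise that relaxes toward zero while receiving Gaussian kicks, giving temporally correlated perturbations of the action. A self-contained NumPy sketch of the same recurrence (theta and sigma copied from the example above, everything else illustrative):

import numpy as np

def ou_step(noise, theta=0.2, sigma=0.15):
  # Same update as noise_var.assign_sub(theta * noise - N(0, sigma)).
  return noise - (theta * noise - np.random.normal(0.0, sigma, noise.shape))

noise = np.zeros(3)
for _ in range(5):
  noise = ou_step(noise)
  print(noise)  # successive samples are correlated, unlike white noise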
Example #3
Source File: resample.py From lambda-packs with MIT License
def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and
      possibly zero) first dimension.
    A tensor containing the effective resampling rate used for each output.
  """
  # Algorithm: Just compute rates as weights/mean_weight * overall_rate.
  # This way the average weight corresponds to the overall rate, and a
  # weight twice the average has twice the rate, etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with zero debiasing
    # enabled (by default) to avoid throwing the average off.
    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean',
          initializer=math_ops.cast(0, weights.dtype),
          dtype=weights.dtype)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, mean_decay)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])
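A hedged usage sketch for the function above. It assumes weighted_resample is importable from tf.contrib.training (where this resample.py lives in the TF 1.x source tree); the batch values are made up. Both initializers are run because the mean estimate is a local variable, while zero debiasing may create ordinary variables of its own.

import tensorflow as tf
from tensorflow.contrib.training import weighted_resample

data = tf.constant([[1.0], [2.0], [3.0], [4.0]])
weights = tf.constant([0.1, 0.1, 0.1, 3.0])  # the last row is heavily favored

resampled, rates = weighted_resample([data], weights, overall_rate=0.5)

with tf.Session() as sess:
  sess.run([tf.global_variables_initializer(),
            tf.local_variables_initializer()])
  # Rows of `data` are drawn with probability proportional to their weight;
  # the first dimension of the result varies from run to run.
  print(sess.run(resampled[0]))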
Example #4
Source File: resample.py From auto-alt-text-lambda-api with MIT License
def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and
      possibly zero) first dimension.
    A tensor containing the effective resampling rate used for each output.
  """
  # Algorithm: Just compute rates as weights/mean_weight * overall_rate.
  # This way the average weight corresponds to the overall rate, and a
  # weight twice the average has twice the rate, etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with zero debiasing
    # enabled (by default) to avoid throwing the average off.
    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean',
          initializer=math_ops.cast(0, weights.dtype),
          dtype=weights.dtype)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, mean_decay)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])
Example #5
Source File: resample.py From deep_image_model with Apache License 2.0
def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, warmup=10, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    warmup: Until the resulting tensor has been evaluated `warmup` times,
      the resampling method uses the true mean over all calls as its weight
      estimate, rather than a decayed mean.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and
      possibly zero) first dimension.
    A tensor containing the effective resampling rate used for each output.
  """
  # Algorithm: Just compute rates as weights/mean_weight * overall_rate.
  # This way the average weight corresponds to the overall rate, and a
  # weight twice the average has twice the rate, etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with decay
    # adjusted (by also maintaining an invocation count) during the
    # warmup period so that at the beginning, there aren't too many
    # zeros mixed in, throwing the average off.
    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      count_so_far = variable_scope.get_local_variable(
          'resample_count', initializer=0)
      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean', initializer=0.0)

      count = count_so_far.assign_add(1)
      real_decay = math_ops.minimum(
          math_ops.truediv((count - 1), math_ops.minimum(count, warmup)),
          mean_decay)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, real_decay, zero_debias=False)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])
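The real_decay schedule is the one subtle difference from the other versions: while count <= warmup it equals (count - 1) / count, which turns the moving average into an exact running mean of the batch means seen so far, and afterwards it is capped at mean_decay. A small illustrative trace of the schedule (values computed by hand, warmup and mean_decay as in the signature):

# Decay used at each evaluation, with warmup=10 and mean_decay=0.999.
for count in range(1, 13):
  real_decay = min((count - 1) / min(count, 10), 0.999)
  print(count, round(real_decay, 3))
# count=1  -> 0.0   (the estimate is exactly the first batch mean)
# count=5  -> 0.8   (the exact running mean over the first five batches)
# count=11 -> 0.999 (a plain exponential moving average from here on)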
Example #6
Source File: resample.py From keras-lambda with MIT License
def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and
      possibly zero) first dimension.
    A tensor containing the effective resampling rate used for each output.
  """
  # Algorithm: Just compute rates as weights/mean_weight * overall_rate.
  # This way the average weight corresponds to the overall rate, and a
  # weight twice the average has twice the rate, etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with zero debiasing
    # enabled (by default) to avoid throwing the average off.
    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean',
          initializer=math_ops.cast(0, weights.dtype),
          dtype=weights.dtype)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, mean_decay)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])