Python tensorflow.python.ops.variable_scope.AUTO_REUSE Examples
The following are 7 code examples of tensorflow.python.ops.variable_scope.AUTO_REUSE.
Note that AUTO_REUSE is a sentinel constant, not a callable: it is passed as the reuse argument of a variable scope to request create-or-reuse behavior from get_variable.
Each example notes the original project, source file, and license it was taken from.
You may also want to check out the other functions and classes available in the tensorflow.python.ops.variable_scope module.
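AUTO_REUSE is exposed publicly as tf.compat.v1.AUTO_REUSE (tf.AUTO_REUSE in TF 1.x). When a variable scope is opened with reuse=AUTO_REUSE, get_variable creates a variable the first time a name is requested and returns the existing variable on every later request, instead of raising a "variable already exists" error. A minimal sketch of that behavior, assuming TF 1.x-style graph mode; the dense helper and its names are illustrative, not taken from the projects below:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

def dense(x, units):
  # AUTO_REUSE: create 'layer/kernel' and 'layer/bias' on the first call,
  # silently reuse the same variables on every later call with this scope name.
  with tf.compat.v1.variable_scope('layer', reuse=tf.compat.v1.AUTO_REUSE):
    w = tf.compat.v1.get_variable('kernel', shape=[x.shape[-1], units])
    b = tf.compat.v1.get_variable('bias', shape=[units],
                                  initializer=tf.compat.v1.zeros_initializer())
    return tf.matmul(x, w) + b

x = tf.compat.v1.placeholder(tf.float32, shape=[None, 8])
y1 = dense(x, 4)  # creates layer/kernel and layer/bias
y2 = dense(x, 4)  # reuses them; no "variable already exists" error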
Example #1
Source File: layer_collection.py From noisy-K-FAC with Apache License 2.0
def register_block(self, layer_key, fisher_block, reuse=VARIABLE_SCOPE):
  if reuse is VARIABLE_SCOPE:
    reuse = variable_scope.get_variable_scope().reuse

  if reuse is True or (reuse is variable_scope.AUTO_REUSE and
                       layer_key in self.fisher_blocks):
    result = self.fisher_blocks[layer_key]
    if type(result) != type(fisher_block):  # pylint: disable=unidiomatic-typecheck
      raise ValueError(
          "Attempted to register FisherBlock of type %s when existing "
          "FisherBlock has type %s." % (type(fisher_block), type(result)))
    return result

  if reuse is False and layer_key in self.fisher_blocks:
    raise ValueError("FisherBlock for %s is already in LayerCollection." %
                     (layer_key,))

  # Insert fisher_block into self.fisher_blocks.
  if layer_key in self.fisher_blocks:
    raise ValueError("Duplicate registration: {}".format(layer_key))
  # Raise an error if any variable in layer_key has been registered in any
  # other blocks.
  variable_to_block = {
      var: (params, block)
      for (params, block) in self.fisher_blocks.items()
      for var in ensure_sequence(params)
  }
  for variable in ensure_sequence(layer_key):
    if variable in variable_to_block:
      prev_key, prev_block = variable_to_block[variable]
      raise ValueError(
          "Attempted to register layer_key {} with block {}, but variable {}"
          " was already registered in key {} with block {}.".format(
              layer_key, fisher_block, variable, prev_key, prev_block))

  self.fisher_blocks[layer_key] = fisher_block
  return fisher_block
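The reuse argument above follows variable-scope semantics: when it is left at the VARIABLE_SCOPE sentinel, the enclosing scope's reuse flag decides whether re-registering a layer_key is an error or a lookup, so wrapping registration in an AUTO_REUSE scope makes it return the already-registered FisherBlock. A small sketch of just that inheritance step, using only the variable_scope module this page documents (the 'kfac' scope name is illustrative):

import tensorflow as tf
from tensorflow.python.ops import variable_scope

tf.compat.v1.disable_eager_execution()

with variable_scope.variable_scope('kfac', reuse=variable_scope.AUTO_REUSE):
  # This mirrors the `reuse = variable_scope.get_variable_scope().reuse`
  # line in register_block: the inherited value is the AUTO_REUSE sentinel.
  inherited = variable_scope.get_variable_scope().reuse
  print(inherited is variable_scope.AUTO_REUSE)  # True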
Example #2
Source File: early_stopping.py From estimator with Apache License 2.0
def _get_or_create_stop_var():
  with tf.compat.v1.variable_scope(
      name_or_scope='signal_early_stopping',
      values=[],
      reuse=tf.compat.v1.AUTO_REUSE):
    return tf.compat.v1.get_variable(
        name='STOP',
        shape=[],
        dtype=tf.dtypes.bool,
        initializer=tf.compat.v1.initializers.constant(False),
        collections=[tf.compat.v1.GraphKeys.GLOBAL_VARIABLES],
        trainable=False)
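A condensed version of the same get-or-create pattern (graph mode assumed) shows why AUTO_REUSE makes the helper safe to call more than once: both calls resolve to the same 'signal_early_stopping/STOP' variable instead of failing with "variable already exists".

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

def get_or_create_stop_var():
  with tf.compat.v1.variable_scope(
      'signal_early_stopping', reuse=tf.compat.v1.AUTO_REUSE):
    return tf.compat.v1.get_variable(
        'STOP', shape=[], dtype=tf.dtypes.bool,
        initializer=tf.compat.v1.initializers.constant(False),
        trainable=False)

a = get_or_create_stop_var()
b = get_or_create_stop_var()
print(a is b)  # True: the second call returns the existing variable.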
Example #3
Source File: early_stopping.py From estimator with Apache License 2.0
def _get_or_create_stop_var_with_aggregation(self):
  with variable_scope.variable_scope(
      name_or_scope='signal_early_stopping',
      values=[],
      reuse=variable_scope.AUTO_REUSE):
    return variable_scope.get_variable(
        name='STOP',
        shape=[],
        dtype=tf.dtypes.int32,
        initializer=init_ops.constant_initializer(0),
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        synchronization=variable_scope.VariableSynchronization.ON_WRITE,
        aggregation=variable_scope.VariableAggregation.SUM,
        trainable=False)
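The same variable can be requested through the public tf.compat.v1 aliases rather than the internal variable_scope, init_ops, and ops modules. A sketch of that equivalent call; note that ON_WRITE synchronization and SUM aggregation only have an effect when the variable is created under a tf.distribute strategy scope:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

def get_or_create_stop_var_with_aggregation():
  with tf.compat.v1.variable_scope(
      'signal_early_stopping', reuse=tf.compat.v1.AUTO_REUSE):
    return tf.compat.v1.get_variable(
        'STOP', shape=[], dtype=tf.dtypes.int32,
        initializer=tf.compat.v1.constant_initializer(0),
        collections=[tf.compat.v1.GraphKeys.GLOBAL_VARIABLES],
        synchronization=tf.VariableSynchronization.ON_WRITE,
        aggregation=tf.VariableAggregation.SUM,
        trainable=False)

stop_count = get_or_create_stop_var_with_aggregation()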
Example #4
Source File: tpu_estimator.py From Chinese-XLNet with Apache License 2.0
def _create_or_get_iterations_per_loop():
  """Creates or gets the iterations_per_loop variable.

  In TPUEstimator, the user-provided computation, the model_fn, is wrapped
  inside a tf.while_loop for peak performance. The iterations of the loop are
  specified by this variable, which adjusts its value on the CPU after each
  TPU program execution and before the next TPU execution.

  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the TPU training iterations according to the final
  steps specified by users. For example, if the user sets iterations_per_loop
  as 4 in TPUConfig and steps as 10 in TPUEstimator.train(), the
  iterations_per_loop variable will have the following value before each TPU
  training.

      - 1st TPU execution: iterations_per_loop = 4
      - 2nd TPU execution: iterations_per_loop = 4
      - 3rd TPU execution: iterations_per_loop = 2

  As model_fn increases the global step once per train_op invocation, the
  global step is 10 after all TPU executions, matching the steps=10 inputs
  passed in by users.

  Returns:
    A TF non-trainable resource variable.

  Raises:
    RuntimeError: If multiple iterations_per_loop variables were found.
  """
  graph = ops.get_default_graph()
  collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
  iter_vars = graph.get_collection(collection_name)
  if len(iter_vars) == 1:
    return iter_vars[0]
  elif len(iter_vars) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')

  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(
        _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          initializer=init_ops.zeros_initializer(),
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
          use_resource=True)
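Stripped of the TPU-specific constants, the pattern above is: look the variable up in a dedicated graph collection first, and fall back to creating it under AUTO_REUSE so that repeated calls in the same graph all resolve to a single variable. A self-contained sketch with illustrative names (graph mode assumed):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

_COLLECTION = 'my_estimator_iterations_per_loop'

def create_or_get_counter():
  existing = tf.compat.v1.get_default_graph().get_collection(_COLLECTION)
  if existing:
    return existing[0]  # Fast path: the variable was created earlier.
  with tf.compat.v1.variable_scope(
      'my_estimator', reuse=tf.compat.v1.AUTO_REUSE):
    return tf.compat.v1.get_variable(
        'iterations_per_loop', shape=[], dtype=tf.int32,
        initializer=tf.compat.v1.zeros_initializer(),
        trainable=False,
        collections=[_COLLECTION, tf.compat.v1.GraphKeys.LOCAL_VARIABLES],
        use_resource=True)

v1 = create_or_get_counter()
v2 = create_or_get_counter()
assert v1 is v2  # Both calls return the same non-trainable resource variable.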
Example #5
Source File: tpu_estimator.py From embedding-as-service with MIT License
The function body is identical to _create_or_get_iterations_per_loop shown in Example #4 above.
Example #6
Source File: tpu_estimator.py From transformer-xl with Apache License 2.0
Again identical to the _create_or_get_iterations_per_loop code shown in Example #4.
Example #7
Source File: tpu_estimator.py From xlnet with Apache License 2.0
Identical to the _create_or_get_iterations_per_loop code shown in Example #4.