Python tensorflow.init_scope() Examples
The following are 15 code examples of tensorflow.init_scope(), collected from open-source projects. Each example lists its source file, the project it comes from, and that project's license.
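For orientation before the project examples: tf.init_scope() lifts the code in its body out of any tf.function graph that is currently being traced and runs it eagerly (or in the outermost graph in TF1-style code), which is why it is the usual place for one-time setup such as variable creation inside traced functions. The snippet below is a minimal sketch of that behavior, not taken from any of the projects listed here; the function name create_state is made up for illustration.

import tensorflow as tf

@tf.function
def create_state():
    # Hypothetical helper, for illustration only.
    # While tracing a tf.function we are building a graph, so eager
    # execution is off here (prints False in TF 2.x).
    print("while tracing:", tf.executing_eagerly())
    with tf.init_scope():
        # init_scope lifts execution out of the function-building graph,
        # so this runs eagerly (prints True with TF 2.x eager defaults)
        # and is a safe place for one-time setup such as variable creation.
        print("inside init_scope:", tf.executing_eagerly())
    return tf.constant(0)

create_state()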
Example #1
Source File: train_policy.py From lm-human-preferences with MIT License
def tf_times():
    """Returns (time since start, time since last) as a tensorflow op."""
    # Keep track of start and last times
    with tf.init_scope():
        init = tf.timestamp()

        def make(name):
            return tf.Variable(init, name=name, trainable=False, use_resource=True)
        start = make('start_time')
        last = make('last_time')

    # Get new time and update last
    now = tf.timestamp()
    prev = last.read_value()
    with tf.control_dependencies([prev]):
        with tf.control_dependencies([last.assign(now)]):
            return tf.cast(now - start.read_value(), tf.float32), tf.cast(now - prev, tf.float32)
Example #2
Source File: metrics.py From larq with Apache License 2.0
def update_state(self, values, sample_weight=None):
    values = tf.cast(values, self.values_dtype)
    if not self.built:
        with tf.name_scope(self.name), tf.init_scope():
            self.build(values.shape)

    unchanged_values = tf.math.count_nonzero(
        tf.equal(self._previous_values, values)
    )
    flip_ratio = 1 - (
        tf.cast(unchanged_values, self.dtype) / tf.cast(self._size, self.dtype)
    )

    update_total_op = self.total.assign_add(flip_ratio * tf.sign(self.count))
    with tf.control_dependencies([update_total_op]):
        update_count_op = self.count.assign_add(1)
        with tf.control_dependencies([update_count_op]):
            return self._previous_values.assign(values)
Example #3
Source File: adversarial_regularization.py From neural-structured-learning with Apache License 2.0
def _clone_metrics(metrics):
    """Creates a copy of the maybe-nested metric specification.

    Args:
      metrics: A collection of metric specifications. Supports the same set of
        formats as the `metrics` argument in `tf.keras.Model.compile`.

    Returns:
      The same format as the `metrics` argument, with all `tf.keras.metric.Metric`
      objects replaced by their copies.
    """

    def clone(metric):
        # A `Metric` object is stateful and can only be used in 1 model on 1 output.
        # Cloning the object allows the same metric to be applied in both base and
        # adversarial-regularized models, and also on multiple outputs in one model.
        # The cloning logic is the same as the `clone_metric` function in
        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/metrics.py
        if not isinstance(metric, tf.keras.metrics.Metric):
            return metric
        with tf.init_scope():
            return metric.__class__.from_config(metric.get_config())

    return tf.nest.map_structure(clone, metrics)
Example #4
Source File: masks.py From ylg with GNU General Public License v3.0
def __call__(self):
    with tf.init_scope():
        if self.mode == 'interleave':
            return next(self.cycled_masks)
        elif self.mode == 'merged_head':
            # avoid re-computation
            if self.merged_head is None:
                nL = self.masks[0].shape[0]
                self.merged_head = tf.ones((nL, nL), dtype=tf.int32)
                for mask in self.masks:
                    self.merged_head = self.merged_head * mask
            return self.merged_head
        elif self.mode == 'heads':
            return np.array(self.masks)
        else:
            raise ValueError('Not supported attention mode')
Example #5
Source File: variables_helper.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def get_global_variables_safely():
    """If not executing eagerly, returns tf.global_variables().

    Raises a ValueError if eager execution is enabled, because the variables
    are not tracked when executing eagerly. If executing eagerly, use a Keras
    model's .variables property instead.

    Returns:
      The result of tf.global_variables()
    """
    with tf.init_scope():
        if tf.executing_eagerly():
            raise ValueError("Global variables collection is not tracked when "
                             "executing eagerly. Use a Keras model's `.variables` "
                             "attribute instead.")
        return tf.global_variables()
Example #6
Source File: core.py From lm-human-preferences with MIT License
def get_summary_writer(save_dir, subdir='', comm=MPI.COMM_WORLD):
    if comm.Get_rank() != 0:
        return None
    if save_dir is None:
        return None
    with tf.init_scope():
        return summary.create_file_writer(os.path.join(save_dir, 'tb', subdir))
Example #7
Source File: policy.py From lm-human-preferences with MIT License
def _set_initializers(self):
    """Change initializers to load a language model from a tensorflow checkpoint."""
    # Skip if
    # 1. We're not rank 0. Values will be copied from there.
    # 2. We want random initialization. Normal initialization will do the work.
    if not self.is_root or self.trained_model.name == 'test':
        return

    with tf.init_scope():
        scope = self.scope.name
        # Initialize!
        params = {v.op.name: v for v in utils.find_trainable_variables(scope)}
        self.trained_model.init_op(params, new_scope=scope)
Example #8
Source File: rewards.py From lm-human-preferences with MIT License
def _set_initializers(self):
    """Change initializers to load a language model from a tensorflow checkpoint."""
    # Skip if
    # 1. We're not rank 0. Values will be copied from there.
    # 2. We want random initialization. Normal initialization will do the work.
    if not self.is_root or self.trained_model.name == 'test':
        return

    with tf.init_scope():
        # Initialize!
        params = {v.op.name: v for v in utils.find_trainable_variables(self.scope)}
        assert params
        self.trained_model.init_op(params, new_scope=self.scope)
Example #9
Source File: rewards.py From lm-human-preferences with MIT License
def _set_initializers(self):
    """Change initializers to load a model from a tensorflow checkpoint."""
    if self.comm.Get_rank() > 0 or self.train_dir == 'test':
        return

    assert self.model.built
    checkpoint_scope = 'reward_model'

    with tf.init_scope():
        # Initialize!
        params = {v.op.name: v for v in self.get_params()}
        checkpoint = tf.train.latest_checkpoint(os.path.join(self.train_dir, 'checkpoints/'))
        available = tf.train.list_variables(checkpoint)
        unchanged = {}

        for name, shape in available:
            if not name.startswith(checkpoint_scope + '/'):
                # print('skipping', name)
                continue
            if name.endswith('adam') or name.endswith('adam_1'):
                # print('skipping', name)
                continue
            print('setting', name)
            var = params[self.scope + name[len(checkpoint_scope):]]
            assert var.shape == shape, 'Shape mismatch: %s.shape = %s != %s' % (var.op.name, var.shape, shape)
            unchanged[name] = var

        tf.train.init_from_checkpoint(checkpoint, unchanged)
Example #10
Source File: optimizers.py From larq with Apache License 2.0
def apply_gradients(self, grads_and_vars, name: Optional[str] = None, **kwargs):
    """Apply gradients to variables for each optimizer.

    On the first call to `apply_gradients()`, compute the mapping from
    variables to optimizers and cache it in the `self.var_opt_mapping` dict
    for serialization and faster access.
    """
    if self.var_opt_mapping is None:
        # Convert `grads_and_vars` to list so we can iterate multiple times over it
        grads_and_vars = list(grads_and_vars)
        self._compute_var_opt_mapping(grads_and_vars)

    # Split gradients and variables into a separate list for each optimizer
    grad_var_lists = [[] for _ in range(len(self.pred_opt_pairs) + 1)]
    for grad, var in grads_and_vars:
        if var.name in self.var_opt_mapping:
            grad_var_lists[self.var_opt_mapping[var.name]].append((grad, var))

    with tf.init_scope():
        for optimizer, opt_grads_and_vars in zip(self.optimizers, grad_var_lists):
            optimizer._create_slots([v for (_, v) in grads_and_vars])

    return tf.distribute.get_replica_context().merge_call(
        self._apply_gradients, args=(grad_var_lists, name), kwargs=kwargs
    )
Example #11
Source File: test_tf.py From delira with GNU Affero General Public License v3.0
def test_load_save_eager(self):
    import tensorflow as tf
    tf.enable_eager_execution()
    from delira.io.tf import load_checkpoint_eager, save_checkpoint_eager
    from delira.models import AbstractTfEagerNetwork

    import numpy as np

    class DummyNetwork(AbstractTfEagerNetwork):

        def __init__(self, in_channels, n_outputs):
            super().__init__(in_channels=in_channels, n_outputs=n_outputs)
            with tf.init_scope():
                self.net = self._build_model(in_channels, n_outputs)

        @staticmethod
        def _build_model(in_channels, n_outputs):
            return tf.keras.models.Sequential(
                layers=[
                    tf.keras.layers.Dense(64, input_shape=in_channels,
                                          bias_initializer='glorot_uniform'),
                    tf.keras.layers.ReLU(),
                    tf.keras.layers.Dense(n_outputs,
                                          bias_initializer='glorot_uniform')
                ])

        def call(self, inputs):
            return self.net(inputs)

    net = DummyNetwork((32,), 1)
    input_tensor = tf.constant(np.random.rand(1, 32).astype(np.float32))

    result_pre_save = net(input_tensor)
    save_checkpoint_eager("./model_eager", model=net)

    loaded_state = load_checkpoint_eager("./model_eager", model=net)
    loaded_net = loaded_state["model"]

    result_post_save = loaded_net(input_tensor)

    self.assertTrue(np.array_equal(result_post_save, result_pre_save))
Example #12
Source File: native_module.py From hub with Apache License 2.0
def __init__(self, spec, meta_graph, trainable, checkpoint_path, name):
    """Private constructor.

    Args:
      spec: _ModuleSpec instance.
      meta_graph: MetaGraphDef to use.
      trainable: whether module is trainable.
      checkpoint_path: None or a string to the variables checkpoints.
      name: variable and scope name where to instantiate the Module. Must be
        an unused name scope.
    """
    self._spec = spec
    self._meta_graph = meta_graph
    self._trainable = trainable
    self._checkpoint_path = checkpoint_path

    register_ops_if_needed({
        op.name for op in self._meta_graph.meta_info_def.stripped_op_list.op})

    if _is_tpu_graph_function():
        # TODO(b/129142908): Hub should not use `tf.init_scope` since that makes
        # it incompatible with tf.compat.v1.wrap_function. For now the only use
        # case where hub used it was for tpu compatibility. This should be
        # cleaned up at an early convenience.
        scope_func = tf.init_scope
    else:
        scope_func = lambda: tf.control_dependencies(None)

    # Clear dependencies so modules can be constructed from deep inside
    # functions that have dependencies active. Note that the dependencies
    # would be active when applying the Module signature, just not active
    # when creating the Module state. This use case has shown up in some
    # TPU training code.
    with scope_func():
        self._init_state(name)
Example #13
Source File: optimizers.py From kfac with Apache License 2.0
def _create_optimizer(self):
    """Initializes the hyperparameters and sets the self._optimizer property."""
    if self._optimizer:
        return
    if not self._layer_collection:
        self.register_layers(self._model, self._loss)

    if self._config['adapt_damping']:
        if 'train_batch' not in self._kfac_kwargs:
            raise ValueError('Must provide a train_batch tuple to use adaptive '
                             'damping. Use register_train_batch or pass it in '
                             'during optimizer construction.')
        if 'loss_fn' not in self._kfac_kwargs:
            self._kfac_kwargs['loss_fn'] = utils.get_loss_fn(
                self._model, self._loss, loss_weights=self._config['loss_weights'])

    with tf.name_scope(self._name):
        with tf.init_scope():
            # "iterations" property will create iterations if necessary.
            _ = self.iterations
            self._create_hypers()

    self._kfac_kwargs.update(self._hyper)
    try:
        # We use the TF 1 variable_scope instead of the TF 2 recommended
        # name_scope because we need to recover the variables created in this
        # scope, which is not possible with name_scope.
        with tf.variable_scope(self._tf_var_scope):
            self._optimizer = _KFAC_OPT_CLASS(
                layer_collection=self._layer_collection, **self._kfac_kwargs)
    except ValueError as e:
        msg = str(e)
        if re.search('Variable .* already exists', msg):
            raise ValueError(
                'You may have instantiated a KFAC Optimizer with the same name as '
                'an existing one. Try resetting the default graph, instantiating '
                'the optimizer with a different name, or changing the optimizer\'s '
                'name.\nHere is the original ValueError:\n ' + msg)
        elif re.search('Found the following errors with variable registration'
                       '.*gamma.*registered with wrong number of uses.*', msg):
            # We don't regex the name batch_normalization because the user could
            # have renamed the layer. We don't regex beta because they could have
            # used BatchNorm without the shift.
            raise ValueError(
                'There may have been an issue registering BatchNormalization. Try '
                'using tf.keras.backend.set_learning_phase before model '
                'construction. An alternative solution is to use the unfused '
                'batchnorm implementation (pass the argument fused=False to '
                'BatchNormalization).\nHere is the original ValueError:\n ' + msg)
        else:
            raise e
Example #14
Source File: grid_anchor_generator.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def _generate(self, feature_map_shape_list):
    """Generates a collection of bounding boxes to be used as anchors.

    Args:
      feature_map_shape_list: list of pairs of convnet layer resolutions in the
        format [(height_0, width_0)]. For example, setting
        feature_map_shape_list=[(8, 8)] asks for anchors that correspond to an
        8x8 layer. For this anchor generator, only lists of length 1 are allowed.

    Returns:
      boxes_list: a list of BoxLists each holding anchor boxes corresponding to
        the input feature map shapes.

    Raises:
      ValueError: if feature_map_shape_list, box_specs_list do not have the same
        length.
      ValueError: if feature_map_shape_list does not consist of pairs of integers.
    """
    if not (isinstance(feature_map_shape_list, list)
            and len(feature_map_shape_list) == 1):
        raise ValueError('feature_map_shape_list must be a list of length 1.')
    if not all([isinstance(list_item, tuple) and len(list_item) == 2
                for list_item in feature_map_shape_list]):
        raise ValueError('feature_map_shape_list must be a list of pairs.')

    # Create constants in init_scope so they can be created in tf.functions
    # and accessed from outside of the function.
    with tf.init_scope():
        self._base_anchor_size = tf.cast(
            tf.convert_to_tensor(self._base_anchor_size), dtype=tf.float32)
        self._anchor_stride = tf.cast(
            tf.convert_to_tensor(self._anchor_stride), dtype=tf.float32)
        self._anchor_offset = tf.cast(
            tf.convert_to_tensor(self._anchor_offset), dtype=tf.float32)

    grid_height, grid_width = feature_map_shape_list[0]
    scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales,
                                                   self._aspect_ratios)
    scales_grid = tf.reshape(scales_grid, [-1])
    aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1])
    anchors = tile_anchors(grid_height,
                           grid_width,
                           scales_grid,
                           aspect_ratios_grid,
                           self._base_anchor_size,
                           self._anchor_stride,
                           self._anchor_offset)

    num_anchors = anchors.num_boxes_static()
    if num_anchors is None:
        num_anchors = anchors.num_boxes()
    anchor_indices = tf.zeros([num_anchors])
    anchors.add_field('feature_map_index', anchor_indices)
    return [anchors]
Example #15
Source File: grid_anchor_generator.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def _generate(self, feature_map_shape_list):
    """Generates a collection of bounding boxes to be used as anchors.

    Args:
      feature_map_shape_list: list of pairs of convnet layer resolutions in the
        format [(height_0, width_0)]. For example, setting
        feature_map_shape_list=[(8, 8)] asks for anchors that correspond to an
        8x8 layer. For this anchor generator, only lists of length 1 are allowed.

    Returns:
      boxes_list: a list of BoxLists each holding anchor boxes corresponding to
        the input feature map shapes.

    Raises:
      ValueError: if feature_map_shape_list, box_specs_list do not have the same
        length.
      ValueError: if feature_map_shape_list does not consist of pairs of integers.
    """
    if not (isinstance(feature_map_shape_list, list)
            and len(feature_map_shape_list) == 1):
        raise ValueError('feature_map_shape_list must be a list of length 1.')
    if not all([isinstance(list_item, tuple) and len(list_item) == 2
                for list_item in feature_map_shape_list]):
        raise ValueError('feature_map_shape_list must be a list of pairs.')

    # Create constants in init_scope so they can be created in tf.functions
    # and accessed from outside of the function.
    with tf.init_scope():
        self._base_anchor_size = tf.cast(
            tf.convert_to_tensor(self._base_anchor_size), dtype=tf.float32)
        self._anchor_stride = tf.cast(
            tf.convert_to_tensor(self._anchor_stride), dtype=tf.float32)
        self._anchor_offset = tf.cast(
            tf.convert_to_tensor(self._anchor_offset), dtype=tf.float32)

    grid_height, grid_width = feature_map_shape_list[0]
    scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales,
                                                   self._aspect_ratios)
    scales_grid = tf.reshape(scales_grid, [-1])
    aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1])
    anchors = tile_anchors(grid_height,
                           grid_width,
                           scales_grid,
                           aspect_ratios_grid,
                           self._base_anchor_size,
                           self._anchor_stride,
                           self._anchor_offset)

    num_anchors = anchors.num_boxes_static()
    if num_anchors is None:
        num_anchors = anchors.num_boxes()
    anchor_indices = tf.zeros([num_anchors])
    anchors.add_field('feature_map_index', anchor_indices)
    return [anchors]