Python tensorflow.python.ops.variables.PartitionedVariable() Examples
The following are code examples of tensorflow.python.ops.variables.PartitionedVariable(), drawn from open-source projects. You can go to the original project or source file by following the link above each example; where the same snippet appears verbatim in several projects, it is listed once with the other sources noted. You may also want to check out all available functions/classes of the module tensorflow.python.ops.variables, or try the search function.
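For context: a PartitionedVariable is what TF1-style variable creation returns when a partitioner is in effect. It wraps the per-shard Variables and behaves like a single variable that supports len(), iteration over its shards, and conversion back into one concatenated tensor. A minimal sketch of creating and inspecting one, assuming TF1-compatible graph mode (the variable name and shapes here are illustrative, not taken from any example below):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    # Ask for a [10, 4] variable split into 2 shards along axis 0.
    w = tf.get_variable(
        "w", shape=[10, 4],
        partitioner=tf.fixed_size_partitioner(num_shards=2))

    print(len(w))                   # number of partitions: 2
    for part in w:                  # iterate over the underlying shard Variables
      print(part.shape)             # each shard is [5, 4]
    full = tf.convert_to_tensor(w)  # concatenates the shards back to [10, 4]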
Example #1
Source File: variables_test.py From deep_image_model with Apache License 2.0
def testPartitionedVariable(self):
  with tf.Graph().as_default():
    v0 = tf.Variable([0])
    v1 = tf.Variable([1])
    v0._set_save_slice_info(variables.Variable.SaveSliceInfo(
        v0.name, [2], [0], [1]))
    v1._set_save_slice_info(variables.Variable.SaveSliceInfo(
        v0.name, [2], [1], [1]))
    partitions = [2]

    # Pass variable_list as [v1, v0] to ensure they are properly
    # re-sorted to [v0, v1] based on their slice info offsets.
    partitioned_variable = variables.PartitionedVariable(
        name="two_vars",
        shape=[2],
        dtype=v0.dtype,
        variable_list=[v1, v0],
        partitions=partitions)

    concatenated = tf.convert_to_tensor(partitioned_variable)
    num_partitions = len(partitioned_variable)
    iterated_partitions = list(partitioned_variable)
    self.assertEqual(2, num_partitions)
    self.assertEqual([v0, v1], iterated_partitions)
    self.assertEqual([2], concatenated.get_shape())
Example #2
Source File: rnn_cell.py From Artificial-Neural-Network-THU-2018 with MIT License (the same snippet also appears verbatim in DeepAffinity's seq2seq_model.py, GtS's rnn_dropout.py, and lambda-packs' rnn_cell_impl.py)
def _rnn_get_variable(self, getter, *args, **kwargs):
  variable = getter(*args, **kwargs)
  trainable = (variable in tf_variables.trainable_variables() or
               (isinstance(variable, tf_variables.PartitionedVariable) and
                list(variable)[0] in tf_variables.trainable_variables()))
  if trainable and variable not in self._trainable_weights:
    self._trainable_weights.append(variable)
  elif not trainable and variable not in self._non_trainable_weights:
    self._non_trainable_weights.append(variable)
  return variable
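_rnn_get_variable is meant to be installed as a custom getter, so every variable the RNN cell creates passes through it and is filed into the layer's trainable or non-trainable weight lists. The PartitionedVariable branch is needed because the wrapper object itself is never placed in the trainable-variables collection, only its shards are, so trainability is checked on the first shard. A hedged sketch of how such a getter is typically wired up (the scope and variable names are illustrative):

    import tensorflow.compat.v1 as tf

    def logging_getter(getter, *args, **kwargs):
      # Delegate the actual creation, then inspect the result,
      # mirroring the _rnn_get_variable pattern above.
      variable = getter(*args, **kwargs)
      print("created:", variable.name)
      return variable

    with tf.variable_scope("rnn", custom_getter=logging_getter):
      # Every get_variable call inside this scope is routed
      # through logging_getter before the variable is returned.
      kernel = tf.get_variable("kernel", shape=[4, 4])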
Example #3
Source File: layers.py From tensornets with MIT License (the same helper also appears verbatim in tf-slim, keras-lambda, lambda-packs, and auto-alt-text-lambda-api)
def _add_variable_to_collections(variable, collections_set, collections_name):
  """Adds variable (or all its parts) to all collections with that name."""
  collections = utils.get_variable_collections(collections_set,
                                               collections_name) or []
  variables_list = [variable]
  if isinstance(variable, tf_variables.PartitionedVariable):
    variables_list = [v for v in variable]
  for collection in collections:
    for var in variables_list:
      if var not in ops.get_collection(collection):
        ops.add_to_collection(collection, var)
Example #4
Source File: linear_testing_utils_v1.py From estimator with Apache License 2.0
def begin(self):
  with tf.compat.v1.variable_scope(
      tf.compat.v1.get_variable_scope()) as scope:
    scope.reuse_variables()
    partitioned_weight = tf.compat.v1.get_variable(
        self._var_name, shape=(self._var_dim, 1))
    self._test_case.assertTrue(
        isinstance(partitioned_weight, variables_lib.PartitionedVariable))
    for part in partitioned_weight:
      self._test_case.assertEqual(self._var_dim // self._partitions,
                                  part.get_shape()[0])
Example #5
Source File: sdca_ops.py From estimator with Apache License 2.0
def _create_slots(self):
  """Make unshrunk internal variables (slots)."""
  # Unshrunk variables have the updates before applying L1 regularization.
  # Each unshrunk slot variable is either a `Variable` or list of
  # `Variable`, depending on the value of its corresponding primary variable.
  # We avoid using `PartitionedVariable` for the unshrunk slots since we do
  # not need any of the extra information.
  self._slots = collections.defaultdict(list)
  for name in ['sparse_features_weights', 'dense_features_weights']:
    for var in self._variables[name]:
      # Our primary variable may be either a PartitionedVariable, or a list
      # of Variables (each representing a partition).
      if (isinstance(var, var_ops.PartitionedVariable) or
          isinstance(var, list)):
        var_list = []
        for v in var:
          with ops.colocate_with(v):
            slot_var = tf.Variable(
                initial_value=tf.compat.v1.zeros_like(
                    v.initialized_value(), tf.dtypes.float32),
                name=v.op.name + '_unshrunk')
            var_list.append(slot_var)
        self._slots['unshrunk_' + name].append(var_list)
      else:
        with tf.compat.v1.device(var.device):
          self._slots['unshrunk_' + name].append(
              tf.Variable(
                  tf.compat.v1.zeros_like(var.initialized_value(),
                                          tf.dtypes.float32),
                  name=var.op.name + '_unshrunk'))
Example #6
Source File: sdca_ops.py From estimator with Apache License 2.0
def _var_to_list(self, var):
  """Wraps var in a list if it is not a list or PartitionedVariable."""
  if not isinstance(var, (list, var_ops.PartitionedVariable)):
    var = [var]
  return var
Example #7
Source File: sdca_ops.py From estimator with Apache License 2.0
def _convert_n_to_tensor(self, input_list, as_ref=False):
  """Converts input list to a set of tensors."""
  # input_list can be a list of Variables (that are implicitly partitioned),
  # in which case the underlying logic in internal_convert_to_tensor will not
  # concatenate the partitions together. This method takes care of the
  # concatenating (we only allow partitioning on the first axis).
  output_list = []
  for x in input_list:
    tensor_to_convert = x
    if isinstance(x, list) or isinstance(x, var_ops.PartitionedVariable):
      # We only allow for partitioning on the first axis.
      tensor_to_convert = tf.concat(x, axis=0)
    output_list.append(
        internal_convert_to_tensor(tensor_to_convert, as_ref=as_ref))
  return output_list
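The helper above flattens a partition list back into a single tensor before conversion; the same first-axis concatenation works directly on a PartitionedVariable, since iterating it yields the shard Variables in order. A small sketch under the same assumption that partitioning is along axis 0 (the variable name and shape are illustrative):

    import tensorflow.compat.v1 as tf

    w = tf.get_variable(
        "weights", shape=[6, 2],
        partitioner=tf.fixed_size_partitioner(num_shards=3))

    # Concatenating the shards along axis 0 reconstructs the full [6, 2] value.
    full = tf.concat(list(w), axis=0)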
Example #8
Source File: rnn_cell_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _rnn_get_variable(self, getter, *args, **kwargs):
  variable = getter(*args, **kwargs)
  if context.in_graph_mode():
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
  else:
    trainable = variable._trainable  # pylint: disable=protected-access
  if trainable and variable not in self._trainable_weights:
    self._trainable_weights.append(variable)
  elif not trainable and variable not in self._non_trainable_weights:
    self._non_trainable_weights.append(variable)
  return variable
Example #9
Source File: feature_column.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License (the same code also appears in lambda-packs' feature_column.py)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
  # Get sparse IDs and weights.
  sparse_tensors = self.categorical_column._get_sparse_tensors(  # pylint: disable=protected-access
      inputs, weight_collections=weight_collections, trainable=trainable)
  sparse_ids = sparse_tensors.id_tensor
  sparse_weights = sparse_tensors.weight_tensor

  # Create embedding weight, and restore from checkpoint if necessary.
  embedding_weights = variable_scope.get_variable(
      name='embedding_weights',
      shape=(self.categorical_column._num_buckets, self.dimension),  # pylint: disable=protected-access
      dtype=dtypes.float32,
      initializer=self.initializer,
      trainable=self.trainable and trainable,
      collections=weight_collections)
  if self.ckpt_to_load_from is not None:
    to_restore = embedding_weights
    if isinstance(to_restore, variables.PartitionedVariable):
      to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
    checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
        self.tensor_name_in_ckpt: to_restore
    })

  # Return embedding lookup result.
  return _safe_embedding_lookup_sparse(
      embedding_weights=embedding_weights,
      sparse_ids=sparse_ids,
      sparse_weights=sparse_weights,
      combiner=self.combiner,
      name='%s_weights' % self.name,
      max_norm=self.max_norm)
Example #10
Source File: embedding_ops.py From keras-lambda with MIT License (the same code also appears in auto-alt-text-lambda-api's embedding_ops.py)
def embedding_lookup_unique(params, ids, name=None):
  """Version of embedding_lookup that avoids duplicate lookups.

  This can save communication in the case of repeated ids.
  Same interface as embedding_lookup. Except it supports multi-dimensional
  `ids` which allows to not reshape input/output to fit gather.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
      the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    shape = array_ops.shape(ids)
    ids_flat = array_ops.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    unique_ids, idx = array_ops.unique(ids_flat)
    unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
    embeds_flat = array_ops.gather(unique_embeddings, idx)
    embed_shape = array_ops.concat(
        [shape, array_ops.shape(unique_embeddings)[1:]], 0)
    embeds = array_ops.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(
        unique_embeddings.get_shape()[1:]))
    return embeds
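As the docstring notes, params may be a list of shard tensors or a PartitionedVariable, which is what makes this useful for sharded embedding tables; the plain embedding_lookup it wraps accepts the same forms. A rough usage sketch in TF1-compat mode (the table shape and ids are illustrative):

    import tensorflow.compat.v1 as tf

    embeddings = tf.get_variable(
        "embeddings", shape=[100, 8],
        partitioner=tf.fixed_size_partitioner(num_shards=4))

    # Repeated ids: a unique-lookup variant gathers each distinct row once,
    # then scatters the results back into the original order.
    ids = tf.constant([[3, 3], [7, 3]])
    looked_up = tf.nn.embedding_lookup(embeddings, ids)  # shape [2, 2, 8]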
Example #11
Source File: embedding_ops.py From deep_image_model with Apache License 2.0 (an older copy of Example #10's snippet, using the pre-1.0 tf.concat argument order with the axis first)
def embedding_lookup_unique(params, ids, name=None):
  """Version of embedding_lookup that avoids duplicate lookups.

  This can save communication in the case of repeated ids.
  Same interface as embedding_lookup. Except it supports multi-dimensional
  `ids` which allows to not reshape input/output to fit gather.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
      the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    shape = array_ops.shape(ids)
    ids_flat = array_ops.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    unique_ids, idx = array_ops.unique(ids_flat)
    unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
    embeds_flat = array_ops.gather(unique_embeddings, idx)
    embed_shape = array_ops.concat(
        0, [shape, array_ops.shape(unique_embeddings)[1:]])
    embeds = array_ops.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(
        unique_embeddings.get_shape()[1:]))
    return embeds