Python tensorflow.contrib.framework.python.ops.variables.model_variable() Examples
The following are 29 code examples of tensorflow.contrib.framework.python.ops.variables.model_variable(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.framework.python.ops.variables, or try the search function.
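Before the examples, a minimal usage sketch may help: model_variable() creates a variable and registers it in both the GLOBAL_VARIABLES and MODEL_VARIABLES collections so that slim/contrib utilities can retrieve it later. The sketch assumes a TensorFlow 1.x environment where tf.contrib is still available; the variable name, shape, and regularizer are illustrative assumptions, not taken from the examples below.

# Minimal sketch (TensorFlow 1.x; requires tf.contrib). The name, shape, and
# regularizer here are illustrative assumptions, not from the examples below.
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import variables

weights = variables.model_variable(
    'conv_weights',                 # hypothetical variable name
    shape=[3, 3, 64, 128],          # hypothetical filter shape
    dtype=tf.float32,
    initializer=tf.truncated_normal_initializer(stddev=0.1),
    regularizer=tf.contrib.layers.l2_regularizer(1e-4),
    trainable=True)

# The variable is now visible to model-variable helpers, e.g.:
model_vars = tf.contrib.framework.get_model_variables()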
Example #1
Source File: layers.py From lambda-packs with MIT License | 6 votes |
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, use_resource=None,
                           **_):
  """Getter that uses model_variable for compatibility with core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter, use_resource=use_resource)
Example #2
Source File: convolution.py From tf-imagenet with Apache License 2.0 | 6 votes |
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, use_resource=None,
                           **_):
  """Getter that uses model_variable for compatibility with core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter, use_resource=use_resource)
Example #3
Source File: depth_conv2d.py From X-Detector with Apache License 2.0 | 5 votes |
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, use_resource=None,
                           **_):
  """Getter that uses model_variable for compatibility with core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter, use_resource=use_resource)
Example #4
Source File: lstm1d.py From lambda-packs with MIT License | 5 votes |
def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
  """Run a softmax layer over all the time steps of an input sequence.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: output depth
    scope: optional scope name
    name: optional name for output tensor
    linear_name: name for linear (pre-softmax) output

  Returns:
    A tensor of size (length, batch_size, noutput).
  """
  length, _, ninputs = _shape(inputs)
  inputs_u = array_ops.unstack(inputs)
  output_u = []
  with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
    initial_w = random_ops.truncated_normal([0 + ninputs, noutput], stddev=0.1)
    initial_b = constant_op.constant(0.1, shape=[noutput])
    w = variables.model_variable("weights", initializer=initial_w)
    b = variables.model_variable("biases", initializer=initial_b)
    for i in xrange(length):
      with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
                                         [inputs_u[i]]):
        # TODO(tmb) consider using slim.fully_connected(...,
        # activation_fn=tf.nn.softmax)
        linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
        output = nn_ops.softmax(linear)
        output_u += [output]
    outputs = array_ops.stack(output_u, name=name)
    return outputs
Example #5
Source File: feature_column_ops.py From keras-lambda with MIT License | 5 votes |
def _create_embedding_lookup(column,
                             columns_to_tensors,
                             embedding_lookup_arguments,
                             num_outputs,
                             trainable,
                             weight_collections):
  """Creates variables and returns predictions for linear weights in a model.

  Args:
    column: the column we're working on.
    columns_to_tensors: a map from column name to tensors.
    embedding_lookup_arguments: arguments for embedding lookup.
    num_outputs: how many outputs.
    trainable: whether the variable we create is trainable.
    weight_collections: weights will be placed here.

  Returns:
    variables: the created embeddings.
    predictions: the computed predictions.
  """
  with variable_scope.variable_scope(
      None, default_name=column.name,
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[embedding_lookup_arguments.vocab_size, num_outputs],
        dtype=dtypes.float32,
        initializer=embedding_lookup_arguments.initializer,
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        embedding_lookup_arguments.input_tensor,
        sparse_weights=embedding_lookup_arguments.weight_tensor,
        combiner=embedding_lookup_arguments.combiner,
        name=column.name + '_weights')
    return variable, predictions
Example #6
Source File: layers.py From keras-lambda with MIT License | 5 votes |
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, **_):
  """Getter that uses model_variable for compatibility with core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter)
Example #7
Source File: lstm1d.py From keras-lambda with MIT License | 5 votes |
def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
  """Run a softmax layer over all the time steps of an input sequence.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: output depth
    scope: optional scope name
    name: optional name for output tensor
    linear_name: name for linear (pre-softmax) output

  Returns:
    A tensor of size (length, batch_size, noutput).
  """
  length, _, ninputs = _shape(inputs)
  inputs_u = array_ops.unstack(inputs)
  output_u = []
  with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
    initial_w = random_ops.truncated_normal([0 + ninputs, noutput], stddev=0.1)
    initial_b = constant_op.constant(0.1, shape=[noutput])
    w = variables.model_variable("weights", initializer=initial_w)
    b = variables.model_variable("biases", initializer=initial_b)
    for i in xrange(length):
      with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
                                         [inputs_u[i]]):
        # TODO(tmb) consider using slim.fully_connected(...,
        # activation_fn=tf.nn.softmax)
        linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
        output = nn_ops.softmax(linear)
        output_u += [output]
    outputs = array_ops.stack(output_u, name=name)
    return outputs
Example #8
Source File: lstm_cells.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def init_state(self, state_name, batch_size, dtype, learned_state=False):
  """Creates an initial state compatible with this cell.

  Args:
    state_name: name of the state tensor
    batch_size: model batch size
    dtype: dtype for the tensor values i.e. tf.float32
    learned_state: whether the initial state should be learnable. If false,
      the initial state is set to all 0's

  Returns:
    The created initial state.
  """
  state_size = (
      self.state_size_flat if self._flattened_state else self.state_size)
  # list of 2 zero tensors or variables tensors, depending on if
  # learned_state is true
  ret_flat = [(variables.model_variable(
      state_name + str(i),
      shape=s,
      dtype=dtype,
      initializer=tf.truncated_normal_initializer(stddev=0.03))
               if learned_state else tf.zeros(
                   [batch_size] + s, dtype=dtype, name=state_name))
              for i, s in enumerate(state_size)]

  # duplicates initial state across the batch axis if it's learned
  if learned_state:
    ret_flat = [
        tf.stack([tensor for i in range(int(batch_size))])
        for tensor in ret_flat
    ]
  for s, r in zip(state_size, ret_flat):
    r.set_shape([None] + s)
  return tf.contrib.framework.nest.pack_sequence_as(
      structure=[1, 1], flat_sequence=ret_flat)
Example #9
Source File: lstm_cells.py From models with Apache License 2.0 | 5 votes |
def init_state(self, state_name, batch_size, dtype, learned_state=False):
  """Creates an initial state compatible with this cell.

  Args:
    state_name: name of the state tensor
    batch_size: model batch size
    dtype: dtype for the tensor values i.e. tf.float32
    learned_state: whether the initial state should be learnable. If false,
      the initial state is set to all 0's

  Returns:
    ret: the created initial state
  """
  state_size = (
      self.state_size_flat if self._flatten_state else self.state_size)
  # list of 2 zero tensors or variables tensors,
  # depending on if learned_state is true
  # pylint: disable=g-long-ternary,g-complex-comprehension
  ret_flat = [(contrib_variables.model_variable(
      state_name + str(i),
      shape=s,
      dtype=dtype,
      initializer=tf.truncated_normal_initializer(stddev=0.03))
               if learned_state else tf.zeros(
                   [batch_size] + s, dtype=dtype, name=state_name))
              for i, s in enumerate(state_size)]

  # duplicates initial state across the batch axis if it's learned
  if learned_state:
    ret_flat = [tf.stack([tensor for i in range(int(batch_size))])
                for tensor in ret_flat]
  for s, r in zip(state_size, ret_flat):
    r = tf.reshape(r, [-1] + s)
  ret = tf.nest.pack_sequence_as(structure=[1, 1], flat_sequence=ret_flat)
  return ret
Example #10
Source File: lstm_cells.py From models with Apache License 2.0 | 5 votes |
def init_state(self, state_name, batch_size, dtype, learned_state=False):
  """Creates an initial state compatible with this cell.

  Args:
    state_name: name of the state tensor
    batch_size: model batch size
    dtype: dtype for the tensor values i.e. tf.float32
    learned_state: whether the initial state should be learnable. If false,
      the initial state is set to all 0's

  Returns:
    The created initial state.
  """
  state_size = (
      self.state_size_flat if self._flatten_state else self.state_size)
  # list of 2 zero tensors or variables tensors, depending on if
  # learned_state is true
  # pylint: disable=g-long-ternary,g-complex-comprehension
  ret_flat = [(contrib_variables.model_variable(
      state_name + str(i),
      shape=s,
      dtype=dtype,
      initializer=tf.truncated_normal_initializer(stddev=0.03))
               if learned_state else tf.zeros(
                   [batch_size] + s, dtype=dtype, name=state_name))
              for i, s in enumerate(state_size)]

  # duplicates initial state across the batch axis if it's learned
  if learned_state:
    ret_flat = [
        tf.stack([tensor for i in range(int(batch_size))])
        for tensor in ret_flat
    ]
  for s, r in zip(state_size, ret_flat):
    r.set_shape([None] + s)
  return tf.nest.pack_sequence_as(structure=[1, 1], flat_sequence=ret_flat)
Example #11
Source File: lstm_cells.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def init_state(self, state_name, batch_size, dtype, learned_state=False):
  """Creates an initial state compatible with this cell.

  Args:
    state_name: name of the state tensor
    batch_size: model batch size
    dtype: dtype for the tensor values i.e. tf.float32
    learned_state: whether the initial state should be learnable. If false,
      the initial state is set to all 0's

  Returns:
    The created initial state.
  """
  state_size = (
      self.state_size_flat if self._flattened_state else self.state_size)
  # list of 2 zero tensors or variables tensors, depending on if
  # learned_state is true
  ret_flat = [(variables.model_variable(
      state_name + str(i),
      shape=s,
      dtype=dtype,
      initializer=tf.truncated_normal_initializer(stddev=0.03))
               if learned_state else tf.zeros(
                   [batch_size] + s, dtype=dtype, name=state_name))
              for i, s in enumerate(state_size)]

  # duplicates initial state across the batch axis if it's learned
  if learned_state:
    ret_flat = [
        tf.stack([tensor for i in range(int(batch_size))])
        for tensor in ret_flat
    ]
  for s, r in zip(state_size, ret_flat):
    r.set_shape([None] + s)
  return tf.contrib.framework.nest.pack_sequence_as(
      structure=[1, 1], flat_sequence=ret_flat)
Example #12
Source File: pruning_layers.py From rigl with Apache License 2.0 | 5 votes |
def get_model_variables(getter,
                        name,
                        shape=None,
                        dtype=None,
                        initializer=None,
                        regularizer=None,
                        trainable=True,
                        collections=None,
                        caching_device=None,
                        partitioner=None,
                        rename=None,
                        use_resource=None,
                        **_):
  """This ensure variables are retrieved in a consistent way for core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter, use_resource=use_resource)
Example #13
Source File: mmnet_utils.py From MMNet with Apache License 2.0 | 5 votes |
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, use_resource=None,
                           **_):
  """Getter that uses model_variable for compatibility with core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter, use_resource=use_resource)
Example #14
Source File: feature_column_ops.py From deep_image_model with Apache License 2.0 | 5 votes |
def _create_embedding_lookup(column,
                             columns_to_tensors,
                             embedding_lookup_arguments,
                             num_outputs,
                             trainable,
                             weight_collections):
  """Creates variables and returns predictions for linear weights in a model.

  Args:
    column: the column we're working on.
    columns_to_tensors: a map from column name to tensors.
    embedding_lookup_arguments: arguments for embedding lookup.
    num_outputs: how many outputs.
    trainable: whether the variable we create is trainable.
    weight_collections: weights will be placed here.

  Returns:
    variables: the created embeddings.
    predictions: the computed predictions.
  """
  with variable_scope.variable_scope(
      None, default_name=column.name,
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[embedding_lookup_arguments.vocab_size, num_outputs],
        dtype=dtypes.float32,
        initializer=embedding_lookup_arguments.initializer,
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        embedding_lookup_arguments.input_tensor,
        sparse_weights=embedding_lookup_arguments.weight_tensor,
        combiner=embedding_lookup_arguments.combiner,
        name=column.name + '_weights')
    return variable, predictions
Example #15
Source File: depth_conv2d.py From tf.fashionAI with Apache License 2.0 | 5 votes |
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, use_resource=None,
                           **_):
  """Getter that uses model_variable for compatibility with core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter, use_resource=use_resource)
Example #16
Source File: preact_conv.py From tensorflow-litterbox with Apache License 2.0 | 5 votes |
def preact_conv2d(
    inputs,
    num_outputs,
    kernel_size,
    stride=1,
    padding='SAME',
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a 2D convolution preceded by batch normalization and activation."""
  with variable_scope.variable_scope(
      scope, 'Conv', values=[inputs], reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    if normalizer_fn:
      normalizer_params = normalizer_params or {}
      inputs = normalizer_fn(inputs, activation_fn=activation_fn,
                             **normalizer_params)
    kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
    stride_h, stride_w = utils.two_element_tuple(stride)
    num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    weights_shape = [kernel_h, kernel_w, num_filters_in, num_outputs]
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')
    weights = variables.model_variable('weights',
                                       shape=weights_shape,
                                       dtype=dtype,
                                       initializer=weights_initializer,
                                       regularizer=weights_regularizer,
                                       collections=weights_collections,
                                       trainable=trainable)
    outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
                        padding=padding)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
Example #17
Source File: feature_column_ops.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def _create_embedding_lookup(column,
                             columns_to_tensors,
                             embedding_lookup_arguments,
                             num_outputs,
                             trainable,
                             weight_collections):
  """Creates variables and returns predictions for linear weights in a model.

  Args:
    column: the column we're working on.
    columns_to_tensors: a map from column name to tensors.
    embedding_lookup_arguments: arguments for embedding lookup.
    num_outputs: how many outputs.
    trainable: whether the variable we create is trainable.
    weight_collections: weights will be placed here.

  Returns:
    variables: the created embeddings.
    predictions: the computed predictions.
  """
  with variable_scope.variable_scope(
      None, default_name=column.name,
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[embedding_lookup_arguments.vocab_size, num_outputs],
        dtype=dtypes.float32,
        initializer=embedding_lookup_arguments.initializer,
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        embedding_lookup_arguments.input_tensor,
        sparse_weights=embedding_lookup_arguments.weight_tensor,
        combiner=embedding_lookup_arguments.combiner,
        name=column.name + '_weights')
    return variable, predictions
Example #18
Source File: layers.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, **_):
  """Getter that uses model_variable for compatibility with core layers."""
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, collections=collections, trainable=trainable,
      caching_device=caching_device, partitioner=partitioner,
      custom_getter=getter)
Example #19
Source File: lstm1d.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
  """Run a softmax layer over all the time steps of an input sequence.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: output depth
    scope: optional scope name
    name: optional name for output tensor
    linear_name: name for linear (pre-softmax) output

  Returns:
    A tensor of size (length, batch_size, noutput).
  """
  length, _, ninputs = _shape(inputs)
  inputs_u = array_ops.unstack(inputs)
  output_u = []
  with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
    initial_w = random_ops.truncated_normal([0 + ninputs, noutput], stddev=0.1)
    initial_b = constant_op.constant(0.1, shape=[noutput])
    w = variables.model_variable("weights", initializer=initial_w)
    b = variables.model_variable("biases", initializer=initial_b)
    for i in xrange(length):
      with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
                                         [inputs_u[i]]):
        # TODO(tmb) consider using slim.fully_connected(...,
        # activation_fn=tf.nn.softmax)
        linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
        output = nn_ops.softmax(linear)
        output_u += [output]
    outputs = array_ops.stack(output_u, name=name)
    return outputs
Example #20
Source File: feature_column_ops.py From lambda-packs with MIT License | 5 votes |
def _create_embedding_lookup(column,
                             columns_to_tensors,
                             embedding_lookup_arguments,
                             num_outputs,
                             trainable,
                             weight_collections):
  """Creates variables and returns predictions for linear weights in a model.

  Args:
    column: the column we're working on.
    columns_to_tensors: a map from column name to tensors.
    embedding_lookup_arguments: arguments for embedding lookup.
    num_outputs: how many outputs.
    trainable: whether the variable we create is trainable.
    weight_collections: weights will be placed here.

  Returns:
    variables: the created embeddings.
    predictions: the computed predictions.
  """
  with variable_scope.variable_scope(
      None, default_name=column.name,
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[embedding_lookup_arguments.vocab_size, num_outputs],
        dtype=dtypes.float32,
        initializer=embedding_lookup_arguments.initializer,
        trainable=trainable,
        collections=weight_collections)
    if fc._is_variable(variable):  # pylint: disable=protected-access
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        embedding_lookup_arguments.input_tensor,
        sparse_weights=embedding_lookup_arguments.weight_tensor,
        combiner=embedding_lookup_arguments.combiner,
        name=column.name + '_weights')
    return variable, predictions
Example #21
Source File: feature_column_ops.py From deep_image_model with Apache License 2.0 | 4 votes |
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight."""
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        sparse_tensor_py.SparseTensor(t.indices, values, t.shape))
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights',
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer,
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        combiner='sum',
        name='_weights')
    return variable, predictions
Example #22
Source File: encoders.py From deep_image_model with Apache License 2.0 | 4 votes |
def bow_encoder(ids,
                vocab_size,
                embed_dim,
                sparse_lookup=True,
                initializer=None,
                regularizer=None,
                trainable=True,
                scope=None,
                reuse=None):
  """Maps a sequence of symbols to a vector per example by averaging embeddings.

  Args:
    ids: `[batch_size, doc_length]` `Tensor` or `SparseTensor` of type
      `int32` or `int64` with symbol ids.
    vocab_size: Integer number of symbols in vocabulary.
    embed_dim: Integer number of dimensions for embedding matrix.
    sparse_lookup: `bool`, if `True`, converts ids to a `SparseTensor`
      and performs a sparse embedding lookup. This is usually faster,
      but not desirable if padding tokens should have an embedding. Empty rows
      are assigned a special embedding.
    initializer: An initializer for the embeddings, if `None` default for
      current scope is used.
    regularizer: Optional regularizer for the embeddings.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional string specifying the variable scope for the op, required
      if `reuse=True`.
    reuse: If `True`, variables inside the op will be reused.

  Returns:
    Encoding `Tensor` `[batch_size, embed_dim]` produced by averaging
    embeddings.

  Raises:
    ValueError: If `embed_dim` or `vocab_size` are not specified.
  """
  if not vocab_size or not embed_dim:
    raise ValueError('Must specify vocab size and embedding dimension')
  with variable_scope.variable_scope(
      scope, 'bow_encoder', [ids], reuse=reuse):
    embeddings = variables.model_variable(
        'embeddings', shape=[vocab_size, embed_dim],
        initializer=initializer, regularizer=regularizer,
        trainable=trainable)
    if sparse_lookup:
      if isinstance(ids, sparse_tensor.SparseTensor):
        sparse_ids = ids
      else:
        sparse_ids = sparse_ops.dense_to_sparse_tensor(ids)
      return contrib_embedding_ops.safe_embedding_lookup_sparse(
          [embeddings], sparse_ids, combiner='mean', default_id=0)
    else:
      if isinstance(ids, sparse_tensor.SparseTensor):
        raise TypeError('ids are expected to be dense Tensor, got: %s', ids)
      return math_ops.reduce_mean(
          embedding_ops.embedding_lookup(embeddings, ids),
          reduction_indices=1)
Example #23
Source File: encoders.py From deep_image_model with Apache License 2.0 | 4 votes |
def embed_sequence(ids,
                   vocab_size=None,
                   embed_dim=None,
                   unique=False,
                   initializer=None,
                   regularizer=None,
                   trainable=True,
                   scope=None,
                   reuse=None):
  """Maps a sequence of symbols to a sequence of embeddings.

  Typical use case would be reusing embeddings between an encoder and decoder.

  Args:
    ids: `[batch_size, doc_length]` `Tensor` of type `int32` or `int64`
      with symbol ids.
    vocab_size: Integer number of symbols in vocabulary.
    embed_dim: Integer number of dimensions for embedding matrix.
    unique: If `True`, will first compute the unique set of indices, and then
      lookup each embedding once, repeating them in the output as needed.
    initializer: An initializer for the embeddings, if `None` default for
      current scope is used.
    regularizer: Optional regularizer for the embeddings.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    scope: Optional string specifying the variable scope for the op, required
      if `reuse=True`.
    reuse: If `True`, variables inside the op will be reused.

  Returns:
    `Tensor` of `[batch_size, doc_length, embed_dim]` with embedded sequences.

  Raises:
    ValueError: if `embed_dim` or `vocab_size` are not specified when not
      `reuse` is `None` or `False`.
  """
  if not (reuse or (vocab_size and embed_dim)):
    raise ValueError('Must specify vocab size and embedding dimension when not'
                     'reusing. Got vocab_size=%s and embed_dim=%s' % (
                         vocab_size, embed_dim))
  with variable_scope.variable_scope(
      scope, 'EmbedSequence', [ids], reuse=reuse):
    shape = [vocab_size, embed_dim]
    if reuse and vocab_size is None or embed_dim is None:
      shape = None
    embeddings = variables.model_variable(
        'embeddings', shape=shape,
        initializer=initializer, regularizer=regularizer,
        trainable=trainable)
    if unique:
      return contrib_embedding_ops.embedding_lookup_unique(embeddings, ids)
    return embedding_ops.embedding_lookup(embeddings, ids)
Example #24
Source File: encoders.py From auto-alt-text-lambda-api with MIT License | 4 votes |
def bow_encoder(ids,
                vocab_size,
                embed_dim,
                sparse_lookup=True,
                initializer=None,
                regularizer=None,
                trainable=True,
                scope=None,
                reuse=None):
  """Maps a sequence of symbols to a vector per example by averaging embeddings.

  Args:
    ids: `[batch_size, doc_length]` `Tensor` or `SparseTensor` of type
      `int32` or `int64` with symbol ids.
    vocab_size: Integer number of symbols in vocabulary.
    embed_dim: Integer number of dimensions for embedding matrix.
    sparse_lookup: `bool`, if `True`, converts ids to a `SparseTensor`
      and performs a sparse embedding lookup. This is usually faster,
      but not desirable if padding tokens should have an embedding. Empty rows
      are assigned a special embedding.
    initializer: An initializer for the embeddings, if `None` default for
      current scope is used.
    regularizer: Optional regularizer for the embeddings.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional string specifying the variable scope for the op, required
      if `reuse=True`.
    reuse: If `True`, variables inside the op will be reused.

  Returns:
    Encoding `Tensor` `[batch_size, embed_dim]` produced by averaging
    embeddings.

  Raises:
    ValueError: If `embed_dim` or `vocab_size` are not specified.
  """
  if not vocab_size or not embed_dim:
    raise ValueError('Must specify vocab size and embedding dimension')
  with variable_scope.variable_scope(
      scope, 'bow_encoder', [ids], reuse=reuse):
    embeddings = variables.model_variable(
        'embeddings', shape=[vocab_size, embed_dim],
        initializer=initializer, regularizer=regularizer,
        trainable=trainable)
    if sparse_lookup:
      if isinstance(ids, sparse_tensor.SparseTensor):
        sparse_ids = ids
      else:
        sparse_ids = sparse_ops.dense_to_sparse_tensor(ids)
      return contrib_embedding_ops.safe_embedding_lookup_sparse(
          [embeddings], sparse_ids, combiner='mean', default_id=0)
    else:
      if isinstance(ids, sparse_tensor.SparseTensor):
        raise TypeError('ids are expected to be dense Tensor, got: %s', ids)
      return math_ops.reduce_mean(
          embedding_ops.embedding_lookup(embeddings, ids),
          reduction_indices=1)
Example #25
Source File: feature_column_ops.py From auto-alt-text-lambda-api with MIT License | 4 votes |
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight."""
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        sparse_tensor_py.SparseTensor(t.indices, values, t.dense_shape))
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights',
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        combiner='sum',
        name='_weights')
    return variable, predictions
Example #26
Source File: encoders.py From lambda-packs with MIT License | 4 votes |
def bow_encoder(ids,
                vocab_size,
                embed_dim,
                sparse_lookup=True,
                initializer=None,
                regularizer=None,
                trainable=True,
                scope=None,
                reuse=None):
  """Maps a sequence of symbols to a vector per example by averaging embeddings.

  Args:
    ids: `[batch_size, doc_length]` `Tensor` or `SparseTensor` of type
      `int32` or `int64` with symbol ids.
    vocab_size: Integer number of symbols in vocabulary.
    embed_dim: Integer number of dimensions for embedding matrix.
    sparse_lookup: `bool`, if `True`, converts ids to a `SparseTensor`
      and performs a sparse embedding lookup. This is usually faster,
      but not desirable if padding tokens should have an embedding. Empty rows
      are assigned a special embedding.
    initializer: An initializer for the embeddings, if `None` default for
      current scope is used.
    regularizer: Optional regularizer for the embeddings.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional string specifying the variable scope for the op, required
      if `reuse=True`.
    reuse: If `True`, variables inside the op will be reused.

  Returns:
    Encoding `Tensor` `[batch_size, embed_dim]` produced by averaging
    embeddings.

  Raises:
    ValueError: If `embed_dim` or `vocab_size` are not specified.
  """
  if not vocab_size or not embed_dim:
    raise ValueError('Must specify vocab size and embedding dimension')
  with variable_scope.variable_scope(
      scope, 'bow_encoder', [ids], reuse=reuse):
    embeddings = variables.model_variable(
        'embeddings', shape=[vocab_size, embed_dim],
        initializer=initializer, regularizer=regularizer,
        trainable=trainable)
    if sparse_lookup:
      if isinstance(ids, sparse_tensor.SparseTensor):
        sparse_ids = ids
      else:
        sparse_ids = sparse_ops.dense_to_sparse_tensor(ids)
      return contrib_embedding_ops.safe_embedding_lookup_sparse(
          [embeddings], sparse_ids, combiner='mean', default_id=0)
    else:
      if isinstance(ids, sparse_tensor.SparseTensor):
        raise TypeError('ids are expected to be dense Tensor, got: %s', ids)
      return math_ops.reduce_mean(
          embedding_ops.embedding_lookup(embeddings, ids),
          reduction_indices=1)
Example #27
Source File: feature_column_ops.py From lambda-packs with MIT License | 4 votes |
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight."""
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        sparse_tensor_py.SparseTensor(t.indices, values, t.dense_shape))
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights',
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=weight_collections)
    if fc._is_variable(variable):  # pylint: disable=protected-access
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        combiner='sum',
        name='_weights')
    return variable, predictions
Example #28
Source File: feature_column_ops.py From keras-lambda with MIT License | 4 votes |
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight."""
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        sparse_tensor_py.SparseTensor(t.indices, values, t.dense_shape))
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights',
      values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        combiner='sum',
        name='_weights')
    return variable, predictions
Example #29
Source File: encoders.py From keras-lambda with MIT License | 4 votes |
def bow_encoder(ids,
                vocab_size,
                embed_dim,
                sparse_lookup=True,
                initializer=None,
                regularizer=None,
                trainable=True,
                scope=None,
                reuse=None):
  """Maps a sequence of symbols to a vector per example by averaging embeddings.

  Args:
    ids: `[batch_size, doc_length]` `Tensor` or `SparseTensor` of type
      `int32` or `int64` with symbol ids.
    vocab_size: Integer number of symbols in vocabulary.
    embed_dim: Integer number of dimensions for embedding matrix.
    sparse_lookup: `bool`, if `True`, converts ids to a `SparseTensor`
      and performs a sparse embedding lookup. This is usually faster,
      but not desirable if padding tokens should have an embedding. Empty rows
      are assigned a special embedding.
    initializer: An initializer for the embeddings, if `None` default for
      current scope is used.
    regularizer: Optional regularizer for the embeddings.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional string specifying the variable scope for the op, required
      if `reuse=True`.
    reuse: If `True`, variables inside the op will be reused.

  Returns:
    Encoding `Tensor` `[batch_size, embed_dim]` produced by averaging
    embeddings.

  Raises:
    ValueError: If `embed_dim` or `vocab_size` are not specified.
  """
  if not vocab_size or not embed_dim:
    raise ValueError('Must specify vocab size and embedding dimension')
  with variable_scope.variable_scope(
      scope, 'bow_encoder', [ids], reuse=reuse):
    embeddings = variables.model_variable(
        'embeddings', shape=[vocab_size, embed_dim],
        initializer=initializer, regularizer=regularizer,
        trainable=trainable)
    if sparse_lookup:
      if isinstance(ids, sparse_tensor.SparseTensor):
        sparse_ids = ids
      else:
        sparse_ids = sparse_ops.dense_to_sparse_tensor(ids)
      return contrib_embedding_ops.safe_embedding_lookup_sparse(
          [embeddings], sparse_ids, combiner='mean', default_id=0)
    else:
      if isinstance(ids, sparse_tensor.SparseTensor):
        raise TypeError('ids are expected to be dense Tensor, got: %s', ids)
      return math_ops.reduce_mean(
          embedding_ops.embedding_lookup(embeddings, ids),
          reduction_indices=1)