Python tensorflow.VariableScope() Examples
The following are 8 code examples of tensorflow.VariableScope(), drawn from open-source projects. You can go to the original project or source file by following the link above each example.
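Before the project examples, here is a minimal sketch (TF 1.x API; tf.compat.v1 in TF 2.x) of the most common way a tf.VariableScope object is obtained and then re-entered to share variables. It is not taken from any of the projects below.

import tensorflow as tf

with tf.variable_scope('encoder') as scope:  # `scope` is a tf.VariableScope
    w = tf.get_variable('w', shape=[4, 4])

with tf.variable_scope(scope, reuse=True):   # re-enter the same scope object
    w_again = tf.get_variable('w', shape=[4, 4])

assert w is w_again  # the second call returns the existing variable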
Example #1
Source File: lstm_models.py From synvae with MIT License
def build(self, hparams, is_training=True):
  self._total_length = hparams.max_seq_len
  if self._total_length != np.prod(self._level_lengths):
    raise ValueError(
        'The product of the HierarchicalLstmEncoder level lengths (%d) must '
        'equal the padded input sequence length (%d).' % (
            np.prod(self._level_lengths), self._total_length))
  tf.logging.info('\nHierarchical Encoder:\n'
                  '  input length: %d\n'
                  '  level lengths: %s\n',
                  self._total_length, self._level_lengths)
  self._hierarchical_encoders = []
  num_splits = np.prod(self._level_lengths)
  for i, l in enumerate(self._level_lengths):
    num_splits //= l
    tf.logging.info('Level %d splits: %d', i, num_splits)
    h_encoder = self._core_encoder_cls()
    h_encoder.build(
        hparams, is_training,
        name_or_scope=tf.VariableScope(
            tf.AUTO_REUSE, 'encoder/hierarchical_level_%d' % i))
    self._hierarchical_encoders.append((num_splits, h_encoder))
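The example above constructs a fresh tf.VariableScope with tf.AUTO_REUSE for each hierarchy level, so repeated builds of the same level share weights while different levels stay separate. A hedged, self-contained sketch of that pattern (names are illustrative, not from synvae):

import tensorflow as tf

def build_level(i):
    scope = tf.VariableScope(tf.AUTO_REUSE, 'encoder/hierarchical_level_%d' % i)
    with tf.variable_scope(scope):
        return tf.get_variable('w', shape=[8])

a = build_level(0)
b = build_level(0)  # same scope name + AUTO_REUSE -> the same variable is reused
c = build_level(1)  # different level -> a separate variable
assert a is b and a is not c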
Example #2
Source File: train_test.py From g-tensorflow-models with Apache License 2.0
def test_define_model(self):
  FLAGS.batch_size = 2
  images_shape = [FLAGS.batch_size, 4, 4, 3]
  images_np = np.zeros(shape=images_shape)
  images = tf.constant(images_np, dtype=tf.float32)
  labels = tf.one_hot([0] * FLAGS.batch_size, 2)

  model = train._define_model(images, labels)
  self.assertIsInstance(model, tfgan.StarGANModel)
  self.assertShapeEqual(images_np, model.generated_data)
  self.assertShapeEqual(images_np, model.reconstructed_data)
  self.assertTrue(isinstance(model.discriminator_variables, list))
  self.assertTrue(isinstance(model.generator_variables, list))
  self.assertIsInstance(model.discriminator_scope, tf.VariableScope)
  self.assertTrue(model.generator_scope, tf.VariableScope)
  self.assertTrue(callable(model.discriminator_fn))
  self.assertTrue(callable(model.generator_fn))
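The test only checks that model.discriminator_scope and model.generator_scope are tf.VariableScope objects. A hedged sketch of how such scope fields are typically consumed downstream (not part of the test; `model` stands for any StarGANModel-like tuple):

# Select each sub-network's trainable variables via the scope's name.
disc_vars = tf.get_collection(
    tf.GraphKeys.TRAINABLE_VARIABLES, scope=model.discriminator_scope.name)
gen_vars = tf.get_collection(
    tf.GraphKeys.TRAINABLE_VARIABLES, scope=model.generator_scope.name)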
Example #3
Source File: train_test.py From multilabel-image-classification-tensorflow with MIT License
def test_define_model(self):
  FLAGS.batch_size = 2
  images_shape = [FLAGS.batch_size, 4, 4, 3]
  images_np = np.zeros(shape=images_shape)
  images = tf.constant(images_np, dtype=tf.float32)
  labels = tf.one_hot([0] * FLAGS.batch_size, 2)

  model = train._define_model(images, labels)
  self.assertIsInstance(model, tfgan.StarGANModel)
  self.assertShapeEqual(images_np, model.generated_data)
  self.assertShapeEqual(images_np, model.reconstructed_data)
  self.assertTrue(isinstance(model.discriminator_variables, list))
  self.assertTrue(isinstance(model.generator_variables, list))
  self.assertIsInstance(model.discriminator_scope, tf.VariableScope)
  self.assertTrue(model.generator_scope, tf.VariableScope)
  self.assertTrue(callable(model.discriminator_fn))
  self.assertTrue(callable(model.generator_fn))
Example #4
Source File: specs_ops.py From deep_image_model with Apache License 2.0
def __init__(self, subnet, name=None, scope=None):
  """Create the Shared operator.

  Use this as:

      f = Shared(Cr(100, 3))
      g = f | f | f

  Ordinarily, you do not need to provide either a name or a scope.
  Providing a name is useful if you want a well-defined namespace for
  the variables (e.g., for saving a subnet).

  Args:
      subnet: Definition of the shared network.
      name: Optional name for the shared context.
      scope: Optional shared scope (must be a Scope, not a string).

  Raises:
      ValueError: Scope is not of type tf.Scope, name is not of type string,
          or both scope and name are given together.
  """
  if scope is not None and not isinstance(scope, tf.VariableScope):
    raise ValueError("scope must be None or a VariableScope")
  if name is not None and not isinstance(name, str):
    raise ValueError("name must be None or a string")
  if scope is not None and name is not None:
    raise ValueError("cannot provide both a name and a scope")
  if name is None:
    name = "Shared_%d" % Shared.shared_number
    Shared.shared_number += 1
  self.subnet = subnet
  self.name = name
  self.scope = scope
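A hedged usage sketch following the docstring above (Cr and the | pipe come from the same specs DSL; the name argument is optional):

f = Shared(Cr(100, 3), name="shared_conv")  # one set of convolution weights
g = f | f | f                               # applied three times, all sharing those weights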
Example #5
Source File: tf_graph_utils.py From lmdis-rep with Apache License 2.0
def pair_vars_between_scope(src, dst, src_vars=None, dst_vars=None):

    def canonicalize_scope_name(s):
        if isinstance(s, tf.VariableScope):
            s = s.name
        return s + "/"

    def canonicalize_vars(vars, scope_path):
        if vars is None:
            vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        vd = dict()
        prefix_len = len(scope_path)
        for v in vars:
            if v.name.startswith(scope_path):
                vd[v.name[prefix_len:]] = v
        return vd

    src = canonicalize_scope_name(src)
    dst = canonicalize_scope_name(dst)

    src_vars = canonicalize_vars(src_vars, src)
    dst_vars = canonicalize_vars(dst_vars, dst)

    assert len(dst_vars) == len(src_vars) and all(k in dst_vars for k in src_vars), \
        "variables mismatches"

    pair_list = []
    for k, src_v in src_vars.items():
        pair_list.append((src_v, dst_vars[k]))  # (src, dst)

    return pair_list
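A hedged sketch of how the returned (src, dst) pairs are typically used, for example to copy every variable under one scope into the matching variable under another; the scope names 'model/online' and 'model/target' are hypothetical, not from lmdis-rep:

pairs = pair_vars_between_scope('model/online', 'model/target')  # hypothetical scopes
copy_ops = [dst_v.assign(src_v) for src_v, dst_v in pairs]
copy_all = tf.group(*copy_ops)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(copy_all)  # dst variables now mirror src variables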
Example #6
Source File: layers.py From fold with Apache License 2.0
def __init__(self, input_type=None, output_type=None, name_or_scope=None):
  """Creates the layer.

  Args:
    input_type: A type.
    output_type: A type.
    name_or_scope: A string or variable scope. If a string, a new variable
      scope will be created by calling
      [`create_variable_scope`](#create_variable_scope), with defaults
      inherited from the current variable scope. If no caching device is
      set, it will be set to `lambda op: op.device`. This is because
      `tf.while` can be very inefficient if the variables it uses are not
      cached locally.
  """
  if name_or_scope is None:
    name_or_scope = type(self).__name__
  if isinstance(name_or_scope, tf.VariableScope):
    self._vscope = name_or_scope
    name = str(self._vscope.name)
  elif isinstance(name_or_scope, six.string_types):
    self._vscope = create_variable_scope(name_or_scope)
    name = name_or_scope
  else:
    raise TypeError('name_or_scope must be a tf.VariableScope or a string: '
                    '%s' % (name_or_scope,))
  if self._vscope.caching_device is None:
    self._vscope.set_caching_device(lambda op: op.device)
  super(Layer, self).__init__(input_type, output_type, name)
  if not hasattr(self, '_constructor_name'):
    self._constructor_name = '__.%s' % self.__class__.__name__
  if not hasattr(self, '_constructor_args'):
    self._constructor_args = None
  if not hasattr(self, '_constructor_kwargs'):
    self._constructor_kwargs = None
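Because the constructor accepts an existing tf.VariableScope, two layers can be pointed at the same scope object. A hedged sketch, where MyLayer is a hypothetical Layer subclass; whether variables are actually shared depends on how the subclass creates them inside that scope:

with tf.variable_scope('shared_layer') as scope:
    pass  # only create and capture the scope

layer_a = MyLayer(name_or_scope=scope)  # MyLayer is hypothetical
layer_b = MyLayer(name_or_scope=scope)  # same scope object as layer_a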
Example #7
Source File: lstm_models.py From synvae with MIT License
def build(self, hparams, is_training=True, name_or_scope='encoder'):
  if hparams.use_cudnn and hparams.residual_decoder:
    raise ValueError('Residual connections not supported in cuDNN.')

  self._is_training = is_training
  self._name_or_scope = name_or_scope
  self._use_cudnn = hparams.use_cudnn

  tf.logging.info('\nEncoder Cells (bidirectional):\n'
                  '  units: %s\n',
                  hparams.enc_rnn_size)

  if isinstance(name_or_scope, tf.VariableScope):
    name = name_or_scope.name
    reuse = name_or_scope.reuse
  else:
    name = name_or_scope
    reuse = None

  cells_fw = []
  cells_bw = []
  for i, layer_size in enumerate(hparams.enc_rnn_size):
    if self._use_cudnn:
      cells_fw.append(lstm_utils.cudnn_lstm_layer(
          [layer_size], hparams.dropout_keep_prob, is_training,
          name_or_scope=tf.VariableScope(
              reuse, name + '/cell_%d/bidirectional_rnn/fw' % i)))
      cells_bw.append(lstm_utils.cudnn_lstm_layer(
          [layer_size], hparams.dropout_keep_prob, is_training,
          name_or_scope=tf.VariableScope(
              reuse, name + '/cell_%d/bidirectional_rnn/bw' % i)))
    else:
      cells_fw.append(
          lstm_utils.rnn_cell(
              [layer_size], hparams.dropout_keep_prob,
              hparams.residual_encoder, is_training))
      cells_bw.append(
          lstm_utils.rnn_cell(
              [layer_size], hparams.dropout_keep_prob,
              hparams.residual_encoder, is_training))

  self._cells = (cells_fw, cells_bw)
Example #8
Source File: lstm_models.py From synvae with MIT License
def _hierarchical_decode(self, z, base_decode_fn):
  """Depth first decoding from `z`, passing final embeddings to base fn."""
  batch_size = z.shape[0]
  # Subtract 1 for the core decoder level.
  num_levels = len(self._level_lengths) - 1

  hparams = self.hparams
  batch_size = hparams.batch_size

  def recursive_decode(initial_input, path=None):
    """Recursive hierarchical decode function."""
    path = path or []
    level = len(path)

    if level == num_levels:
      with tf.variable_scope('core_decoder', reuse=tf.AUTO_REUSE):
        return base_decode_fn(initial_input, path)

    scope = tf.VariableScope(
        tf.AUTO_REUSE, 'decoder/hierarchical_level_%d' % level)
    num_steps = self._level_lengths[level]
    with tf.variable_scope(scope):
      state = lstm_utils.initial_cell_state_from_embedding(
          self._hier_cells[level], initial_input, name='initial_state')

    if level not in self._disable_autoregression:
      # The initial input should be the same size as the tensors returned by
      # next level.
      if self._hierarchical_encoder:
        input_size = self._hierarchical_encoder.level(0).output_depth
      elif level == num_levels - 1:
        input_size = sum(nest.flatten(self._core_decoder.state_size))
      else:
        input_size = sum(
            nest.flatten(self._hier_cells[level + 1].state_size))
      next_input = tf.zeros([batch_size, input_size])
    lower_level_embeddings = []
    for i in range(num_steps):
      if level in self._disable_autoregression:
        next_input = tf.zeros([batch_size, 1])
      else:
        next_input = tf.concat([next_input, initial_input], axis=1)
      with tf.variable_scope(scope):
        output, state = self._hier_cells[level](next_input, state, scope)
      next_input = recursive_decode(output, path + [i])
      lower_level_embeddings.append(next_input)

    if self._hierarchical_encoder:
      # Return the encoding of the outputs using the appropriate level of the
      # hierarchical encoder.
      enc_level = num_levels - level
      return self._hierarchical_encoder.level(enc_level).encode(
          sequence=tf.stack(lower_level_embeddings, axis=1),
          sequence_length=tf.fill([batch_size], num_steps))
    else:
      # Return the final state.
      return tf.concat(nest.flatten(state), axis=-1)

  return recursive_decode(z)