Python sonnet.Sequential() Examples

The following are 24 code examples of sonnet.Sequential(), drawn from open-source projects; the attribution line above each example names the original source file and project.
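Before the examples themselves, here is a minimal sketch of the pattern they all share: snt.Sequential takes a list of callables (Sonnet modules, TensorFlow ops, or plain lambdas) and applies them in order to a single input. The sketch assumes Sonnet 2 on TensorFlow 2, and the layer sizes are arbitrary.

import sonnet as snt
import tensorflow as tf

# Compose a flatten step, a hidden layer, a nonlinearity, and an output layer.
model = snt.Sequential([
    snt.Flatten(),
    snt.Linear(64),
    tf.nn.relu,
    snt.Linear(10),
])

x = tf.random.normal([8, 28, 28, 1])  # dummy batch of images
logits = model(x)                     # shape [8, 10]

Several of the examples below instead target Sonnet 1 on TensorFlow 1, where the equivalent building blocks are snt.BatchFlatten and an argument-free snt.LayerNorm(); the composition pattern is the same.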
Example #1
Source File: agent.py    From bsuite with Apache License 2.0
def make_ensemble(num_actions: int,
                  num_ensemble: int = 20,
                  num_hidden_layers: int = 2,
                  num_units: int = 50,
                  prior_scale: float = 3.) -> Sequence[snt.Module]:
  """Convenience function to make an ensemble from flags."""
  output_sizes = [num_units] * num_hidden_layers + [num_actions]
  ensemble = []
  for _ in range(num_ensemble):
    network = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(output_sizes),
    ])
    prior_network = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(output_sizes),
    ])
    ensemble.append(NetworkWithPrior(network, prior_network, prior_scale))
  return ensemble 
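NetworkWithPrior is defined elsewhere in the same agent file. As a hedged sketch of the "randomized prior functions" idea it implements (the exact bsuite class may differ in detail; imports as in the sketch at the top of the page), each ensemble member adds a scaled, gradient-stopped prior network to a trainable network:

class NetworkWithPrior(snt.Module):
  """Sketch: trainable network plus a frozen, scaled prior network."""

  def __init__(self, network: snt.Module, prior_network: snt.Module,
               prior_scale: float = 1.):
    super().__init__(name='network_with_prior')
    self._network = network
    self._prior_network = prior_network
    self._prior_scale = prior_scale

  def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
    q_values = self._network(inputs)
    prior_q_values = self._prior_network(inputs)
    # The prior shapes the output but never receives gradient updates.
    return q_values + self._prior_scale * tf.stop_gradient(prior_q_values)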
Example #2
Source File: agent.py    From bsuite with Apache License 2.0
def default_agent(obs_spec: specs.Array,
                  action_spec: specs.DiscreteArray):
  """Initialize a DQN agent with default parameters."""
  del obs_spec  # Unused.
  network = snt.Sequential([
      snt.Flatten(),
      snt.nets.MLP([50, 50, action_spec.num_values]),
  ])
  optimizer = snt.optimizers.Adam(learning_rate=1e-3)
  return DQN(
      action_spec=action_spec,
      network=network,
      batch_size=32,
      discount=0.99,
      replay_capacity=10000,
      min_replay_size=100,
      sgd_period=1,
      target_update_period=4,
      optimizer=optimizer,
      epsilon=0.05,
      seed=42) 
Example #3
Source File: robust_model.py    From interval-bound-propagation with Apache License 2.0
def add_train_ops(self,
                    num_classes,
                    joint_rep,
                    minibatch):
    """Add ops for training in the computation graph.

    Args:
      num_classes: number of classes to predict in the task.
      joint_rep: the joint sentence representation if the input is sentence
        pairs or the representation for the sentence if the input is a single
        sentence.
      minibatch: a minibatch of sequences of embeddings.
    Returns:
      train_accuracy: the accuracy on the training dataset
      loss: training loss.
      opt_step: training op.
    """
    if self.linear_classifier is None:
      classifier_layers = []
      classifier_layers.append(snt.Linear(num_classes))
      self.linear_classifier = snt.Sequential(classifier_layers)
    logits = self.linear_classifier(joint_rep)
    # Losses and optimizer.
    def get_loss(logits, labels):
      return tf.reduce_mean(
          tf.nn.sparse_softmax_cross_entropy_with_logits(
              labels=labels, logits=logits))

    loss = get_loss(logits, minibatch.sentiment)
    train_accuracy = utils.get_accuracy(logits, minibatch.sentiment)
    opt_step = self._add_optimize_op(loss)
    return train_accuracy, loss, opt_step 
Example #4
Source File: models.py    From graph_nets with Apache License 2.0
def make_mlp_model():
  """Instantiates a new MLP, followed by LayerNorm.

  The parameters of each new MLP are not shared with others generated by
  this function.

  Returns:
    A Sonnet module which contains the MLP and LayerNorm.
  """
  return snt.Sequential([
      snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),
      snt.LayerNorm(axis=-1, create_offset=True, create_scale=True)
  ]) 
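A quick, hedged check of the docstring's claim that parameters are not shared: each call to make_mlp_model builds a fresh MLP and LayerNorm, so two modules created this way hold disjoint variable sets once they have been called. LATENT_SIZE and NUM_LAYERS are placeholders here, standing in for whatever the graph_nets demo defines.

LATENT_SIZE = 16   # placeholder value
NUM_LAYERS = 2     # placeholder value

mlp_a = make_mlp_model()
mlp_b = make_mlp_model()

x = tf.random.normal([4, 32])
_ = mlp_a(x), mlp_b(x)  # Sonnet 2 creates variables on first call

# No variable object is shared between the two modules.
assert not set(map(id, mlp_a.variables)) & set(map(id, mlp_b.variables))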
Example #5
Source File: models.py    From graph_nets with Apache License 2.0
def make_mlp_model():
  """Instantiates a new MLP, followed by LayerNorm.

  The parameters of each new MLP are not shared with others generated by
  this function.

  Returns:
    A Sonnet module which contains the MLP and LayerNorm.
  """
  return snt.Sequential([
      snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),
      snt.LayerNorm()
  ]) 
Example #6
Source File: neural.py    From attend_infer_repeat with GNU General Public License v3.0
def _build(self, inpt):
            layers = []
            for n_hidden, hidden_transfer in zip(self._n_hiddens, self._hidden_transfers):
                layers.append(Affine(n_hidden, hidden_transfer, self._initializers))

            if self._n_out is not None:
                layers.append(Affine(self._n_out, self._transfer, self._initializers))

            module = snt.Sequential(layers)
            return module(inpt) 
Example #7
Source File: modules.py    From attend_infer_repeat with GNU General Public License v3.0
def _build(self, inpt):
        n = np.prod(self._output_size)
        mlp = MLP(self._n_hidden, n_out=n)
        reshape = snt.BatchReshape(self._output_size)
        seq = snt.Sequential([mlp, reshape])
        return seq(inpt) 
Example #8
Source File: modules.py    From attend_infer_repeat with GNU General Public License v3.0
def _build(self, inpt):
        flat = snt.BatchFlatten()
        mlp = MLP(self._n_hidden)
        seq = snt.Sequential([flat, mlp])
        return seq(inpt) 
Example #9
Source File: modules.py    From attend_infer_repeat with GNU General Public License v3.0
def _embed(self, inpt):
        flatten = snt.BatchFlatten()
        mlp = MLP(self._n_hidden, n_out=self._n_param)
        seq = snt.Sequential([flatten, mlp])
        return seq(inpt) 
Example #10
Source File: core.py    From kglib with Apache License 2.0
def _node_model(self):
        return snt.Sequential([self._thing_embedder,
                               snt.nets.MLP([self._latent_size] * self._num_layers, activate_final=True),
                               snt.LayerNorm()]) 
Example #11
Source File: core.py    From kglib with Apache License 2.0
def _edge_model(self):
        return snt.Sequential([self._role_embedder,
                               snt.nets.MLP([self._latent_size] * self._num_layers, activate_final=True),
                               snt.LayerNorm()]) 
Example #12
Source File: core.py    From kglib with Apache License 2.0
def make_mlp_model(latent_size=16, num_layers=2):
    """Instantiates a new MLP, followed by LayerNorm.

    The parameters of each new MLP are not shared with others generated by
    this function.

    Returns:
      A Sonnet module which contains the MLP and LayerNorm.
    """
    return snt.Sequential([
        snt.nets.MLP([latent_size] * num_layers, activate_final=True),
        snt.LayerNorm()
    ]) 
Example #13
Source File: attribute.py    From kglib with Apache License 2.0
def _build(self, attribute_value):
        tf.summary.histogram('cont_attribute_value_histogram', attribute_value)
        embedding = snt.Sequential([
            snt.nets.MLP([self._attr_embedding_dim] * 3, activate_final=True, use_dropout=True),
            snt.LayerNorm(),
        ])(tf.cast(attribute_value, dtype=tf.float32))
        tf.summary.histogram('cont_embedding_histogram', embedding)
        return embedding 
Example #14
Source File: rnn.py    From differentiable-particle-filters with MIT License
def __init__(self, init_with_true_state=False, model='2lstm', **unused_kwargs):

        self.placeholders = {'o': tf.placeholder('float32', [None, None, 24, 24, 3], 'observations'),
                     'a': tf.placeholder('float32', [None, None, 3], 'actions'),
                     's': tf.placeholder('float32', [None, None, 3], 'states'),
                     'keep_prob': tf.placeholder('float32')}
        self.pred_states = None
        self.init_with_true_state = init_with_true_state
        self.model = model

        # build models
        # <-- observation
        self.encoder = snt.Sequential([
            snt.nets.ConvNet2D([16, 32, 64], [[3, 3]], [2], [snt.SAME], activate_final=True, name='encoder/convnet'),
            snt.BatchFlatten(),
            lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
            snt.Linear(128, name='encoder/Linear'),
            tf.nn.relu,
        ])

        # <-- action
        if self.model == '2lstm':
            self.rnn1 = snt.LSTM(512)
            self.rnn2 = snt.LSTM(512)
        if self.model == '2gru':
            self.rnn1 = snt.GRU(512)
            self.rnn2 = snt.GRU(512)
        elif self.model == 'ff':
            self.ff_lstm_replacement = snt.Sequential([
                snt.Linear(512),
                tf.nn.relu,
                snt.Linear(512),
                tf.nn.relu])

        self.belief_decoder = snt.Sequential([
            snt.Linear(256),
            tf.nn.relu,
            snt.Linear(256),
            tf.nn.relu,
            snt.Linear(3)
        ]) 
Example #15
Source File: problems.py    From learning-to-learn with Apache License 2.0
def mnist(layers,  # pylint: disable=invalid-name
          activation="sigmoid",
          batch_size=128,
          mode="train"):
  """Mnist classification with a multi-layer perceptron."""

  if activation == "sigmoid":
    activation_op = tf.sigmoid
  elif activation == "relu":
    activation_op = tf.nn.relu
  else:
    raise ValueError("{} activation not supported".format(activation))

  # Data.
  data = mnist_dataset.load_mnist()
  data = getattr(data, mode)
  images = tf.constant(data.images, dtype=tf.float32, name="MNIST_images")
  images = tf.reshape(images, [-1, 28, 28, 1])
  labels = tf.constant(data.labels, dtype=tf.int64, name="MNIST_labels")

  # Network.
  mlp = snt.nets.MLP(list(layers) + [10],
                     activation=activation_op,
                     initializers=_nn_initializers)
  network = snt.Sequential([snt.BatchFlatten(), mlp])

  def build():
    indices = tf.random_uniform([batch_size], 0, data.num_examples, tf.int64)
    batch_images = tf.gather(images, indices)
    batch_labels = tf.gather(labels, indices)
    output = network(batch_images)
    return _xent_loss(output, batch_labels)

  return build 
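The function returns a graph-building callable rather than tensors, so the caller decides when (and how often) the sampled-minibatch loss enters the graph. A hedged TF1-style usage sketch, assuming the surrounding helpers in problems.py (mnist_dataset, _nn_initializers, _xent_loss) are available; the real learning-to-learn code feeds this into a meta-optimizer rather than a plain Adam step:

build_loss = mnist([20], activation="relu", batch_size=128, mode="train")
loss = build_loss()  # adds the network and its loss to the default graph
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(100):
    _, loss_value = sess.run([train_op, loss])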
Example #16
Source File: agent.py    From bsuite with Apache License 2.0
def __init__(self, hidden_sizes: Sequence[int],
               action_spec: specs.DiscreteArray):
    super().__init__(name='policy_value_net')
    self._torso = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(hidden_sizes, activate_final=True),
    ])
    self._policy_head = snt.Linear(action_spec.num_values)
    self._value_head = snt.Linear(1)
    self._action_dtype = action_spec.dtype 
Example #17
Source File: run.py    From bsuite with Apache License 2.0
def run(bsuite_id: str) -> str:
  """Runs a DQN agent on a given bsuite environment, logging to CSV."""

  env = bsuite.load_and_record(
      bsuite_id=bsuite_id,
      save_path=FLAGS.save_path,
      logging_mode=FLAGS.logging_mode,
      overwrite=FLAGS.overwrite,
  )

  # Making the networks.
  hidden_units = [FLAGS.num_units] * FLAGS.num_hidden_layers
  network = snt.Sequential([
      snt.Flatten(),
      snt.nets.MLP(hidden_units + [env.action_spec().num_values]),
  ])
  optimizer = snt.optimizers.Adam(learning_rate=FLAGS.learning_rate)

  agent = dqn.DQN(
      action_spec=env.action_spec(),
      network=network,
      batch_size=FLAGS.batch_size,
      discount=FLAGS.discount,
      replay_capacity=FLAGS.replay_capacity,
      min_replay_size=FLAGS.min_replay_size,
      sgd_period=FLAGS.sgd_period,
      target_update_period=FLAGS.target_update_period,
      optimizer=optimizer,
      epsilon=FLAGS.epsilon,
      seed=FLAGS.seed,
  )

  num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
  experiment.run(
      agent=agent,
      environment=env,
      num_episodes=num_episodes,
      verbose=FLAGS.verbose)

  return bsuite_id 
Example #18
Source File: codec.py    From vae-seq with Apache License 2.0
def _build(self, inputs):
        if self._input_encoders:
            inputs = snt.Sequential(self._input_encoders)(inputs)
        return self._decoder(inputs) 
Example #19
Source File: codec.py    From vae-seq with Apache License 2.0
def _build(self, inputs):
        hparams = self._hparams
        hidden = snt.Sequential([
            util.concat_features,
            util.make_mlp(
                hparams,
                hparams.obs_decoder_fc_hidden_layers,
                activate_final=True),
        ])(inputs)
        return (self._build_game_output(hidden),
                self._build_score(hidden),
                self._build_game_over(hidden)) 
Example #20
Source File: periodic_inv_cov_update_kfac_opt_test.py    From kfac with Apache License 2.0
def test_train(self):
    image = tf.random_uniform(shape=(_BATCH_SIZE, 784), maxval=1.)
    labels = tf.random_uniform(shape=(_BATCH_SIZE,), maxval=10, dtype=tf.int32)
    labels_one_hot = tf.one_hot(labels, 10)

    model = snt.Sequential([snt.BatchFlatten(), snt.nets.MLP([128, 128, 10])])
    logits = model(image)
    all_losses = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=labels_one_hot)
    loss = tf.reduce_mean(all_losses)
    layers = layer_collection.LayerCollection()
    optimizer = periodic_inv_cov_update_kfac_opt.PeriodicInvCovUpdateKfacOpt(
        invert_every=10,
        cov_update_every=1,
        learning_rate=0.03,
        cov_ema_decay=0.95,
        damping=100.,
        layer_collection=layers,
        momentum=0.9,
        num_burnin_steps=0,
        placement_strategy="round_robin")
    _construct_layer_collection(layers, [logits], tf.trainable_variables())

    train_step = optimizer.minimize(loss)
    counter = optimizer.counter
    max_iterations = 50

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(sess=sess, coord=coord)
      for iteration in range(max_iterations):
        sess.run([loss, train_step])
        counter_ = sess.run(counter)
        self.assertEqual(counter_, iteration + 1.0) 
Example #21
Source File: cmn.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def _construct_weights(self):
        """
        Constructs the user/item memories and user/item external memory/outputs

        Also add the embedding lookups
        """
        self.user_memory = snt.Embed(self.config.user_count, self.config.embed_size,
                                     initializers=self._embedding_initializers,
                                     name='MemoryEmbed')

        self.user_output = snt.Embed(self.config.user_count, self.config.embed_size,
                                     initializers=self._embedding_initializers,
                                     name='MemoryOutput')

        self.item_memory = snt.Embed(self.config.item_count,
                                     self.config.embed_size,
                                     initializers=self._embedding_initializers,
                                     name="ItemMemory")
        self._mem_layer = VariableLengthMemoryLayer(self.config.hops,
                                                    self.config.embed_size,
                                                    tf.nn.relu,
                                                    initializers=self._hops_init,
                                                    regularizers=self._regularizers,
                                                    name='UserMemoryLayer')

        self._output_module = snt.Sequential([
            DenseLayer(self.config.embed_size, True, tf.nn.relu,
                       initializers=self._initializers,
                       regularizers=self._regularizers,
                       name='Layer'),
            snt.Linear(1, False,
                       initializers=self._output_initializers,
                       regularizers=self._regularizers,
                       name='OutputVector'),
            tf.squeeze])

        # [batch, embedding size]
        self._cur_user = self.user_memory(self.input_users)
        self._cur_user_output = self.user_output(self.input_users)

        # Item memories a query
        self._cur_item = self.item_memory(self.input_items)
        self._cur_item_negative = self.item_memory(self.input_items_negative)

        # Share Embeddings
        self._cur_item_output = self._cur_item
        self._cur_item_output_negative = self._cur_item_negative 
Example #22
Source File: image.py    From stacked_capsule_autoencoders with Apache License 2.0
def create(which,
           batch_size,
           subset=None,
           n_replicas=1,
           transforms=None,
           **kwargs):
  """Creates data loaders according to the dataset name `which`."""

  func = globals().get('_create_{}'.format(which), None)
  if func is None:
    raise ValueError('Dataset "{}" not supported. Only {} are'
                     ' supported.'.format(which, SUPPORTED_DATSETS))

  dataset = func(subset, batch_size, **kwargs)

  if transforms is not None:
    if not isinstance(transforms, dict):
      transforms = {'image': transforms}

    for k, v in transforms.items():
      transforms[k] = snt.Sequential(nest.flatten(v))

  if transforms is not None or n_replicas > 1:

    def map_func(data):
      """Replicates data if necessary."""
      data = dict(data)

      if n_replicas > 1:
        tile_by_batch = snt.TileByDim([0], [n_replicas])
        data = {k: tile_by_batch(v) for k, v in data.items()}

      if transforms is not None:
        img = data['image']

        for k, transform in transforms.items():
          data[k] = transform(img)

      return data

    dataset = dataset.map(map_func)

  iter_data = dataset.make_one_shot_iterator()
  input_batch = iter_data.get_next()
  for _, v in input_batch.items():
    v.set_shape([batch_size * n_replicas] + v.shape[1:].as_list())

  return input_batch 
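A hedged usage sketch for the loader above. The dataset name is hypothetical and only illustrates the call shape: a plain list of transforms is wrapped into {'image': snt.Sequential([...])}, n_replicas > 1 tiles every field of the batch before the transforms run, and the result is a dict of tensors with leading dimension batch_size * n_replicas.

# Hypothetical call; 'mnist' stands in for a name accepted by SUPPORTED_DATSETS.
input_batch = create('mnist',
                     batch_size=32,
                     n_replicas=2,
                     transforms=[snt.BatchFlatten()])
images = input_batch['image']  # flattened images, leading dimension 32 * 2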
Example #23
Source File: dpf.py    From differentiable-particle-filters with MIT License
def build_modules(self, min_obs_likelihood, proposer_keep_ratio):
        """
        :param min_obs_likelihood:
        :param proposer_keep_ratio:
        :return: None
        """

        # MEASUREMENT MODEL

        # conv net for encoding the image
        self.encoder = snt.Sequential([
            snt.nets.ConvNet2D([16, 32, 64], [[3, 3]], [2], [snt.SAME], activate_final=True, name='encoder/convnet'),
            snt.BatchFlatten(),
            lambda x: tf.nn.dropout(x,  self.placeholders['keep_prob']),
            snt.Linear(128, name='encoder/linear'),
            tf.nn.relu
        ])

        # observation likelihood estimator that maps states and image encodings to probabilities
        self.obs_like_estimator = snt.Sequential([
            snt.Linear(128, name='obs_like_estimator/linear'),
            tf.nn.relu,
            snt.Linear(128, name='obs_like_estimator/linear'),
            tf.nn.relu,
            snt.Linear(1, name='obs_like_estimator/linear'),
            tf.nn.sigmoid,
            lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
        ], name='obs_like_estimator')

        # motion noise generator used for motion sampling
        self.mo_noise_generator = snt.nets.MLP([32, 32, self.state_dim], activate_final=False, name='mo_noise_generator')

        # odometry model (if we want to learn it)
        if self.learn_odom:
            self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim], activate_final=False, name='mo_transition_model')

        # particle proposer that maps encodings to particles (if we want to use it)
        if self.use_proposer:
            self.particle_proposer = snt.Sequential([
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                lambda x: tf.nn.dropout(x,  proposer_keep_ratio),
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(4, name='particle_proposer/linear'),
                tf.nn.tanh,
            ]) 
Example #24
Source File: dpf_kitti.py    From differentiable-particle-filters with MIT License
def build_modules(self, min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle):
        """
        :param min_obs_likelihood:
        :param proposer_keep_ratio:
        :return: None
        """

        # MEASUREMENT MODEL

        # conv net for encoding the image
        self.encoder = snt.Sequential([
            snt.nets.ConvNet2D([16, 16, 16, 16], [[7, 7], [5, 5], [5, 5], [5, 5]], [[1,1], [1, 2], [1, 2], [2, 2]], [snt.SAME], activate_final=True, name='encoder/convnet'),
            snt.BatchFlatten(),
            lambda x: tf.nn.dropout(x,  self.placeholders['keep_prob']),
            snt.Linear(128, name='encoder/linear'),
            tf.nn.relu
        ])

        # observation likelihood estimator that maps states and image encodings to probabilities
        self.obs_like_estimator = snt.Sequential([
            snt.Linear(128, name='obs_like_estimator/linear'),
            tf.nn.relu,
            snt.Linear(128, name='obs_like_estimator/linear'),
            tf.nn.relu,
            snt.Linear(1, name='obs_like_estimator/linear'),
            tf.nn.sigmoid,
            lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
        ], name='obs_like_estimator')

        # motion noise generator used for motion sampling
        if learn_gaussian_mle:
            self.mo_noise_generator = snt.nets.MLP([32, 32, 4], activate_final=False, name='mo_noise_generator')
        else:
            self.mo_noise_generator = snt.nets.MLP([32, 32, 2], activate_final=False, name='mo_noise_generator')

        # odometry model (if we want to learn it)
        if self.learn_odom:
            self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim], activate_final=False, name='mo_transition_model')

        # particle proposer that maps encodings to particles (if we want to use it)
        if self.use_proposer:
            self.particle_proposer = snt.Sequential([
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                lambda x: tf.nn.dropout(x,  proposer_keep_ratio),
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(4, name='particle_proposer/linear'),
                tf.nn.tanh,
            ])

        self.noise_scaler1 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler1', initializer=np.array(0.0, dtype='float32'))))
        self.noise_scaler2 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler2', initializer=np.array(0.0, dtype='float32'))))