Python sonnet.Module() Examples

The following are 14 code examples of sonnet.Module(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module sonnet, or try the search function.
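Before the examples, here is a minimal sketch of the wrapping pattern most of them rely on, assuming Sonnet 1.x on TensorFlow 1.x, where snt.Module accepts a plain build function (in Sonnet 2.x, snt.Module is instead a base class to subclass). The build function, layer sizes and input shape below are illustrative assumptions, not taken from any of the projects listed:

import sonnet as snt
import tensorflow as tf


def simple_build(inputs):
  """A build function to be wrapped into a Sonnet module."""
  outputs = snt.Linear(output_size=32)(inputs)
  outputs = tf.nn.relu(outputs)
  return snt.Linear(output_size=10)(outputs)


inputs = tf.placeholder(tf.float32, shape=[None, 64])
module = snt.Module(simple_build, name='simple_net')  # wrap the build function
logits = module(inputs)  # connecting the module to the graph creates its variables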
Example #1
Source File: common.py    From Gun-Detector with Apache License 2.0    6 votes
def transformer_at_state(base_model, new_variables):
  """Get the base_model that has been transformed to use the variables
  in final_state.
  Args:
    base_model: snt.Module
      Goes from batch to features
    new_variables: list
      New list of variables to use
  Returns:
    func: callable of same api as base_model.
  """
  assert not variable_replace.in_variable_replace_scope()

  def _feature_transformer(input_data):
    """Feature transformer at the end of training."""
    initial_variables = base_model.get_variables()
    replacement = collections.OrderedDict(
        utils.eqzip(initial_variables, new_variables))
    with variable_replace.variable_replace(replacement):
      features = base_model(input_data)
    return features

  return _feature_transformer 
Example #2
Source File: agent.py    From bsuite with Apache License 2.0    6 votes
def make_ensemble(num_actions: int,
                  num_ensemble: int = 20,
                  num_hidden_layers: int = 2,
                  num_units: int = 50,
                  prior_scale: float = 3.) -> Sequence[snt.Module]:
  """Convenience function to make an ensemble from flags."""
  output_sizes = [num_units] * num_hidden_layers + [num_actions]
  ensemble = []
  for _ in range(num_ensemble):
    network = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(output_sizes),
    ])
    prior_network = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(output_sizes),
    ])
    ensemble.append(NetworkWithPrior(network, prior_network, prior_scale))
  return ensemble 
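As a usage note for the example above (a sketch assuming Sonnet 2.x with TensorFlow 2.x and that the make_ensemble and NetworkWithPrior definitions from this file are in scope; the batch and observation sizes are illustrative), each member of the returned ensemble is an ordinary Sonnet module that can be applied directly to a batch of observations:

ensemble = make_ensemble(num_actions=4, num_ensemble=3)
observations = tf.random.uniform([8, 6])  # 8 observations with 6 features each
q_values = [network(observations) for network in ensemble]  # each tensor has shape [8, 4]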
Example #3
Source File: dpf_kitti.py    From differentiable-particle-filters with MIT License    6 votes
def custom_build(self, inputs):
        """A custom build method to wrap into a sonnet Module."""
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = tf.nn.dropout(outputs,  self.placeholders['keep_prob'])
        outputs = snt.BatchFlatten()(outputs)
        outputs = snt.Linear(128)(outputs)
        outputs = tf.nn.relu(outputs)

        return outputs 
Example #4
Source File: common.py    From g-tensorflow-models with Apache License 2.0    6 votes
def transformer_at_state(base_model, new_variables):
  """Get the base_model that has been transformed to use the variables
  in final_state.
  Args:
    base_model: snt.Module
      Goes from batch to features
    new_variables: list
      New list of variables to use
  Returns:
    func: callable of same api as base_model.
  """
  assert not variable_replace.in_variable_replace_scope()

  def _feature_transformer(input_data):
    """Feature transformer at the end of training."""
    initial_variables = base_model.get_variables()
    replacement = collections.OrderedDict(
        utils.eqzip(initial_variables, new_variables))
    with variable_replace.variable_replace(replacement):
      features = base_model(input_data)
    return features

  return _feature_transformer 
Example #5
Source File: common.py    From models with Apache License 2.0    6 votes
def transformer_at_state(base_model, new_variables):
  """Get the base_model that has been transformed to use the variables
  in final_state.
  Args:
    base_model: snt.Module
      Goes from batch to features
    new_variables: list
      New list of variables to use
  Returns:
    func: callable of same api as base_model.
  """
  assert not variable_replace.in_variable_replace_scope()

  def _feature_transformer(input_data):
    """Feature transformer at the end of training."""
    initial_variables = base_model.get_variables()
    replacement = collections.OrderedDict(
        utils.eqzip(initial_variables, new_variables))
    with variable_replace.variable_replace(replacement):
      features = base_model(input_data)
    return features

  return _feature_transformer 
Example #6
Source File: common.py    From multilabel-image-classification-tensorflow with MIT License    6 votes
def transformer_at_state(base_model, new_variables):
  """Get the base_model that has been transformed to use the variables
  in final_state.
  Args:
    base_model: snt.Module
      Goes from batch to features
    new_variables: list
      New list of variables to use
  Returns:
    func: callable of same api as base_model.
  """
  assert not variable_replace.in_variable_replace_scope()

  def _feature_transformer(input_data):
    """Feature transformer at the end of training."""
    initial_variables = base_model.get_variables()
    replacement = collections.OrderedDict(
        utils.eqzip(initial_variables, new_variables))
    with variable_replace.variable_replace(replacement):
      features = base_model(input_data)
    return features

  return _feature_transformer 
Example #7
Source File: agent.py    From bsuite with Apache License 2.0    5 votes
def __init__(self,
               network: snt.Module,
               prior_network: snt.Module,
               prior_scale: float = 1.):
    super().__init__(name='network_with_prior')
    self._network = network
    self._prior_network = prior_network
    self._prior_scale = prior_scale 
Example #8
Source File: agent.py    From bsuite with Apache License 2.0    5 votes
def __init__(
      self,
      action_spec: specs.DiscreteArray,
      network: snt.Module,
      batch_size: int,
      discount: float,
      replay_capacity: int,
      min_replay_size: int,
      sgd_period: int,
      target_update_period: int,
      optimizer: snt.Optimizer,
      epsilon: float,
      seed: int = None,
  ):

    # Internalise hyperparameters.
    self._num_actions = action_spec.num_values
    self._discount = discount
    self._batch_size = batch_size
    self._sgd_period = sgd_period
    self._target_update_period = target_update_period
    self._epsilon = epsilon
    self._min_replay_size = min_replay_size

    # Seed the RNG.
    tf.random.set_seed(seed)
    self._rng = np.random.RandomState(seed)

    # Internalise the components (networks, optimizer, replay buffer).
    self._optimizer = optimizer
    self._replay = replay.Replay(capacity=replay_capacity)
    self._online_network = network
    self._target_network = copy.deepcopy(network)
    self._forward = tf.function(network)
    self._total_steps = tf.Variable(0) 
Example #9
Source File: mnist_multi_gpu_sonnet.py    From mnist-multi-gpu with Apache License 2.0    5 votes
def custom_build(inputs, is_training, keep_prob):
  """A custom build method to wrap into a sonnet Module."""
  x_inputs = tf.reshape(inputs, [-1, 28, 28, 1])
  outputs = snt.Conv2D(output_channels=32, kernel_shape=4, stride=2)(x_inputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=1024, kernel_shape=1, stride=1)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = snt.BatchFlatten()(outputs)
  outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
  outputs = snt.Linear(output_size=10)(outputs)
#  _activation_summary(outputs)
  return outputs 
Example #10
Source File: mnist_multi_gpu_sonnet.py    From mnist-multi-gpu with Apache License 2.0    5 votes
def evaluate():
    """Eval MNIST for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels for MNIST.
        mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
        images = mnist.test.images
        labels = mnist.test.labels

        # Build a Graph that computes the logits predictions from the
        # inference model.
        # The line below takes custom_build and wraps it to construct a sonnet Module.
        module_with_build_args = snt.Module(custom_build, name='simple_net')
        test_model_outputs = module_with_build_args(images, is_training=False,
                                                    keep_prob=tf.constant(1.0))

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(predictions=test_model_outputs, targets=labels, k=1)

        # Create saver to restore the learned variables for eval.
        saver = tf.train.Saver()

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print('No checkpoint file found')
                return

            true_count = np.sum(sess.run([top_k_op]))

            # Compute precision as the fraction of correctly classified images.
            precision = true_count / len(labels)
            print('%s: precision = %.3f' % (datetime.now(), precision)) 
Example #11
Source File: probe.py    From stacked_capsule_autoencoders with Apache License 2.0    5 votes
def classification_probe(features, labels, n_classes, labeled=None):
  """Classification probe with stopped gradient on features."""

  def _classification_probe(features):
    logits = snt.Linear(n_classes)(tf.stop_gradient(features))
    xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=labels)
    if labeled is not None:
      xe = xe * tf.to_float(labeled)
    xe = tf.reduce_mean(xe)
    acc = tf.reduce_mean(tf.to_float(tf.equal(tf.argmax(logits, axis=1),
                                              labels)))
    return xe, acc

  return snt.Module(_classification_probe)(features) 
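A hypothetical usage sketch for this probe, assuming TensorFlow 1.x graph mode (the code uses tf.to_float and the Sonnet 1.x build-function form of snt.Module) and that classification_probe is in scope; the placeholder shapes are illustrative:

features = tf.placeholder(tf.float32, shape=[None, 64])
labels = tf.placeholder(tf.int64, shape=[None])
xe_loss, accuracy = classification_probe(features, labels, n_classes=10)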
Example #12
Source File: agent.py    From bsuite with Apache License 2.0    4 votes
def __init__(
      self,
      obs_spec: specs.Array,
      action_spec: specs.DiscreteArray,
      ensemble: Sequence[snt.Module],
      batch_size: int,
      discount: float,
      replay_capacity: int,
      min_replay_size: int,
      sgd_period: int,
      target_update_period: int,
      optimizer: snt.Optimizer,
      mask_prob: float,
      noise_scale: float,
      epsilon_fn: Callable[[int], float] = lambda _: 0.,
      seed: int = None,
  ):
    """Bootstrapped DQN with additive prior functions."""
    # Agent components.
    self._ensemble = ensemble
    self._forward = [tf.function(net) for net in ensemble]
    self._target_ensemble = [copy.deepcopy(network) for network in ensemble]
    self._num_ensemble = len(ensemble)
    self._optimizer = optimizer
    self._replay = replay.Replay(capacity=replay_capacity)

    # Create variables for each network in the ensemble
    for network in ensemble:
      snt.build(network, (None, *obs_spec.shape))

    # Agent hyperparameters.
    self._num_actions = action_spec.num_values
    self._batch_size = batch_size
    self._sgd_period = sgd_period
    self._target_update_period = target_update_period
    self._min_replay_size = min_replay_size
    self._epsilon_fn = epsilon_fn
    self._mask_prob = mask_prob
    self._noise_scale = noise_scale
    self._rng = np.random.RandomState(seed)
    self._discount = discount

    # Agent state.
    self._total_steps = tf.Variable(1)
    self._active_head = 0
    tf.random.set_seed(seed) 
Example #13
Source File: mnist_multi_gpu_sonnet.py    From mnist-multi-gpu with Apache License 2.0    4 votes
def tower_loss(scope):
    """Calculate the total loss on a single tower running the MNIST model.

    Args:
      scope: unique prefix string identifying the MNIST tower, e.g. 'tower_0'

    Returns:
       Tensor of shape [] containing the total loss for a batch of data
    """
    # Input images and labels.

    images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=(FLAGS.num_epochs / FLAGS.num_gpus))
    # Build inference Graph.
    # The line below takes custom_build and
    # wraps it to construct a sonnet Module.
    module_with_build_args = snt.Module(custom_build, name='simple_net')

    train_model_outputs = module_with_build_args(images, is_training=True,
                                                 keep_prob=tf.constant(0.5))

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = loss(train_model_outputs, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    if FLAGS.tb_logging:
        for l in losses + [total_loss]:
            # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
            # training session. This helps the clarity of presentation on
            # tensorboard.
            loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
            tf.summary.scalar(loss_name, l)

    return total_loss 
Example #14
Source File: dpf_kitti.py    From differentiable-particle-filters with MIT License    4 votes
def build_modules(self, min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle):
        """
        :param min_obs_likelihood:
        :param proposer_keep_ratio:
        :return: None
        """

        # MEASUREMENT MODEL

        # conv net for encoding the image
        self.encoder = snt.Sequential([
            snt.nets.ConvNet2D([16, 16, 16, 16], [[7, 7], [5, 5], [5, 5], [5, 5]], [[1,1], [1, 2], [1, 2], [2, 2]], [snt.SAME], activate_final=True, name='encoder/convnet'),
            snt.BatchFlatten(),
            lambda x: tf.nn.dropout(x,  self.placeholders['keep_prob']),
            snt.Linear(128, name='encoder/linear'),
            tf.nn.relu
        ])

        # observation likelihood estimator that maps states and image encodings to probabilities
        self.obs_like_estimator = snt.Sequential([
            snt.Linear(128, name='obs_like_estimator/linear'),
            tf.nn.relu,
            snt.Linear(128, name='obs_like_estimator/linear'),
            tf.nn.relu,
            snt.Linear(1, name='obs_like_estimator/linear'),
            tf.nn.sigmoid,
            lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
        ], name='obs_like_estimator')

        # motion noise generator used for motion sampling
        if learn_gaussian_mle:
            self.mo_noise_generator = snt.nets.MLP([32, 32, 4], activate_final=False, name='mo_noise_generator')
        else:
            self.mo_noise_generator = snt.nets.MLP([32, 32, 2], activate_final=False, name='mo_noise_generator')

        # odometry model (if we want to learn it)
        if self.learn_odom:
            self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim], activate_final=False, name='mo_transition_model')

        # particle proposer that maps encodings to particles (if we want to use it)
        if self.use_proposer:
            self.particle_proposer = snt.Sequential([
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                lambda x: tf.nn.dropout(x,  proposer_keep_ratio),
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(128, name='particle_proposer/linear'),
                tf.nn.relu,
                snt.Linear(4, name='particle_proposer/linear'),
                tf.nn.tanh,
            ])

        self.noise_scaler1 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler1', initializer=np.array(0.0, dtype='float32'))))
        self.noise_scaler2 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler2', initializer=np.array(0.0, dtype='float32'))))