Python sonnet.Linear() Examples

The following are 30 code examples of sonnet.Linear(). Each example is taken from an open-source project; the project, source file, and license are listed above each snippet. You may also want to check out all available functions and classes of the sonnet module.
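As a quick orientation before the examples: in Sonnet 1 on TensorFlow 1.x (which most of the snippets below use), snt.Linear is a module that is first constructed and then connected to a tensor by calling it. A minimal sketch, with illustrative shapes and names:

import tensorflow as tf
import sonnet as snt

inputs = tf.placeholder(tf.float32, shape=[None, 64])   # illustrative input batch
layer = snt.Linear(output_size=32, name='example_linear')
outputs = layer(inputs)        # connecting the module creates layer.w and layer.b
hidden = tf.nn.relu(outputs)   # activations are applied outside the module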
Example #1
Source File: layers.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 6 votes
def _build(self, inputs):
        """
        Apply a dense/fully connected layer followed by an optional activation function
        """
        self._layer = snt.Linear(self._output_size, self._add_bias, self._initializers,
                                 self._partitioners, self._regularizers, name='LinearWx')
        output = self._layer(inputs)
        # Add variables and the pre-activation output to GraphKeys collections
        if self._add_bias:
            tf.add_to_collection(GraphKeys.BIASES, self._layer.b)

        tf.add_to_collection(GraphKeys.WEIGHTS, self._layer.w)
        tf.add_to_collection(GraphKeys.PRE_ACTIVATIONS, output)

        if self._activation_fn is None or self._activation_fn == tf.identity:
            return output

        output = self._activation_fn(output)

        # Add to GraphKeys for activation output
        tf.add_to_collection(GraphKeys.ACTIVATIONS, output)
        return output

    # Below are convenience accessors for properties of the underlying layer 
Example #2
Source File: dpf_kitti.py    From differentiable-particle-filters with MIT License 6 votes
def custom_build(self, inputs):
        """A custom build method to wrap into a sonnet Module."""
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = tf.nn.dropout(outputs, self.placeholders['keep_prob'])
        outputs = snt.BatchFlatten()(outputs)
        outputs = snt.Linear(128)(outputs)
        outputs = tf.nn.relu(outputs)

        return outputs 
Example #3
Source File: bounds_test.py    From interval-bound-propagation with Apache License 2.0 6 votes
def testFCIntervalBounds(self):
    m = snt.Linear(1, initializers={
        'w': tf.constant_initializer(1.),
        'b': tf.constant_initializer(2.),
    })
    z = tf.constant([[1, 2, 3]], dtype=tf.float32)
    m(z)  # Connect to create weights.
    m = ibp.LinearFCWrapper(m)
    input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
    output_bounds = m.propagate_bounds(input_bounds)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      l, u = sess.run([output_bounds.lower, output_bounds.upper])
      l = l.item()
      u = u.item()
      self.assertAlmostEqual(5., l)
      self.assertAlmostEqual(11., u) 
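The expected bounds follow from simple interval arithmetic: every weight is 1 (positive), so the layer maps the interval endpoints directly, giving a lower bound of (0 + 1 + 2) + 2 = 5 and an upper bound of (2 + 3 + 4) + 2 = 11. A small NumPy sketch of the same computation (illustrative only, not part of the test):

import numpy as np

w = np.ones((3, 1))           # matches the constant 'w' initializer
b = 2.0                       # matches the constant 'b' initializer
z = np.array([[1., 2., 3.]])
lower = (z - 1.) @ w + b      # [[ 5.]]
upper = (z + 1.) @ w + b      # [[11.]]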
Example #4
Source File: model_test.py    From interval-bound-propagation with Apache License 2.0 6 votes
def testVerifiableModelWrapperResnet(self):
    def _build(z0, is_training=False):  # pylint: disable=unused-argument
      input_size = np.prod(z0.shape[1:])
      # We make a resnet-like structure.
      z = snt.Linear(input_size)(z0)
      z_left = tf.nn.relu(z)
      z_left = snt.Linear(input_size)(z_left)
      z = z_left + z0
      return snt.Linear(2)(z)

    z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
    wrapper = ibp.VerifiableModelWrapper(_build)
    logits = wrapper(z)
    self.assertLen(wrapper.input_wrappers, 1)
    self.assertLen(wrapper.modules, 5)
    # Check input has fanout 2, as it is the start of the resnet block.
    self.assertEqual(wrapper.fanout_of(wrapper.input_wrappers[0]), 2)
    for module in wrapper.modules:
      self.assertEqual(wrapper.fanout_of(module), 1)
    # Check propagation.
    self._propagation_test(wrapper, z, logits) 
Example #5
Source File: model.py    From leo with Apache License 2.0 6 votes
def decoder(self, inputs):
    with tf.variable_scope("decoder"):
      l2_regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
      orthogonality_reg = get_orthogonality_regularizer(
          self._orthogonality_penalty_weight)
      initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
      # 2 * embedding_dim, because we are returning means and variances
      decoder_module = snt.Linear(
          2 * self.embedding_dim,
          use_bias=False,
          regularizers={"w": l2_regularizer},
          initializers={"w": initializer},
      )
      outputs = snt.BatchApply(decoder_module)(inputs)
      self._orthogonality_reg = orthogonality_reg(decoder_module.w)
      return outputs 
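The decoder output therefore has 2 * embedding_dim channels; a hedged sketch of how a caller might separate the two halves (the split itself is not part of this snippet, and the names are illustrative):

# Illustrative only: separate the concatenated means and variances.
means, variances = tf.split(outputs, 2, axis=-1)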
Example #6
Source File: goal_nav_agent.py    From streetlearn with Apache License 2.0 6 votes
def _head(self, policy_input, heading, xy, target_xy):
    """Build the head of the agent: linear policy and value function, and pass
    the auxiliary outputs through.
    """

    # Linear policy and value function.
    policy_logits = snt.Linear(
        self._num_actions, name='policy_logits')(policy_input)
    baseline = tf.squeeze(snt.Linear(1, name='baseline')(policy_input), axis=-1)

    # Sample an action from the policy.
    new_action = tf.multinomial(
        policy_logits, num_samples=1, output_dtype=tf.int32)
    new_action = tf.squeeze(new_action, 1, name='new_action')

    return AgentOutput(
        new_action, policy_logits, baseline, heading, xy, target_xy) 
Example #7
Source File: autoencoder_mnist.py    From kfac with Apache License 2.0 6 votes
def __init__(self,
               output_sizes,
               regularizers=None,
               initializers=None,
               custom_getter=None,
               activation=_NONLINEARITY,
               activate_final=False,
               name='MLP'):

    super(MLPManualReg, self).__init__(custom_getter=custom_getter, name=name)

    self._output_sizes = output_sizes
    self._activation = activation
    self._activate_final = activate_final

    with self._enter_variable_scope():
      self._layers = [snt.Linear(self._output_sizes[i],
                                 name='linear_{}'.format(i),
                                 initializers=initializers,
                                 regularizers=regularizers,
                                 custom_getter=custom_getter,
                                 use_bias=True)
                      for i in range(len(self._output_sizes))] 
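The corresponding _build method is not included in this excerpt; a minimal sketch of how such a layer list is typically chained, assuming the standard Sonnet MLP pattern (a hypothetical reconstruction, not the project's code):

  def _build(self, inputs):
    # Chain the linear layers, applying the activation between them and,
    # if self._activate_final is set, after the last layer as well.
    net = inputs
    final_index = len(self._layers) - 1
    for i, layer in enumerate(self._layers):
      net = layer(net)
      if i < final_index or self._activate_final:
        net = self._activation(net)
    return net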
Example #8
Source File: agent.py    From bsuite with Apache License 2.0 5 votes
def __init__(self, hidden_sizes: Sequence[int], num_actions: int):
    super().__init__(name='policy_value_net')
    self._torso = snt.nets.MLP(hidden_sizes, activate_final=True, name='torso')
    self._core = snt.LSTM(hidden_sizes[-1], name='rnn')
    self._policy_head = snt.Linear(num_actions, name='policy_head')
    self._value_head = snt.Linear(1, name='value_head') 
Example #9
Source File: common.py    From g-tensorflow-models with Apache License 2.0 5 votes
def _build(self, x):
    x = tf.to_float(x)
    initializers = {"w": tf.truncated_normal_initializer(stddev=0.01)}
    lin = snt.Linear(self.size, use_bias=False, initializers=initializers)
    z = lin(x)

    scale = tf.constant(1., dtype=tf.float32)
    offset = tf.get_variable(
        "b",
        shape=[1, z.shape.as_list()[1]],
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        dtype=tf.float32
    )

    mean, var = tf.nn.moments(z, [0], keep_dims=True)
    z = ((z - mean) * tf.rsqrt(var + 1e-6)) * scale + offset

    x_p = self.activation_fn(z)

    return z, x_p

  # This needs to work by string name, sadly, due to how the variable
  # replacement works, and would also work even if the custom getter
  # approach was used.
  # This is verbose, but it should at least be clear as to what is going on.
  # TODO(lmetz) a better way to do this (the next 3 functions:
  #    _raw_name, w(), b() ) 
Example #10
Source File: mnist_multi_gpu_sonnet.py    From mnist-multi-gpu with Apache License 2.0 5 votes
def custom_build(inputs, is_training, keep_prob):
  """A custom build method to wrap into a sonnet Module."""
  x_inputs = tf.reshape(inputs, [-1, 28, 28, 1])
  outputs = snt.Conv2D(output_channels=32, kernel_shape=4, stride=2)(x_inputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=1024, kernel_shape=1, stride=1)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = snt.BatchFlatten()(outputs)
  outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
  outputs = snt.Linear(output_size=10)(outputs)
#  _activation_summary(outputs)
  return outputs 
Example #11
Source File: networks.py    From learning-to-learn with Apache License 2.0 5 votes
def _get_initializers(initializers, fields):
  """Produces a nn initialization `dict` (see Linear docs for a example).

  Grabs initializers for relevant fields if the first argument is a `dict` or
  reuses the same initializer for all fields otherwise. All initializers are
  processed using `_convert_to_initializer`.

  Args:
    initializers: Initializer or <variable, initializer> dictionary.
    fields: Fields nn is expecting for module initialization.

  Returns:
    nn initialization dictionary.
  """

  result = {}
  for f in fields:
    if isinstance(initializers, dict):
      if f in initializers:
        # Variable-specific initializer.
        result[f] = _convert_to_initializer(initializers[f])
    else:
      # Common initializer for all variables.
      result[f] = _convert_to_initializer(initializers)

  return result 
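For illustration, the two accepted forms behave as follows (a hypothetical sketch; `_convert_to_initializer` is assumed to pass a ready-made TensorFlow initializer through unchanged):

# Per-variable dictionary: only fields present in the dict get an entry.
_get_initializers({"w": tf.zeros_initializer()}, ("w", "b"))
# -> {"w": <zeros initializer>}

# Single initializer: the same initializer is reused for every field.
_get_initializers(tf.zeros_initializer(), ("w", "b"))
# -> {"w": <zeros initializer>, "b": <zeros initializer>}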
Example #12
Source File: models.py    From g-tensorflow-models with Apache License 2.0 5 votes
def q_zt(self, observations, prev_state, t):
    """Computes a distribution over z_t.

    Args:
      observations: a [batch_size, num_observations, state_size] Tensor.
      prev_state: a [batch_size, state_size] Tensor.
      t: The current timestep, an int Tensor.
    """
    # filter out unneeded past obs
    first_relevant_obs_index = int(math.floor(max(t-1, 0) / self.steps_per_obs))
    num_relevant_observations = self.num_obs - first_relevant_obs_index
    observations = observations[:,first_relevant_obs_index:,:]
    batch_size = tf.shape(prev_state)[0]
    # Concatenate the prev state and observations along the second axis (the
    # axis that is neither the batch nor the state-size axis), then flatten to
    # [batch_size, (num_relevant_observations + 1) * state_size] to feed into
    # the linear layer.
    q_input = tf.concat([observations, prev_state[:,tf.newaxis, :]], axis=1)
    q_input = tf.reshape(q_input,
                         [batch_size, (num_relevant_observations + 1) * self.state_size])
    q_mu = self.mus[t](q_input)
    q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min)
    q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1])
    q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma))
    tf.logging.info(
        "q(z_{t} | z_{tm1}, x_{obsf}:{obst}) ~ N(Linear([z_{tm1},x_{obsf}:{obst}]), sigma_{t})".format(
            **{"t": t,
               "tm1": t-1,
               "obsf": (first_relevant_obs_index+1)*self.steps_per_obs,
               "obst":self.steps_per_obs*self.num_obs}))
    return q_zt 
Example #13
Source File: models.py    From g-tensorflow-models with Apache License 2.0 5 votes
def __init__(self,
               state_size,
               num_timesteps,
               sigma_min=1e-5,
               dtype=tf.float32,
               sigma_init=1.,
               random_seed=None,
               graph_collection_name="R_VARS"):
    self.dtype = dtype
    self.sigma_min = sigma_min
    initializers = {"w": tf.truncated_normal_initializer(seed=random_seed),
                    "b": tf.zeros_initializer}
    self.graph_collection_name = graph_collection_name

    def custom_getter(getter, *args, **kwargs):
      out = getter(*args, **kwargs)
      ref = tf.get_collection_ref(self.graph_collection_name)
      if out not in ref:
        ref.append(out)
      return out

    self.mus = [
        snt.Linear(output_size=state_size,
                   initializers=initializers,
                   name="r_mu_%d" % t,
                   custom_getter=custom_getter)
        for t in xrange(num_timesteps)
    ]

    self.sigmas = [
        tf.get_variable(
            shape=[state_size],
            dtype=self.dtype,
            name="r_sigma_%d" % (t + 1),
            collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name],
            #initializer=tf.random_uniform_initializer(seed=random_seed, maxval=100))
            initializer=tf.constant_initializer(sigma_init))
        for t in xrange(num_timesteps)
    ] 
Example #14
Source File: models.py    From g-tensorflow-models with Apache License 2.0 5 votes
def __init__(self,
               state_size,
               num_timesteps,
               sigma_min=1e-5,
               dtype=tf.float32):
    self.state_size = state_size
    self.num_timesteps = num_timesteps
    self.sigma_min = sigma_min
    self.dtype = dtype
    self.bs = [
        tf.get_variable(
            shape=[state_size],
            dtype=self.dtype,
            name="b_%d" % (t + 1),
            initializer=tf.zeros_initializer) for t in xrange(num_timesteps)
    ]
    self.Bs = tf.cumsum(self.bs, reverse=True, axis=0)
    self.q_mus = [
        snt.Linear(output_size=state_size) for _ in xrange(num_timesteps)
    ]
    self.q_sigmas = [
        tf.get_variable(
            shape=[state_size],
            dtype=self.dtype,
            name="q_sigma_%d" % (t + 1),
            initializer=tf.zeros_initializer) for t in xrange(num_timesteps)
    ]
    self.r_mus = [
        tf.get_variable(
            shape=[state_size],
            dtype=self.dtype,
            name="r_mu_%d" % (t + 1),
            initializer=tf.zeros_initializer) for t in xrange(num_timesteps)
    ]
    self.r_sigmas = [
        tf.get_variable(
            shape=[state_size],
            dtype=self.dtype,
            name="r_sigma_%d" % (t + 1),
            initializer=tf.zeros_initializer) for t in xrange(num_timesteps)
    ] 
Example #15
Source File: models.py    From g-tensorflow-models with Apache License 2.0 5 votes
def __init__(self,
               state_size,
               num_obs,
               steps_per_obs,
               sigma_min=1e-5,
               dtype=tf.float32,
               random_seed=None):
    self.state_size = state_size
    self.sigma_min = sigma_min
    self.dtype = dtype
    self.steps_per_obs = steps_per_obs
    self.num_obs = num_obs
    self.num_timesteps = num_obs * steps_per_obs + 1

    initializers = {
      "w": tf.random_uniform_initializer(seed=random_seed),
      "b": tf.zeros_initializer
    }
    self.mus = [
        snt.Linear(output_size=state_size, initializers=initializers)
        for t in xrange(self.num_timesteps)
    ]
    self.sigmas = [
        tf.get_variable(
            shape=[state_size],
            dtype=self.dtype,
            name="q_sigma_%d" % (t + 1),
            initializer=tf.random_uniform_initializer(seed=random_seed))
        for t in xrange(self.num_timesteps)
    ] 
Example #16
Source File: common.py    From g-tensorflow-models with Apache License 2.0 5 votes
def __init__(self, size, use_bias=True, init_const_mag=True):
    self.size = size
    self.use_bias = use_bias
    self.init_const_mag = init_const_mag
    super(Linear, self).__init__(name="commonLinear") 
Example #17
Source File: common.py    From g-tensorflow-models with Apache License 2.0 5 votes
def _build(self, x):
    if self.init_const_mag:
      initializers = {"w": tf.truncated_normal_initializer(stddev=0.01)}
    else:
      initializers = {}
    lin = snt.Linear(self.size, use_bias=self.use_bias, initializers=initializers)
    z = lin(x)
    return z

  # This needs to work by string name, sadly, due to how the variable
  # replacement works, and would also work even if the custom getter
  # approach was used.
  # This is verbose, but it should at least be clear as to what is going on.
  # TODO(lmetz) a better way to do this (the next 3 functions:
  #    _raw_name, w(), b() ) 
Example #18
Source File: model.py    From grid-cells with Apache License 2.0 5 votes
def _build(self, init_conds, vels, training=False):
    """Outputs place, and head direction cell predictions from velocity inputs.

    Args:
      init_conds: Initial conditions given by ensemble activations, list [BxN_i]
      vels:  Translational and angular velocities [BxTxV]
      training: Activates and deactivates dropout

    Returns:
      [logits_i]:
        logits_i: Logits predicting i-th ensemble activations (BxTxN_i)
    """
    # Calculate initialization for LSTM. Concatenate pc and hdc activations
    concat_init = tf.concat(init_conds, axis=1)

    init_lstm_state = snt.Linear(self._nh_lstm, name="state_init")(concat_init)
    init_lstm_cell = snt.Linear(self._nh_lstm, name="cell_init")(concat_init)
    self._core.training = training

    # Run LSTM
    output_seq, final_state = tf.nn.dynamic_rnn(cell=self._core,
                                                inputs=(vels,),
                                                time_major=False,
                                                initial_state=(init_lstm_state,
                                                               init_lstm_cell))
    ens_targets = output_seq[:-2]
    bottleneck = output_seq[-2]
    lstm_output = output_seq[-1]
    # Return
    return (ens_targets, bottleneck, lstm_output), final_state 
Example #19
Source File: agent.py    From bsuite with Apache License 2.0 5 votes
def __init__(self, hidden_sizes: Sequence[int],
               action_spec: specs.DiscreteArray):
    super().__init__(name='policy_value_net')
    self._torso = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(hidden_sizes, activate_final=True),
    ])
    self._policy_head = snt.Linear(action_spec.num_values)
    self._value_head = snt.Linear(1)
    self._action_dtype = action_spec.dtype 
Example #20
Source File: codec.py    From vae-seq with Apache License 2.0 5 votes
def _build_game_over(self, hidden):
        """Parameters for the game over prediction."""
        lin = snt.Linear(2, name="game_over")
        loc, scale_unproj = tf.unstack(lin(hidden), axis=-1)
        scale = util.positive_projection(self._hparams)(scale_unproj)
        return loc, scale 
Example #21
Source File: codec.py    From vae-seq with Apache License 2.0 5 votes
def _build_score(self, hidden):
        """Parameters for the game score prediction."""
        lin = snt.Linear(2, name="score")
        loc, scale_unproj = tf.unstack(lin(hidden), axis=-1)
        scale = util.positive_projection(self._hparams)(scale_unproj)
        return loc, scale 
Example #22
Source File: codec.py    From vae-seq with Apache License 2.0 5 votes
def _build_game_output(self, hidden):
        """Parameters for the game output prediction."""
        game_outputs = np.product(self._hparams.game_output_size)
        lin = snt.Linear(2 * game_outputs, name="game_obs")
        loc, scale_diag_unproj = tf.split(lin(hidden), 2, axis=-1)
        scale_diag = util.positive_projection(self._hparams)(scale_diag_unproj)
        return loc, scale_diag 
Example #23
Source File: robust_model.py    From interval-bound-propagation with Apache License 2.0 5 votes
def add_train_ops(self,
                    num_classes,
                    joint_rep,
                    minibatch):
    """Add ops for training in the computation graph.

    Args:
      num_classes: number of classes to predict in the task.
      joint_rep: the joint sentence representation if the input is sentence
        pairs or the representation for the sentence if the input is a single
        sentence.
      minibatch: a minibatch of sequences of embeddings.
    Returns:
      train_accuracy: the accuracy on the training dataset
      loss: training loss.
      opt_step: training op.
    """
    if self.linear_classifier is None:
      classifier_layers = []
      classifier_layers.append(snt.Linear(num_classes))
      self.linear_classifier = snt.Sequential(classifier_layers)
    logits = self.linear_classifier(joint_rep)
    # Losses and optimizer.
    def get_loss(logits, labels):
      return tf.reduce_mean(
          tf.nn.sparse_softmax_cross_entropy_with_logits(
              labels=labels, logits=logits))

    loss = get_loss(logits, minibatch.sentiment)
    train_accuracy = utils.get_accuracy(logits, minibatch.sentiment)
    opt_step = self._add_optimize_op(loss)
    return train_accuracy, loss, opt_step 
Example #24
Source File: models.py    From interval-bound-propagation with Apache License 2.0 5 votes
def _build(self, padded_word_embeddings, length):
    x = padded_word_embeddings
    for layer in self._config['conv_architecture']:
      if isinstance(layer, tuple) or isinstance(layer, list):
        filters, kernel_size, pooling_size = layer
        conv = snt.Conv1D(
            output_channels=filters,
            kernel_shape=kernel_size)
        x = conv(x)
        if pooling_size and pooling_size > 1:
          x = _max_pool_1d(x, pooling_size)
      elif layer == 'relu':
        x = tf.nn.relu(x)
        if self._keep_prob < 1:
          x = tf.nn.dropout(x, keep_prob=self._keep_prob)
      else:
        raise RuntimeError('Bad layer type {} in conv'.format(layer))
    # Final layer pools over the remaining sequence length to get a
    # fixed sized vector.
    if self._pooling == 'max':
      x = tf.reduce_max(x, axis=1)
    elif self._pooling == 'average':
      x = tf.reduce_sum(x, axis=1)
      lengths = tf.expand_dims(tf.cast(length, tf.float32), axis=1)
      x = x / lengths

    if self._config['conv_fc1']:
      fc1_layer = snt.Linear(output_size=self._config['conv_fc1'])
      x = tf.nn.relu(fc1_layer(x))
      if self._keep_prob < 1:
        x = tf.nn.dropout(x, keep_prob=self._keep_prob)
    if self._config['conv_fc2']:
      fc2_layer = snt.Linear(output_size=self._config['conv_fc2'])
      x = tf.nn.relu(fc2_layer(x))
      if self._keep_prob < 1:
        x = tf.nn.dropout(x, keep_prob=self._keep_prob)

    return x 
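The 'conv_architecture' entry is thus a list mixing (filters, kernel_size, pooling_size) tuples with the string 'relu'; a hypothetical config consistent with the parsing above (values are illustrative):

config = {
    'conv_architecture': [(100, 5, 1), 'relu', (100, 5, 2), 'relu'],
    'conv_fc1': 128,   # 0 or None skips this extra fully connected layer
    'conv_fc2': 0,
}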
Example #25
Source File: attacks_test.py    From interval-bound-propagation with Apache License 2.0 5 votes
def testEndToEnd(self, predictor_cls, attack_cls, optimizer_cls, epsilon,
                   restarted=False):
    # l-\infty norm of perturbation ball.
    if isinstance(epsilon, list):
      # We test the ability to have different epsilons across dimensions.
      epsilon = tf.constant([epsilon], dtype=tf.float32)
    bounds = (-.5, 2.5)
    # Create a simple network.
    m = snt.Linear(1, initializers={
        'w': tf.constant_initializer(1.),
        'b': tf.constant_initializer(1.),
    })
    z = tf.constant([[1, 2]], dtype=tf.float32)
    predictor = predictor_cls(m, self)
    # Not important for the test but needed.
    labels = tf.constant([1], dtype=tf.int64)

    # We create two attacks to maximize and then minimize the output.
    max_spec = ibp.LinearSpecification(tf.constant([[[1.]]]))
    max_attack = attack_cls(predictor, max_spec, epsilon, input_bounds=bounds,
                            optimizer_builder=optimizer_cls)
    if restarted:
      max_attack = ibp.RestartedAttack(max_attack, num_restarts=10)
    z_max = max_attack(z, labels)
    min_spec = ibp.LinearSpecification(tf.constant([[[-1.]]]))
    min_attack = attack_cls(predictor, min_spec, epsilon, input_bounds=bounds,
                            optimizer_builder=optimizer_cls)
    if restarted:
      min_attack = ibp.RestartedAttack(min_attack, num_restarts=10)
    z_min = min_attack(z, labels)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      z_max_values, z_min_values = sess.run([z_max, z_min])
      z_max_values = z_max_values[0]
      z_min_values = z_min_values[0]
      self.assertAlmostEqual(2., z_max_values[0])
      self.assertAlmostEqual(2.5, z_max_values[1])
      self.assertAlmostEqual(0., z_min_values[0])
      self.assertAlmostEqual(1., z_min_values[1]) 
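The asserted values are consistent with a scalar perturbation radius of 1 (epsilon is a test parameter, not shown in this excerpt): maximizing w·x + b pushes each coordinate up by epsilon, giving clip([1 + 1, 2 + 1], -0.5, 2.5) = [2, 2.5], while minimizing pushes each coordinate down, giving [1 - 1, 2 - 1] = [0, 1] with no clipping needed.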
Example #26
Source File: bounds_test.py    From interval-bound-propagation with Apache License 2.0 5 votes
def testCaching(self):
    m = snt.Linear(1, initializers={
        'w': tf.constant_initializer(1.),
        'b': tf.constant_initializer(2.),
    })
    z = tf.placeholder(shape=(1, 3), dtype=tf.float32)
    m(z)  # Connect to create weights.
    m = ibp.LinearFCWrapper(m)
    input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
    output_bounds = m.propagate_bounds(input_bounds)

    input_bounds.enable_caching()
    output_bounds.enable_caching()
    update_all_caches_op = tf.group([input_bounds.update_cache_op,
                                     output_bounds.update_cache_op])

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())

      # Initialise the caches based on the model inputs.
      sess.run(update_all_caches_op, feed_dict={z: [[1., 2., 3.]]})

      l, u = sess.run([output_bounds.lower, output_bounds.upper])
      l = l.item()
      u = u.item()
      self.assertAlmostEqual(5., l)
      self.assertAlmostEqual(11., u)

      # Update the cache based on a different set of inputs.
      sess.run([output_bounds.update_cache_op], feed_dict={z: [[2., 3., 7.]]})
      # We only updated the output bounds' cache.
      # This asserts that the computation depends on the underlying
      # input bounds tensor, not on cached version of it.
      # (Thus it doesn't matter what order the caches are updated.)

      l, u = sess.run([output_bounds.lower, output_bounds.upper])
      l = l.item()
      u = u.item()
      self.assertAlmostEqual(11., l)
      self.assertAlmostEqual(17., u) 
Example #27
Source File: plain_agent.py    From streetlearn with Apache License 2.0 5 votes
def _head(self, core_output):
    """Build the head of the agent: linear policy and value function."""
    policy_logits = snt.Linear(
        self._num_actions, name='policy_logits')(
            core_output)
    baseline = tf.squeeze(snt.Linear(1, name='baseline')(core_output), axis=-1)

    # Sample an action from the policy.
    new_action = tf.multinomial(
        policy_logits, num_samples=1, output_dtype=tf.int32)
    new_action = tf.squeeze(new_action, 1, name='new_action')

    return AgentOutput(new_action, policy_logits, baseline) 
Example #28
Source File: model_test.py    From interval-bound-propagation with Apache License 2.0 5 votes
def testLeakyRelu(self):
    def _build(z0):
      z = snt.Linear(10)(z0)
      z = tf.nn.leaky_relu(z, alpha=0.375)
      return snt.Linear(2)(z)

    z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
    wrapper = ibp.VerifiableModelWrapper(_build)
    logits = wrapper(z)
    self.assertLen(wrapper.modules, 3)
    self.assertEqual(wrapper.modules[1].module.__name__, 'leaky_relu')
    self.assertEqual(wrapper.modules[1].parameters['alpha'], 0.375)
    # Check propagation.
    self._propagation_test(wrapper, z, logits) 
Example #29
Source File: model_test.py    From interval-bound-propagation with Apache License 2.0 5 votes
def testVerifiableModelWrapperSimple(self, fn, expected_modules):
    def _build(z0):
      z = snt.Linear(10)(z0)
      z = fn(z)
      return snt.Linear(2)(z)

    z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
    wrapper = ibp.VerifiableModelWrapper(_build)
    logits = wrapper(z)
    self.assertLen(wrapper.modules, expected_modules)
    # Check propagation.
    self._propagation_test(wrapper, z, logits) 
Example #30
Source File: networks.py    From learning-to-learn with Apache License 2.0 5 votes
def __init__(self, output_size, layers, preprocess_name="identity",
               preprocess_options=None, scale=1.0, initializer=None,
               name="deep_lstm"):
    """Creates an instance of `StandardDeepLSTM`.

    Args:
      output_size: Output size of the final linear layer.
      layers: Output sizes of LSTM layers.
      preprocess_name: Gradient preprocessing class name (in `l2l.preprocess` or
          tf modules). Default is `tf.identity`.
      preprocess_options: Gradient preprocessing options.
      scale: Gradient scaling (default is 1.0).
      initializer: Variable initializer for linear layer. See `snt.Linear` and
          `snt.LSTM` docs for more info. This parameter can be a string (e.g.
          "zeros" will be converted to tf.zeros_initializer).
      name: Module name.
    """
    super(StandardDeepLSTM, self).__init__(name=name)

    self._output_size = output_size
    self._scale = scale

    if hasattr(preprocess, preprocess_name):
      preprocess_class = getattr(preprocess, preprocess_name)
      self._preprocess = preprocess_class(**preprocess_options)
    else:
      self._preprocess = getattr(tf, preprocess_name)

    with tf.variable_scope(self._template.variable_scope):
      self._cores = []
      for i, size in enumerate(layers, start=1):
        name = "lstm_{}".format(i)
        init = _get_layer_initializers(initializer, name,
                                       ("w_gates", "b_gates"))
        self._cores.append(snt.LSTM(size, name=name, initializers=init))
      self._rnn = snt.DeepRNN(self._cores, skip_connections=False,
                              name="deep_rnn")

      init = _get_layer_initializers(initializer, "linear", ("w", "b"))
      self._linear = snt.Linear(output_size, name="linear", initializers=init)