Python sonnet.Conv1D() Examples

The following are 13 code examples of sonnet.Conv1D(), collected from open-source projects. The source file, originating project, and license are listed above each example. You may also want to check out all available functions/classes of the module sonnet.
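Before diving into the examples, here is a minimal usage sketch (Sonnet v1 with TensorFlow 1.x; the shapes and argument values are illustrative assumptions, not taken from any example below):

import sonnet as snt
import tensorflow as tf

# snt.Conv1D expects inputs of shape [batch, length, channels].
x = tf.placeholder(tf.float32, [None, 128, 16])  # hypothetical input shape
conv = snt.Conv1D(output_channels=32, kernel_shape=3, padding='SAME')
y = conv(x)  # [batch, 128, 32]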
Example #1
Source File: more_local_weight_update.py    From Gun-Detector with Apache License 2.0
def _build(self, x):
    # x is [units, bs, 1]
    net = tf.transpose(x, [1, 0, 2])  # now [bs x units x 1]
    channels = x.shape.as_list()[2]
    mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
    net = mod(net)
    net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
    net = tf.nn.relu(net)
    mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
    net = mod(net)
    net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
    net = tf.nn.relu(net)
    to_concat = tf.transpose(net, [1, 0, 2])
    if self.add:
      return x + to_concat
    else:
      return tf.concat([x, to_concat], 2) 
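This block treats the units axis as the convolution length: x arrives as [units, bs, 1], is transposed to [bs, units, 1], passed through two Conv1D + BatchNorm + ReLU stages that preserve the channel count, and transposed back; the result is then either added to x as a residual or concatenated with it along the channel axis, depending on self.add.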
Example #2
Source File: bounds_test.py    From interval-bound-propagation with Apache License 2.0
def testConv1dIntervalBounds(self):
    m = snt.Conv1D(
        output_channels=1,
        kernel_shape=2,
        padding='VALID',
        stride=1,
        use_bias=True,
        initializers={
            'w': tf.constant_initializer(1.),
            'b': tf.constant_initializer(2.),
        })
    z = tf.constant([3, 4], dtype=tf.float32)
    z = tf.reshape(z, [1, 2, 1])
    m(z)  # Connect to create weights.
    m = ibp.LinearConv1dWrapper(m)
    input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
    output_bounds = m.propagate_bounds(input_bounds)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      l, u = sess.run([output_bounds.lower, output_bounds.upper])
      l = l.item()
      u = u.item()
      self.assertAlmostEqual(7., l)
      self.assertAlmostEqual(11., u) 
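The expected bounds follow directly from the construction: with all weights set to 1 and a bias of 2, the single VALID window of width 2 computes z1 + z2 + 2, so the lower bound is (3 - 1) + (4 - 1) + 2 = 7 and the upper bound is (3 + 1) + (4 + 1) + 2 = 11.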
Example #3
Source File: more_local_weight_update.py    From g-tensorflow-models with Apache License 2.0

The code is identical to the _build method in Example #1 above.
Example #4
Source File: more_local_weight_update.py    From models with Apache License 2.0

The code is identical to the _build method in Example #1 above.
Example #5
Source File: more_local_weight_update.py    From multilabel-image-classification-tensorflow with MIT License

The code is identical to the _build method in Example #1 above.
Example #6
Source File: model.py    From interval-bound-propagation with Apache License 2.0
def _inputs_for_observed_module(self, subgraph):
    """Extracts input tensors from a connected Sonnet module.

    This default implementation supports common layer types, but should be
    overridden if custom layer types are to be supported.

    Args:
      subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
        connected, and its inputs and outputs.

    Returns:
      List of input tensors, or None if not a supported Sonnet module.
    """
    m = subgraph.module
    # Only support a few operations for now.
    if not (isinstance(m, snt.BatchReshape) or
            isinstance(m, snt.Linear) or
            isinstance(m, snt.Conv1D) or
            isinstance(m, snt.Conv2D) or
            isinstance(m, snt.BatchNorm) or
            isinstance(m, layers.ImageNorm)):
      return None

    if isinstance(m, snt.BatchNorm):
      return subgraph.inputs['input_batch'],
    else:
      return subgraph.inputs['inputs'], 
Example #7
Source File: model.py    From interval-bound-propagation with Apache License 2.0
def _wrapper_for_observed_module(self, subgraph):
    """Creates a wrapper for a connected Sonnet module.

    This default implementation supports common layer types, but should be
    overridden if custom layer types are to be supported.

    Args:
      subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
        connected, and its inputs and outputs.

    Returns:
      `ibp.VerifiableWrapper` for the Sonnet module.
    """
    m = subgraph.module
    if isinstance(m, snt.BatchReshape):
      shape = subgraph.outputs.get_shape()[1:].as_list()
      return verifiable_wrapper.BatchReshapeWrapper(m, shape)
    elif isinstance(m, snt.Linear):
      return verifiable_wrapper.LinearFCWrapper(m)
    elif isinstance(m, snt.Conv1D):
      return verifiable_wrapper.LinearConv1dWrapper(m)
    elif isinstance(m, snt.Conv2D):
      return verifiable_wrapper.LinearConv2dWrapper(m)
    elif isinstance(m, layers.ImageNorm):
      return verifiable_wrapper.ImageNormWrapper(m)
    else:
      assert isinstance(m, snt.BatchNorm)
      return verifiable_wrapper.BatchNormWrapper(m) 
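Together with _inputs_for_observed_module above, this is the extension point for custom layers. A hypothetical sketch of overriding both hooks (MyCustomModule and MyCustomWrapper are illustrative names, not part of ibp, and the base class is assumed to be ibp.VerifiableModelWrapper from this same model.py):

import interval_bound_propagation as ibp

class MyVerifiableModel(ibp.VerifiableModelWrapper):

  def _inputs_for_observed_module(self, subgraph):
    if isinstance(subgraph.module, MyCustomModule):  # hypothetical layer type
      return subgraph.inputs['inputs'],
    return super(MyVerifiableModel, self)._inputs_for_observed_module(subgraph)

  def _wrapper_for_observed_module(self, subgraph):
    if isinstance(subgraph.module, MyCustomModule):
      return MyCustomWrapper(subgraph.module)  # hypothetical wrapper class
    return super(MyVerifiableModel, self)._wrapper_for_observed_module(subgraph)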
Example #8
Source File: verifiable_wrapper.py    From interval-bound-propagation with Apache License 2.0
def __init__(self, module):
    if not isinstance(module, snt.Conv1D):
      raise ValueError('Cannot wrap {} with a LinearConv1dWrapper.'.format(
          module))
    super(LinearConv1dWrapper, self).__init__(module) 
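In practice the wrapper is constructed around an already-connected snt.Conv1D, as Examples #2 and #9 show; any other module type fails fast with the ValueError above.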
Example #9
Source File: fastlin_test.py    From interval-bound-propagation with Apache License 2.0
def testConv1dSymbolicBounds(self):
    m = snt.Conv1D(
        output_channels=1,
        kernel_shape=2,
        padding='VALID',
        stride=1,
        use_bias=True,
        initializers={
            'w': tf.constant_initializer(1.),
            'b': tf.constant_initializer(3.),
        })
    z = tf.constant([3, 4], dtype=tf.float32)
    z = tf.reshape(z, [1, 2, 1])
    m(z)  # Connect to create weights.
    m = ibp.LinearConv1dWrapper(m)
    input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
    input_bounds = ibp.SymbolicBounds.convert(input_bounds)
    output_bounds = m.propagate_bounds(input_bounds)
    output_bounds = ibp.IntervalBounds.convert(output_bounds)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      l, u = sess.run([output_bounds.lower, output_bounds.upper])
      l = l.item()
      u = u.item()
      self.assertAlmostEqual(8., l)
      self.assertAlmostEqual(12., u) 
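The arithmetic mirrors Example #2 with a bias of 3 instead of 2: the lower bound is (3 - 1) + (4 - 1) + 3 = 8 and the upper bound is (3 + 1) + (4 + 1) + 3 = 12. For a single linear layer the symbolic (fastlin) bounds are exact, so converting back to interval bounds reproduces the plain interval result.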
Example #10
Source File: more_local_weight_update.py    From Gun-Detector with Apache License 2.0
def compute_top_delta(self, z):
    """ parameterization of topD. This converts the top level activation
    to an error signal.
    Args:
      z: tf.Tensor
        batch of final layer post activations
    Returns
      delta: tf.Tensor
        the error signal
    """
    s_idx = 0
    with tf.variable_scope('compute_top_delta'), tf.device(self.remote_device):
      # Conv1D typically takes inputs of shape [BS, length, input_channels];
      # here it is applied so that the convolution runs over the batch dimension.
      act = tf.expand_dims(tf.transpose(z, [1, 0]), 2)  # [channels, BS, 1]

      mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[5])
      act = mod(act)

      act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
      act = tf.nn.relu(act)

      bs = act.shape.as_list()[0]
      act = tf.transpose(act, [2, 1, 0])
      act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
      act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
      act = tf.nn.relu(act)
      act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
      act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
      act = tf.nn.relu(act)
      act = tf.transpose(act, [2, 1, 0])

      prev_act = act
      for i in range(self.top_delta_layers):
        mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[3])
        act = mod(act)

        act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
        act = tf.nn.relu(act)

        prev_act = act

      mod = snt.Conv1D(output_channels=self.delta_dim, kernel_shape=[3])
      act = mod(act)

      # [bs, feature_channels, delta_channels]
      act = tf.transpose(act, [1, 0, 2])
      return act 
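Every snt.Conv1D in this function slides along the batch dimension: the activations are arranged as [units, BS, 1], and the transposes only change which axis plays the channel role. The first convolution lifts each unit's single channel to top_delta_size channels, the middle transposed pair mixes information across units, and the looped convolutions mix across the top_delta_size channels before the final projection to delta_dim. Note also that the local variable bs holds the size of the leading units axis, not the batch size.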
Example #11
Source File: more_local_weight_update.py    From g-tensorflow-models with Apache License 2.0

The code is identical to the compute_top_delta method in Example #10 above.
Example #12
Source File: more_local_weight_update.py    From models with Apache License 2.0

The code is identical to the compute_top_delta method in Example #10 above.
Example #13
Source File: more_local_weight_update.py    From multilabel-image-classification-tensorflow with MIT License

The code is identical to the compute_top_delta method in Example #10 above.