Python tensorflow.python.ops.nn.bias_add() Examples

The following are 30 code examples of tensorflow.python.ops.nn.bias_add(). Each example notes its original project and source file. You may also want to check out the other available functions and classes of the module tensorflow.python.ops.nn.
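For orientation, here is a minimal usage sketch of bias_add itself (tensor values are illustrative, not taken from any project below): the op broadcasts a 1-D bias over the channel dimension, which is the last dimension for NHWC data and dimension 1 for NCHW data.

import tensorflow as tf

# A batch of 4 feature vectors with 3 channels each.
x = tf.ones([4, 3])
b = tf.constant([0.1, 0.2, 0.3])
y = tf.nn.bias_add(x, b)  # bias broadcast over the last dimension

# For channels-first image tensors, pass data_format='NCHW' so the bias
# is added over dimension 1 instead of the last dimension.
img = tf.ones([4, 3, 8, 8])
y_img = tf.nn.bias_add(img, b, data_format='NCHW')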
Example #1
Source File: core.py    From auto-alt-text-lambda-api with MIT License
def call(self, inputs):
    shape = inputs.get_shape().as_list()
    input_dim = shape[-1]
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Reshape the input to 2D.
      output_shape_tensors = array_ops.unstack(array_ops.shape(inputs))
      output_shape_tensors[-1] = self.units
      output_shape_tensor = array_ops.stack(output_shape_tensors)
      inputs = array_ops.reshape(inputs, [-1, input_dim])

    outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)

    if len(output_shape) > 2:
      # Reshape the output back to the original ndim of the input.
      outputs = array_ops.reshape(outputs, output_shape_tensor)
      outputs.set_shape(output_shape)

    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs 
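The reshape round-trip in Example #1 is easiest to follow with concrete shapes. A hedged sketch (shapes and names are illustrative, not from the project):

import tensorflow as tf

# Rank-3 input [batch=2, time=5, input_dim=4], a dense layer with units=3.
x = tf.ones([2, 5, 4])
kernel = tf.ones([4, 3])
bias = tf.zeros([3])

flat = tf.reshape(x, [-1, 4])        # collapse to 2D: [10, 4]
out = tf.matmul(flat, kernel)        # [10, 3]
out = tf.nn.bias_add(out, bias)
out = tf.reshape(out, [2, 5, 3])     # restore the original rank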
Example #2
Source File: core.py    From keras-lambda with MIT License
def call(self, inputs):
    shape = inputs.get_shape().as_list()
    input_dim = shape[-1]
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Reshape the input to 2D.
      output_shape_tensors = array_ops.unstack(array_ops.shape(inputs))
      output_shape_tensors[-1] = self.units
      output_shape_tensor = array_ops.stack(output_shape_tensors)
      inputs = array_ops.reshape(inputs, [-1, input_dim])

    outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)

    if len(output_shape) > 2:
      # Reshape the output back to the original ndim of the input.
      outputs = array_ops.reshape(outputs, output_shape_tensor)
      outputs.set_shape(output_shape)

    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs 
Example #3
Source File: test_patch_bias_add.py    From framework-determinism with Apache License 2.0
def testDeterministicGradients(self):
    with self.session(force_gpu=True):
      # There are problems with using force_gpu=True and cached_session with
      # both eager mode and graph mode in the same test. Using a non-cached
      # session and putting everything inside the same session context is
      # a compromise.
      for op_binding in (tf.nn.bias_add, nn.bias_add, nn_ops.bias_add):
        for data_layout in ('channels_first', 'channels_last'):
          # With the selected layer configuration, at least in TensorFlow
          # version 2.0, when data_layout='channels_last', bias_add operates
          # deterministically by default. I don't know if this is true for
          # all layer configurations. These cases are still being tested here,
          # for completeness.
          for data_rank in (1, 2, 3):
            for data_type in (dtypes.float16, dtypes.float32, dtypes.float64):
              self._testDeterministicGradientsCase(op_binding, data_layout,
                                                   data_rank, data_type) 
Example #4
Source File: convolutional.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def call(self, inputs):
    # Apply the actual ops.
    if self.data_format == 'channels_last':
      strides = (1,) + self.strides + (1,)
    else:
      strides = (1, 1) + self.strides
    outputs = nn.separable_conv2d(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=strides,
        padding=self.padding.upper(),
        rate=self.dilation_rate,
        data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.use_bias:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #5
Source File: spectral_norm_dense.py    From tf2rl with MIT License
def call(self, inputs):
        w = self.compute_spectral_norm()
        inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
        rank = common_shapes.rank(inputs)
        if rank > 2:
            # Broadcasting is required for the inputs.
            outputs = standard_ops.tensordot(inputs, w, [[rank - 1], [0]])
            # Reshape the output back to the original ndim of the input.
            if not context.executing_eagerly():
                shape = inputs.get_shape().as_list()
                output_shape = shape[:-1] + [self.units]
                outputs.set_shape(output_shape)
        else:
            outputs = gen_math_ops.mat_mul(inputs, w)
        if self.use_bias:
            outputs = nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            return self.activation(outputs)  # pylint: disable=not-callable
        return outputs 
Example #6
Source File: layers.py    From AliNet with MIT License
def call(self, inputs, training=True):
        # BN if training
        if training:
            inputs = tf.keras.layers.BatchNormalization()(inputs)
        # dropout if training
        if training and self.dropout_rate > 0.0:
            inputs = dropout(inputs, self.dropout_rate, self.num_features_nonzero, self.is_sparse_inputs)
        if not training:
            print("gcn not training now")
        # convolve
        hidden_vectors = list()
        for i in range(len(self.adjs)):
            pre_sup = tf.matmul(inputs, self.kernels[i], a_is_sparse=self.is_sparse_inputs)
            hidden_vector = tf.sparse.sparse_dense_matmul(tf.cast(self.adjs[i], tf.float32), pre_sup)
            hidden_vectors.append(hidden_vector)
        outputs = tf.add_n(hidden_vectors)
        # bias
        if self.use_bias:
            outputs = nn.bias_add(outputs, self.bias)
        # activation
        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #7
Source File: core.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    if len(shape) > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                             [0]])
      # Reshape the output back to the original ndim of the input.
      if context.in_graph_mode():
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs 
Example #8
Source File: core.py    From lambda-packs with MIT License
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                             [0]])
      # Reshape the output back to the original ndim of the input.
      outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs 
Example #9
Source File: layers.py    From AliNet with MIT License
def call(self, inputs):
        inputs = dropout(inputs, self.dropout_rate, self.num_features_nonzero, self.is_sparse_inputs)
        outputs = tf.matmul(inputs, self.kernel, a_is_sparse=self.is_sparse_inputs)
        if self.use_bias:
            outputs = nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #10
Source File: separable_conv2d.py    From RoboND-DeepLearning-Project with MIT License
def call(self, inputs):
    if self.data_format == 'channels_first':
      # Reshape to channels last
      inputs = array_ops.transpose(inputs, (0, 2, 3, 1))

    # Apply the actual ops.
    outputs = separable_conv2d_tf_nn(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=(1,) + self.strides + (1,),
        padding=self.padding.upper(),
        rate=self.dilation_rate)

    if self.data_format == 'channels_first':
      # Reshape to channels first
      outputs = array_ops.transpose(outputs, (0, 3, 1, 2))

    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #11
Source File: test_patch_bias_add.py    From framework-determinism with Apache License 2.0
def _testBias(self, np_inputs, np_bias, use_gpu=False):
    np_val = self._npBias(np_inputs, np_bias)
    with self.cached_session(use_gpu=use_gpu):
      tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias))
    self.assertAllCloseAccordingToType(np_val, tf_val) 
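_npBias is a helper not shown in this snippet; judging from how it is used, it presumably computes the NumPy reference result. A hypothetical sketch (an assumption, not the project's actual helper):

import numpy as np

def _npBias(np_inputs, np_bias):
  # NumPy broadcasting aligns trailing axes, so adding a 1-D bias
  # mirrors tf.nn.bias_add's NHWC behavior.
  return np_inputs + np_bias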
Example #12
Source File: gdn.py    From pcc_geo_cnn with MIT License
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    ndim = self._input_rank

    if self.rectify:
      inputs = nn.relu(inputs)

    # Compute normalization pool.
    if ndim == 2:
      norm_pool = math_ops.matmul(math_ops.square(inputs), self.gamma)
      norm_pool = nn.bias_add(norm_pool, self.beta)
    elif self.data_format == "channels_last" and ndim <= 5:
      shape = self.gamma.shape.as_list()
      gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
      norm_pool = nn.convolution(math_ops.square(inputs), gamma, "VALID")
      norm_pool = nn.bias_add(norm_pool, self.beta)
    else:  # generic implementation
      # This puts channels in the last dimension regardless of input.
      norm_pool = math_ops.tensordot(
          math_ops.square(inputs), self.gamma, [[self._channel_axis()], [0]])
      norm_pool += self.beta
      if self.data_format == "channels_first":
        # Return to channels_first format if necessary.
        axes = list(range(ndim - 1))
        axes.insert(1, ndim - 1)
        norm_pool = array_ops.transpose(norm_pool, axes)

    if self.inverse:
      norm_pool = math_ops.sqrt(norm_pool)
    else:
      norm_pool = math_ops.rsqrt(norm_pool)
    outputs = inputs * norm_pool

    if not context.executing_eagerly():
      outputs.set_shape(self.compute_output_shape(inputs.shape))
    return outputs 
Example #13
Source File: head.py    From deep_image_model with Apache License 2.0
def _eval_op(self, features, labels, logits=None, logits_input=None,
               name="eval_op"):
    labels = _check_labels(labels, self._label_name)
    if self._enable_centered_bias:
      logits = nn.bias_add(logits, _centered_bias(
          self.logits_dimension,
          self._centered_bias_weight_collection))
    loss_unweighted = self._eval_loss_fn(logits, labels)
    loss, _ = _loss(loss_unweighted,
                    _weight_tensor(features, self._weight_column_name),
                    name=name)

    predictions = self._logits_to_prediction(logits)

    return predictions, loss 
Example #14
Source File: head.py    From deep_image_model with Apache License 2.0
def _infer_op(self, logits=None, logits_input=None):
    if self._enable_centered_bias:
      logits = nn.bias_add(logits, _centered_bias(
          self.logits_dimension,
          self._centered_bias_weight_collection))
    return self._logits_to_prediction(logits) 
Example #15
Source File: head.py    From deep_image_model with Apache License 2.0
def _eval_op(self, features, labels, logits=None, logits_input=None,
               name="eval_op"):
    labels = _check_labels(labels, self._label_name)
    if self._enable_centered_bias:
      logits = nn.bias_add(logits, _centered_bias(
          self.logits_dimension,
          self._centered_bias_weight_collection))
    loss_unweighted = self._eval_loss_fn(logits, labels)
    loss, _ = _loss(loss_unweighted,
                    _weight_tensor(features, self._weight_column_name),
                    name=name)

    predictions = self._logits_to_prediction(logits)

    return predictions, loss 
Example #16
Source File: layers.py    From tf-slim with Apache License 2.0
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    ndim = self._input_rank

    shape = self.gamma.get_shape().as_list()
    gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)

    # Compute normalization pool.
    if self.data_format == 'channels_first':
      norm_pool = nn.convolution(
          math_ops.square(inputs),
          gamma,
          'VALID',
          data_format='NC' + 'DHW'[-(ndim - 2):])
      if ndim == 3:
        norm_pool = array_ops.expand_dims(norm_pool, 2)
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
        norm_pool = array_ops.squeeze(norm_pool, [2])
      elif ndim == 5:
        shape = array_ops.shape(norm_pool)
        # `shape` is a tensor here, so build the target shape with concat;
        # `shape[:3] + [-1]` would add -1 elementwise instead of appending it.
        norm_pool = array_ops.reshape(
            norm_pool, array_ops.concat([shape[:3], [-1]], axis=0))
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
        norm_pool = array_ops.reshape(norm_pool, shape)
      else:  # ndim == 4
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
    else:  # channels_last
      norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID')
      norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NHWC')
    norm_pool = math_ops.sqrt(norm_pool)

    if self.inverse:
      outputs = inputs * norm_pool
    else:
      outputs = inputs / norm_pool
    outputs.set_shape(inputs.get_shape())
    return outputs 
Example #17
Source File: convolutional.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def call(self, inputs):
    outputs = self._convolution_op(inputs, self.kernel)

    if self.use_bias:
      if self.data_format == 'channels_first':
        if self.rank == 1:
          # nn.bias_add does not accept a 1D input tensor.
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        if self.rank == 2:
          outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
        if self.rank == 3:
          # As of Mar 2017, direct addition is significantly slower than
          # bias_add when computing gradients. To use bias_add, we collapse Z
          # and Y into a single dimension to obtain a 4D input tensor.
          outputs_shape = outputs.shape.as_list()
          outputs_4d = array_ops.reshape(outputs,
                                         [outputs_shape[0], outputs_shape[1],
                                          outputs_shape[2] * outputs_shape[3],
                                          outputs_shape[4]])
          outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
          outputs = array_ops.reshape(outputs_4d, outputs_shape)
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
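The rank-3 collapse trick above can be sketched in isolation (shapes are illustrative; a static batch size is assumed):

import tensorflow as tf

# NCDHW output of a 3-D convolution: [batch=2, channels=4, Z=3, Y=5, X=6].
out = tf.zeros([2, 4, 3, 5, 6])
bias = tf.ones([4])

out_4d = tf.reshape(out, [2, 4, 3 * 5, 6])   # merge Z and Y into one axis
out_4d = tf.nn.bias_add(out_4d, bias, data_format='NCHW')
out = tf.reshape(out_4d, [2, 4, 3, 5, 6])    # restore the 5-D shape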
Example #18
Source File: test_patch_bias_add.py    From framework-determinism with Apache License 2.0
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
    np_val = self._npBias(np_inputs, np_bias)
    np_inputs = self._NHWCToNCHW(np_inputs)
    with self.cached_session(use_gpu=use_gpu):
      tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias,
                                             data_format="NCHW"))
    tf_val = self._NCHWToNHWC(tf_val)
    self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val) 
Example #19
Source File: patch.py    From framework-determinism with Apache License 2.0
def _patch_bias_add():
  tf.nn.bias_add = _new_bias_add_1_14 # access via public API
  nn.bias_add = _new_bias_add_1_14 # called from tf.keras.layers.convolutional.Conv
  nn_ops.bias_add = _new_bias_add_1_14 # called from tests

# The original, pre-patched method can be viewed at
# https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/ops/nn_ops.py#L2628 
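Presumably the patch is applied once at startup, before any ops are created, so every later code path picks up the deterministic implementation. A hedged usage sketch (the call pattern is assumed, not documented here):

_patch_bias_add()  # rebind tf.nn.bias_add, nn.bias_add and nn_ops.bias_add
layer = tf.keras.layers.Conv2D(8, 3, use_bias=True)
# Conv2D's internal nn.bias_add call now routes to _new_bias_add_1_14.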
Example #20
Source File: convolutional.py    From keras-lambda with MIT License
def call(self, inputs):
    outputs = nn.convolution(
        input=inputs,
        filter=self.kernel,
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, self.rank + 2))
    if self.bias is not None:
      if self.rank != 2 and self.data_format == 'channels_first':
        # bias_add does not support channels_first for non-4D inputs.
        if self.rank == 1:
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
        if self.rank == 3:
          bias = array_ops.reshape(self.bias, (1, self.filters, 1, 1))
        outputs += bias
      else:
        outputs = nn.bias_add(
            outputs,
            self.bias,
            data_format=utils.convert_data_format(self.data_format, 4))
        # Note that we passed rank=4 because bias_add will only accept
        # NHWC and NCHW even if the rank of the inputs is 3 or 5.

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #21
Source File: convolutional.py    From keras-lambda with MIT License
def call(self, inputs):
    if self.data_format == 'channels_first':
      # Reshape to channels last
      inputs = array_ops.transpose(inputs, (0, 2, 3, 1))

    # Apply the actual ops.
    outputs = nn.separable_conv2d(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=(1,) + self.strides + (1,),
        padding=self.padding.upper(),
        rate=self.dilation_rate)

    if self.data_format == 'channels_first':
      # Reshape to channels first
      outputs = array_ops.transpose(outputs, (0, 3, 1, 2))

    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #22
Source File: convolutional.py    From auto-alt-text-lambda-api with MIT License
def call(self, inputs):
    if self.data_format == 'channels_first':
      # Reshape to channels last
      inputs = array_ops.transpose(inputs, (0, 2, 3, 1))

    # Apply the actual ops.
    outputs = nn.separable_conv2d(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=(1,) + self.strides + (1,),
        padding=self.padding.upper(),
        rate=self.dilation_rate)

    if self.data_format == 'channels_first':
      # Reshape to channels first
      outputs = array_ops.transpose(outputs, (0, 3, 1, 2))

    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #23
Source File: convolutional.py    From auto-alt-text-lambda-api with MIT License
def call(self, inputs):
    outputs = nn.convolution(
        input=inputs,
        filter=self.kernel,
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, self.rank + 2))
    if self.bias is not None:
      if self.rank != 2 and self.data_format == 'channels_first':
        # bias_add does not support channels_first for non-4D inputs.
        if self.rank == 1:
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
        if self.rank == 3:
          bias = array_ops.reshape(self.bias, (1, self.filters, 1, 1))
        outputs += bias
      else:
        outputs = nn.bias_add(
            outputs,
            self.bias,
            data_format=utils.convert_data_format(self.data_format, 4))
        # Note that we passed rank=4 because bias_add will only accept
        # NHWC and NCHW even if the rank of the inputs is 3 or 5.

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #24
Source File: backend.py    From lambda-packs with MIT License
def bias_add(x, bias, data_format=None):
  """Adds a bias vector to a tensor.

  Arguments:
      x: Tensor or variable.
      bias: Bias tensor to add.
      data_format: Data format for 3D, 4D or 5D tensors:
          one of "channels_first", "channels_last".

  Returns:
      Output tensor.

  Raises:
      ValueError: In case of invalid `data_format` argument.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if ndim(x) == 5:
    if data_format == 'channels_first':
      x += reshape(bias, (1, int_shape(bias)[0], 1, 1, 1))
    elif data_format == 'channels_last':
      x += reshape(bias, (1, 1, 1, 1, int_shape(bias)[0]))
  elif ndim(x) == 4:
    if data_format == 'channels_first':
      # No support yet for NCHW in bias_add.
      x += reshape(bias, (1, int_shape(bias)[0], 1, 1))
    elif data_format == 'channels_last':
      x = nn.bias_add(x, bias, data_format='NHWC')
  elif ndim(x) == 3:
    if data_format == 'channels_first':
      x += reshape(bias, (1, int_shape(bias)[0], 1))
    elif data_format == 'channels_last':
      x += reshape(bias, (1, 1, int_shape(bias)[0]))
  else:
    x = nn.bias_add(x, bias)
  return x


# RANDOMNESS 
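This backend helper is exposed publicly as tf.keras.backend.bias_add; a short usage sketch (values illustrative):

import tensorflow as tf

x = tf.zeros([2, 5, 5, 3])     # 4-D NHWC tensor: takes the nn.bias_add path
b = tf.constant([1., 2., 3.])
y = tf.keras.backend.bias_add(x, b, data_format='channels_last')

x_cf = tf.zeros([2, 3, 5, 5])  # channels_first takes the reshape-and-add path
y_cf = tf.keras.backend.bias_add(x_cf, b, data_format='channels_first')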
Example #25
Source File: convolutional.py    From lambda-packs with MIT License
def call(self, inputs):
    if self.data_format == 'channels_first':
      # Reshape to channels last
      inputs = array_ops.transpose(inputs, (0, 2, 3, 1))

    # Apply the actual ops.
    outputs = nn.separable_conv2d(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=(1,) + self.strides + (1,),
        padding=self.padding.upper(),
        rate=self.dilation_rate)

    if self.data_format == 'channels_first':
      # Reshape to channels first
      outputs = array_ops.transpose(outputs, (0, 3, 1, 2))

    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #26
Source File: convolutional.py    From lambda-packs with MIT License
def call(self, inputs):
    outputs = nn.convolution(
        input=inputs,
        filter=self.kernel,
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, self.rank + 2))

    if self.bias is not None:
      if self.data_format == 'channels_first':
        # bias_add only supports NHWC.
        # TODO(fchollet): remove this when `bias_add` is feature-complete.
        if self.rank == 1:
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        if self.rank == 2:
          bias = array_ops.reshape(self.bias, (1, self.filters, 1, 1))
          outputs += bias
        if self.rank == 3:
          # As of Mar 2017, direct addition is significantly slower than
          # bias_add when computing gradients. To use bias_add, we collapse Z
          # and Y into a single dimension to obtain a 4D input tensor.
          outputs_shape = outputs.shape.as_list()
          outputs_4d = array_ops.reshape(outputs,
                                         [outputs_shape[0], outputs_shape[1],
                                          outputs_shape[2] * outputs_shape[3],
                                          outputs_shape[4]])
          outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
          outputs = array_ops.reshape(outputs_4d, outputs_shape)
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #27
Source File: layers.py    From tensornets with MIT License
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    ndim = self._input_rank

    shape = self.gamma.get_shape().as_list()
    gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)

    # Compute normalization pool.
    if self.data_format == 'channels_first':
      norm_pool = nn.convolution(
          math_ops.square(inputs),
          gamma,
          'VALID',
          data_format='NC' + 'DHW'[-(ndim - 2):])
      if ndim == 3:
        norm_pool = array_ops.expand_dims(norm_pool, 2)
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
        norm_pool = array_ops.squeeze(norm_pool, [2])
      elif ndim == 5:
        shape = array_ops.shape(norm_pool)
        # `shape` is a tensor here, so build the target shape with concat;
        # `shape[:3] + [-1]` would add -1 elementwise instead of appending it.
        norm_pool = array_ops.reshape(
            norm_pool, array_ops.concat([shape[:3], [-1]], axis=0))
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
        norm_pool = array_ops.reshape(norm_pool, shape)
      else:  # ndim == 4
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
    else:  # channels_last
      norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID')
      norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NHWC')
    norm_pool = math_ops.sqrt(norm_pool)

    if self.inverse:
      outputs = inputs * norm_pool
    else:
      outputs = inputs / norm_pool
    outputs.set_shape(inputs.get_shape())
    return outputs 
Example #28
Source File: head.py    From keras-lambda with MIT License
def head_ops(self,
               features,
               labels,
               mode,
               train_op_fn,
               logits=None,
               logits_input=None,
               scope=None):
    """See `_Head`."""
    _check_mode_valid(mode)
    _check_logits_input_not_supported(logits, logits_input)

    centered_bias = None
    if self._enable_centered_bias:
      centered_bias = _centered_bias(self._logits_dimension, self.head_name)
      logits = nn.bias_add(logits, centered_bias)

    predictions = self._logits_to_predictions(logits)
    loss = None
    train_op = None
    eval_metric_ops = None
    if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
      labels_tensor = _to_labels_tensor(labels, self._label_name)
      loss = _training_loss(
          features,
          labels_tensor,
          logits,
          loss_fn=self._loss_fn,
          weight_column_name=self._weight_column_name,
          head_name=self.head_name)
      if (mode == model_fn.ModeKeys.TRAIN) and (train_op_fn is not None):
        train_op = _train_op(loss, labels_tensor, train_op_fn, centered_bias,
                             self._logits_dimension, self._loss_fn)
      eval_metric_ops = _eval_metric_ops(self._default_metrics(), features,
                                         labels, predictions)

    return model_fn.ModelFnOps(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        output_alternatives=self._create_output_alternatives(predictions)) 
Example #29
Source File: convolutional.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def call(self, inputs):
    inputs_shape = array_ops.shape(inputs)
    batch_size = inputs_shape[0]
    if self.data_format == 'channels_first':
      c_axis, h_axis, w_axis = 1, 2, 3
    else:
      c_axis, h_axis, w_axis = 3, 1, 2

    height, width = inputs_shape[h_axis], inputs_shape[w_axis]
    kernel_h, kernel_w = self.kernel_size
    stride_h, stride_w = self.strides

    # Infer the dynamic output shape:
    out_height = utils.deconv_output_length(height,
                                            kernel_h,
                                            self.padding,
                                            stride_h)
    out_width = utils.deconv_output_length(width,
                                           kernel_w,
                                           self.padding,
                                           stride_w)
    if self.data_format == 'channels_first':
      output_shape = (batch_size, self.filters, out_height, out_width)
      strides = (1, 1, stride_h, stride_w)
    else:
      output_shape = (batch_size, out_height, out_width, self.filters)
      strides = (1, stride_h, stride_w, 1)

    output_shape_tensor = array_ops.stack(output_shape)
    outputs = nn.conv2d_transpose(
        inputs,
        self.kernel,
        output_shape_tensor,
        strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, ndim=4))

    if context.in_graph_mode():
      # Infer the static output shape:
      out_shape = inputs.get_shape().as_list()
      out_shape[c_axis] = self.filters
      out_shape[h_axis] = utils.deconv_output_length(out_shape[h_axis],
                                                     kernel_h,
                                                     self.padding,
                                                     stride_h)
      out_shape[w_axis] = utils.deconv_output_length(out_shape[w_axis],
                                                     kernel_w,
                                                     self.padding,
                                                     stride_w)
      outputs.set_shape(out_shape)

    if self.use_bias:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Example #30
Source File: convolutional.py    From keras-lambda with MIT License
def call(self, inputs):
    inputs_shape = array_ops.shape(inputs)
    batch_size = inputs_shape[0]
    if self.data_format == 'channels_first':
      c_axis, h_axis, w_axis = 1, 2, 3
    else:
      c_axis, h_axis, w_axis = 3, 1, 2

    height, width = inputs_shape[h_axis], inputs_shape[w_axis]
    kernel_h, kernel_w = self.kernel_size
    stride_h, stride_w = self.strides

    def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
      if isinstance(dim_size, ops.Tensor):
        dim_size = math_ops.multiply(dim_size, stride_size)
      elif dim_size is not None:
        dim_size *= stride_size

      if padding == 'valid' and dim_size is not None:
        dim_size += max(kernel_size - stride_size, 0)
      return dim_size

    # Infer the dynamic output shape:
    out_height = get_deconv_dim(height, stride_h, kernel_h, self.padding)
    out_width = get_deconv_dim(width, stride_w, kernel_w, self.padding)

    if self.data_format == 'channels_first':
      output_shape = (batch_size, self.filters, out_height, out_width)
      strides = (1, 1, stride_h, stride_w)
    else:
      output_shape = (batch_size, out_height, out_width, self.filters)
      strides = (1, stride_h, stride_w, 1)

    output_shape_tensor = array_ops.stack(output_shape)
    outputs = nn.conv2d_transpose(
        inputs,
        self.kernel,
        output_shape_tensor,
        strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, ndim=4))

    # Infer the static output shape:
    out_shape = inputs.get_shape().as_list()
    out_shape[c_axis] = self.filters
    out_shape[h_axis] = get_deconv_dim(
        out_shape[h_axis], stride_h, kernel_h, self.padding)
    out_shape[w_axis] = get_deconv_dim(
        out_shape[w_axis], stride_w, kernel_w, self.padding)
    outputs.set_shape(out_shape)

    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs