Python tensorflow.abs() Examples

The following are 30 code examples of tensorflow.abs(), drawn from open-source projects. The source file and originating project are listed above each example. You may also want to check out the other available functions and classes of the tensorflow module.
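As a quick orientation before the project examples, here is a minimal, self-contained sketch of what tf.abs() returns for real and complex inputs (assuming TensorFlow 2.x eager execution; values are illustrative):

import tensorflow as tf

# Element-wise absolute value of a real tensor.
x = tf.constant([[-2.5, 0.0, 3.0]])
print(tf.abs(x))   # [[2.5 0.  3. ]]

# For complex tensors, tf.abs returns the element-wise magnitude sqrt(re^2 + im^2).
z = tf.constant([3.0 + 4.0j])
print(tf.abs(z))   # [5.]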
Example #1
Source File: common_layers.py    From fine-lm with MIT License
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    x = tf.layers.dense(x, depth * 2, activation=None)
    x, gating_x = tf.split(x, 2, axis=-1)
    return x * tf.nn.sigmoid(gating_x) 
Example #2
Source File: losses.py    From DOTA_models with Apache License 2.0
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: A float tensor of shape [batch_size, num_anchors].

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
    """
    diff = prediction_tensor - target_tensor
    abs_diff = tf.abs(diff)
    abs_diff_lt_1 = tf.less(abs_diff, 1)
    anchorwise_smooth_l1norm = tf.reduce_sum(
        tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
        2) * weights
    if self._anchorwise_output:
      return anchorwise_smooth_l1norm
    return tf.reduce_sum(anchorwise_smooth_l1norm) 
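As a sanity check on the smooth L1 branches used above, here is a small self-contained sketch (assuming TensorFlow 2.x; not part of the original project): residuals with |x| < 1 take the quadratic branch, the rest take the linear branch.

import tensorflow as tf

diff = tf.constant([0.2, -0.5, 2.0])            # example residuals
abs_diff = tf.abs(diff)
loss = tf.where(abs_diff < 1.0,
                0.5 * tf.square(abs_diff),      # 0.5 * x^2 for |x| < 1
                abs_diff - 0.5)                 # |x| - 0.5 otherwise
print(loss)   # [0.02  0.125 1.5  ]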
Example #3
Source File: loss_functions.py    From Adversarial_Video_Generation with MIT License
def lp_loss(gen_frames, gt_frames, l_num):
    """
    Calculates the sum of lp losses between the predicted and ground truth frames.

    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale.
    @param l_num: 1 or 2 for l1 and l2 loss, respectively.

    @return: The lp loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(gen_frames)):
        scale_losses.append(tf.reduce_sum(tf.abs(gen_frames[i] - gt_frames[i])**l_num))

    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses)) 
Example #4
Source File: TargetingSystem.py    From poeai with MIT License
def GetItemPixels(self, I):
        '''
        Locates items that should be picked up on the screen
        '''
        ws = [8, 14]
        D1 = np.abs(I - np.array([10.8721,  12.8995,  13.9932])).sum(axis = 2) < 15
        D2 = np.abs(I - np.array([118.1302, 116.0938, 106.9063])).sum(axis = 2) < 76
        R1 = view_as_windows(D1, ws, ws).sum(axis = (2, 3))
        R2 = view_as_windows(D2, ws, ws).sum(axis = (2, 3))
        FR = ((R1 + R2 / np.prod(ws)) >= 1.0) & (R1 > 10) & (R2 > 10)
        PL = np.transpose(np.nonzero(FR)) * np.array(ws)
        if len(PL) <= 0:
            return []
        bc = Birch(threshold = 50, n_clusters = None)
        bc.fit(PL)
        return bc.subcluster_centers_ 
Example #5
Source File: __init__.py    From spleeter with MIT License
def _build_stft_feature(self):
        """ Compute STFT of waveform and slice the STFT in segment
         with the right length to feed the network.
        """

        stft_name = self.stft_name
        spec_name = self.spectrogram_name

        if stft_name not in self._features:
            stft_feature = tf.transpose(
                stft(
                    tf.transpose(self._features['waveform']),
                    self._frame_length,
                    self._frame_step,
                    window_fn=lambda frame_length, dtype: (
                        hann_window(frame_length, periodic=True, dtype=dtype)),
                    pad_end=True),
                perm=[1, 2, 0])
            self._features[f'{self._mix_name}_stft'] = stft_feature
        if spec_name not in self._features:
            self._features[spec_name] = tf.abs(
                pad_and_partition(self._features[stft_name], self._T))[:, :, :self._F, :] 
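The tf.abs() call above converts the complex-valued STFT into a magnitude spectrogram. A minimal, self-contained equivalent using tf.signal (TensorFlow 2.x assumed; the frame parameters are illustrative, not Spleeter's actual configuration):

import tensorflow as tf

waveform = tf.random.normal([16000])            # one second of mono audio at 16 kHz
stft = tf.signal.stft(waveform,
                      frame_length=1024,
                      frame_step=256,
                      window_fn=tf.signal.hann_window,
                      pad_end=True)             # complex64 tensor of shape [frames, 513]
magnitude = tf.abs(stft)                        # float32 magnitude spectrogram
print(magnitude.shape)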
Example #6
Source File: optimizer.py    From tensorflow-XNN with MIT License
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
        beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)

        eps = 1e-7  # cap for moving average

        m = self.get_slot(var, "m")
        m_slice = tf.gather(m, grad.indices)
        m_t = state_ops.scatter_update(m, grad.indices,
                                       tf.maximum(beta_t * m_slice + eps, tf.abs(grad.values)))
        m_t_slice = tf.gather(m_t, grad.indices)

        var_update = state_ops.scatter_sub(var, grad.indices, lr_t * grad.values * tf.exp(
            tf.log(alpha_t) * tf.sign(grad.values) * tf.sign(m_t_slice)))  # Update 'ref' by subtracting 'value'.
        # Create an op that groups multiple operations.
        # When this op finishes, all ops in the input have finished.
        return control_flow_ops.group(*[var_update, m_t]) 
Example #7
Source File: expert_utils.py    From fine-lm with MIT License
def __init__(self, pad_mask):
    """Compute and store the location of the padding.

    Args:
      pad_mask (tf.Tensor): Reference padding tensor of shape
        [batch_size,length] or [dim_origin] (dim_origin=batch_size*length)
        containing non-zero positive values to indicate padding locations.
    """
    self.nonpad_ids = None
    self.dim_origin = None

    with tf.name_scope("pad_reduce/get_ids"):
      pad_mask = tf.reshape(pad_mask, [-1])  # Flatten the batch
      # nonpad_ids contains the coordinates of the zero rows. As pad_mask is
      # float32, checking zero equality is done with |x| < epsilon, with
      # epsilon=1e-9 as standard; here pad_mask only contains positive values,
      # so tf.abs would be redundant.
      self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
      self.dim_origin = tf.shape(pad_mask)[:1] 
Example #8
Source File: optimizer.py    From tensorflow-XNN with MIT License
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
        alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)

        eps = 1e-7  # cap for moving average

        m = self.get_slot(var, "m")
        m_slice = tf.gather(m, grad.indices)
        m_t = state_ops.scatter_update(m, grad.indices,
                                       tf.maximum(beta_t * m_slice + eps, tf.abs(grad.values)))
        m_t_slice = tf.gather(m_t, grad.indices)

        var_update = state_ops.scatter_sub(var, grad.indices,
                                           lr_t * grad.values * (
                                                   1.0 + alpha_t * tf.sign(grad.values) * tf.sign(m_t_slice)))

        # Create an op that groups multiple operations.
        # When this op finishes, all ops in the input have finished.
        return control_flow_ops.group(*[var_update, m_t]) 
Example #9
Source File: diet.py    From fine-lm with MIT License
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q 
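The abs/sign split in _quantize relies on the identity x == tf.sign(x) * tf.abs(x), so only the magnitude has to be scaled and rounded. A tiny illustrative check (TensorFlow 2.x assumed):

import tensorflow as tf

x = tf.constant([-1.7, 0.0, 2.3])
abs_x, sign_x = tf.abs(x), tf.sign(x)
print(sign_x * abs_x)   # [-1.7  0.   2.3] -- reconstructs x exactly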
Example #10
Source File: losses.py    From object_detector_app with MIT License
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: A float tensor of shape [batch_size, num_anchors].

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
    """
    diff = prediction_tensor - target_tensor
    abs_diff = tf.abs(diff)
    abs_diff_lt_1 = tf.less(abs_diff, 1)
    anchorwise_smooth_l1norm = tf.reduce_sum(
        tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
        2) * weights
    if self._anchorwise_output:
      return anchorwise_smooth_l1norm
    return tf.reduce_sum(anchorwise_smooth_l1norm) 
Example #11
Source File: neural_gpu.py    From fine-lm with MIT License
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            name="cgru_%d" % layer)
      # Padding input is zeroed out in the modality; we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True) 
Example #12
Source File: attacks_tfe.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def generate(self, x, **kwargs):
        """
        Generates the adversarial sample for the given input.
        :param x: The model's inputs.
        :param eps: (optional float) attack step size (input variation)
        :param ord: (optional) Order of the norm (mimics NumPy).
                    Possible values: np.inf, 1 or 2.
        :param y: (optional) A tf variable with the model labels. Only provide
                  this parameter if you'd like to use true labels when crafting
                  adversarial samples. Otherwise, model predictions are used as
                  labels to avoid the "label leaking" effect (explained in this
                  paper: https://arxiv.org/abs/1611.01236). Default is None.
                  Labels should be one-hot-encoded.
        :param y_target: (optional) A tf variable with the labels to target.
                            Leave y_target=None if y is also set.
                            Labels should be one-hot-encoded.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)
        labels, nb_classes = self.get_or_guess_labels(x, kwargs)
        return self.fgm(x, labels=labels, targeted=(self.y_target is not None)) 
Example #13
Source File: loss.py    From GroundeR with MIT License
def smooth_l1_regression_loss(scores, labels, thres=1.0, is_mean=True):
    # L1(x) = 0.5x^2 (|x|<thres)
    # L1(x) = |x|-0.5 (|x|>=thres)
    diff =  tf.abs(scores - labels)
    thres_mat = thres*tf.ones(diff.get_shape())
    # thres_mat = thres*tf.ones((40, 4))
    smooth_sign = tf.cast(tf.less(diff, thres_mat), tf.float32)

    smooth_opt1 = 0.5*tf.multiply(diff, diff)
    smooth_opt2 = diff-0.5

    loss_mat = tf.multiply(smooth_opt1, smooth_sign) + tf.multiply(smooth_opt2, (1.0-smooth_sign))

    if is_mean:
        loss = tf.reduce_mean(loss_mat)
    else:
        loss = loss_mat
    return loss 
Example #14
Source File: model.py    From dataiku-contrib with Apache License 2.0
def trim_zeros_graph(boxes, name='trim_zeros'):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.
    boxes: [N, 4] matrix of boxes.
    non_zeros: [N] a 1D boolean mask identifying the rows to keep
    """
    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
    boxes = tf.boolean_mask(boxes, non_zeros, name=name)
    return boxes, non_zeros 
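The zero-row test above works because a box row is all zeros exactly when the sum of its absolute coordinates is zero. A self-contained sketch of the same pattern (TensorFlow 2.x assumed; values are illustrative):

import tensorflow as tf

boxes = tf.constant([[10., 20., 30., 40.],
                     [ 0.,  0.,  0.,  0.],
                     [ 5.,  5., 15., 15.]])
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
print(tf.boolean_mask(boxes, non_zeros))   # keeps rows 0 and 2, drops the all-zero row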
Example #15
Source File: cenDoubleLomaxAlgorithms.py    From decompose with MIT License
def fit(cls, parameters: Dict[str, Tensor],
            data: tf.Tensor) -> Dict[str, Tensor]:
        parameters = LomaxAlgorithms.fit(parameters=parameters,
                                         data=tf.abs(data))
        return(parameters) 
Example #16
Source File: cenDoubleLomaxAlgorithms.py    From decompose with MIT License
def llh(cls, parameters: Dict[str, Tensor], data: tf.Tensor) -> Tensor:
        llh = LomaxAlgorithms.llh(parameters=parameters,
                                  data=tf.abs(data)) - np.log(2.)
        return(llh) 
Example #17
Source File: cenDoubleLomaxAlgorithms.py    From decompose with MIT License
def pdf(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
        pdf = LomaxAlgorithms.pdf(parameters=parameters, data=tf.abs(data))/2.
        return(pdf) 
Example #18
Source File: tensorFactorisation.py    From decompose with MIT License
def random(cls,
               priorU: List[Distribution],
               likelihood: Likelihood,
               M: Tuple[int, ...],
               K: int,
               dtype: tf.DType,
               phase: Phase,
               stopCriterion,
               noiseUniformity: NoiseUniformity = HOMOGENEOUS,
               transform: bool = False) -> "TensorFactorisation":

        # initialize U
        dtype = tf.as_dtype(dtype)
        zero = tf.constant(0., dtype=dtype)
        one = tf.constant(1., dtype=dtype)
        normal = tf.distributions.Normal(loc=zero, scale=one)
        F = len(M)
        U = []
        for f in range(F):
            if priorU[f].nonNegative:
                UfInit = tf.abs(normal.sample(sample_shape=(K, M[f])))
            else:
                UfInit = normal.sample(sample_shape=(K, M[f]))
            U.append(UfInit)

        # instantiate
        tefa = TensorFactorisation(U=U,
                                   priorU=priorU,
                                   likelihood=likelihood,
                                   dtype=dtype,
                                   phase=phase,
                                   transform=transform,
                                   noiseUniformity=noiseUniformity,
                                   stopCriterion=stopCriterion)
        return(tefa) 
Example #19
Source File: cenDoubleLomaxAlgorithms.py    From decompose with MIT License
def fitLatents(cls, parameters: Dict[str, Tensor],
                   data: Tensor) -> Dict[str, Tensor]:
        parameters = LomaxAlgorithms.fitLatents(parameters=parameters,
                                                data=tf.abs(data))
        return(parameters) 
Example #20
Source File: localizer.py    From cnn-levelset with MIT License
def smooth_l1(x):
    x = tf.abs(x)

    x = tf.select(
        tf.less(x, 1),
        tf.mul(tf.square(x), 0.5),
        tf.sub(x, 0.5)
    )

    x = tf.reshape(x, shape=[-1, 4])
    x = tf.reduce_sum(x, 1)

    return x 
Example #21
Source File: actor_net.py    From RDPG with MIT License
def length(self,data):
        used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2))
        length = tf.reduce_sum(used, reduction_indices=1)
        length = tf.cast(length, tf.int32)
        return length 
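This is a common trick for recovering sequence lengths from zero-padded batches: a timestep counts as used if any of its features is non-zero. A small illustrative run (TensorFlow 2.x assumed, using axis instead of the deprecated reduction_indices; the data is made up):

import tensorflow as tf

# Two sequences of 4 timesteps with 3 features each; trailing timesteps are zero padding.
data = tf.constant([[[1., 0., 2.], [0., 3., 0.], [0., 0., 0.], [0., 0., 0.]],
                    [[4., 4., 4.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]])
used = tf.sign(tf.reduce_max(tf.abs(data), axis=2))    # 1.0 where a timestep has any non-zero feature
length = tf.cast(tf.reduce_sum(used, axis=1), tf.int32)
print(length)   # [2 1]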
Example #22
Source File: utils.py    From Adversarial_Video_Generation with MIT License
def sharp_diff_error(gen_frames, gt_frames):
    """
    Computes the Sharpness Difference error between the generated images and the ground truth
    images.

    @param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
                       generator model.
    @param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
                      each frame in gen_frames.

    @return: A scalar tensor. The Sharpness Difference error over each frame in the batch.
    """
    shape = tf.shape(gen_frames)
    num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])

    # gradient difference
    # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
    # TODO: Could this be simplified with one filter [[-1, 2], [0, -1]]?
    pos = tf.constant(np.identity(3), dtype=tf.float32)
    neg = -1 * pos
    filter_x = tf.expand_dims(tf.pack([neg, pos]), 0)  # [-1, 1]
    filter_y = tf.pack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
    strides = [1, 1, 1, 1]  # stride of (1, 1)
    padding = 'SAME'

    gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
    gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
    gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
    gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))

    gen_grad_sum = gen_dx + gen_dy
    gt_grad_sum = gt_dx + gt_dy

    grad_diff = tf.abs(gt_grad_sum - gen_grad_sum)

    batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(grad_diff, [1, 2, 3])))
    return tf.reduce_mean(batch_errors) 
Example #23
Source File: loss_functions.py    From Adversarial_Video_Generation with MIT License
def gdl_loss(gen_frames, gt_frames, alpha):
    """
    Calculates the sum of GDL losses between the predicted and ground truth frames.

    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale
    @param alpha: The power to which each gradient term is raised.

    @return: The GDL loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(gen_frames)):
        # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
        pos = tf.constant(np.identity(3), dtype=tf.float32)
        neg = -1 * pos
        filter_x = tf.expand_dims(tf.pack([neg, pos]), 0)  # [-1, 1]
        filter_y = tf.pack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
        strides = [1, 1, 1, 1]  # stride of (1, 1)
        padding = 'SAME'

        gen_dx = tf.abs(tf.nn.conv2d(gen_frames[i], filter_x, strides, padding=padding))
        gen_dy = tf.abs(tf.nn.conv2d(gen_frames[i], filter_y, strides, padding=padding))
        gt_dx = tf.abs(tf.nn.conv2d(gt_frames[i], filter_x, strides, padding=padding))
        gt_dy = tf.abs(tf.nn.conv2d(gt_frames[i], filter_y, strides, padding=padding))

        grad_diff_x = tf.abs(gt_dx - gen_dx)
        grad_diff_y = tf.abs(gt_dy - gen_dy)

        scale_losses.append(tf.reduce_sum((grad_diff_x ** alpha + grad_diff_y ** alpha)))

    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses)) 
Example #24
Source File: common_layers.py    From fine-lm with MIT License
def ravanbakhsh_set_layer(layer_size,
                          inputs,
                          mask=None,
                          sequential=False,
                          activation_fn=tf.nn.tanh,
                          dropout=0.0,
                          name=None):
  """Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .

  More parameter-efficient version of a linear-set-layer with context.

  Args:
    layer_size: Dimension to transform the input vectors to.
    inputs: A tensor of shape [batch_size, sequence_length, vector]
      containing the sequences of input vectors.
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.
    sequential: If true, will use a running global pool so each element will
      only depend on those before it. Set true if this layer is being used in
      an output sequence.
    activation_fn: The activation function to use.
    dropout: dropout.
    name: name.

  Returns:
    Tensor of shape [batch_size, sequence_length, vector] containing the
    sequences of transformed vectors.
  """
  del dropout
  with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]):
    if sequential:
      return linear_set_layer(
          layer_size,
          inputs - running_global_pool_1d(inputs),
          activation_fn=activation_fn,
          name=name)
    return linear_set_layer(
        layer_size,
        inputs - tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1),
        activation_fn=activation_fn,
        name=name) 
Example #25
Source File: common_layers.py    From fine-lm with MIT License
def belu(x):
  """Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
  x_shape = shape_list(x)
  x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
  y1 = tf.nn.elu(x1)
  y2 = -tf.nn.elu(-x2)
  return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape) 
Example #26
Source File: model_train.py    From ICDAR-2019-SROIE with MIT License
def smooth_l1_dist(deltas, sigma2=9.0, name='smooth_l1_dist'):
    with tf.name_scope(name=name) as scope:
        deltas_abs = tf.abs(deltas)
        smoothL1_sign = tf.cast(tf.less(deltas_abs, 1.0 / sigma2), tf.float32)
        return tf.square(deltas) * 0.5 * sigma2 * smoothL1_sign + \
               (deltas_abs - 0.5 / sigma2) * tf.abs(smoothL1_sign - 1) 
Example #27
Source File: common_attention.py    From fine-lm with MIT License
def embedding_to_padding(emb):
  """Calculates the padding mask based on which embeddings are all zero.

  We have hacked symbol_modality to return all-zero embeddings for padding.

  Args:
    emb: a Tensor with shape [..., depth].

  Returns:
    a float Tensor with shape [...]. Each element is 1 if its corresponding
    embedding vector is all zero, and is 0 otherwise.
  """
  emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1)
  return tf.to_float(tf.equal(emb_sum, 0.0)) 
Example #28
Source File: common_attention.py    From fine-lm with MIT License
def attention_bias_proximal(length):
  """Bias for self-attention to encourage attention to close positions.

  Args:
    length: an integer scalar.

  Returns:
    a Tensor with shape [1, 1, length, length]
  """
  r = tf.to_float(tf.range(length))
  diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1)
  return tf.expand_dims(tf.expand_dims(-tf.log(1 + tf.abs(diff)), 0), 0) 
Example #29
Source File: ops.py    From Generative-Latent-Optimization-Tensorflow with MIT License
def lrelu(x, leak=0.2, name="lrelu"):
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
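        # Note: f1 * x + f2 * abs(x) equals x for x >= 0 and leak * x for x < 0,
        # i.e. a branch-free leaky ReLU.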
        return f1 * x + f2 * abs(x) 
Example #30
Source File: ops.py    From Generative-Latent-Optimization-Tensorflow with MIT License
def huber_loss(labels, predictions, delta=1.0):
    residual = tf.abs(predictions - labels)
    condition = tf.less(residual, delta)
    small_res = 0.5 * tf.square(residual)
    large_res = delta * residual - 0.5 * tf.square(delta)
    return tf.where(condition, small_res, large_res)
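A quick usage check of the huber_loss above (assuming the definition is in scope and TensorFlow 2.x eager execution; the numbers are illustrative): a residual of 0.5 falls in the quadratic region and 3.0 in the linear region.

import tensorflow as tf

labels = tf.constant([0.0, 0.0])
predictions = tf.constant([0.5, 3.0])
print(huber_loss(labels, predictions, delta=1.0))   # [0.125 2.5  ]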