Python tensorflow.RegisterGradient() Examples

The following are 21 code examples of tensorflow.RegisterGradient(). You can go to the original project or source file by following the links above each example.
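All of the examples share one TensorFlow 1.x (graph-mode) pattern: register a gradient function under a fresh name with tf.RegisterGradient, then rebind an existing op type to that name inside a graph.gradient_override_map scope. Here is a minimal, self-contained sketch of that pattern; the name "StraightThroughSquare" and the pass-through gradient are illustrative, not taken from any project below.

import tensorflow as tf  # assumes TensorFlow 1.x graph mode

@tf.RegisterGradient("StraightThroughSquare")
def _straight_through_square_grad(op, grad):
    # Illustrative: pass the upstream gradient through unchanged,
    # ignoring the true derivative of Square.
    return grad

x = tf.placeholder(tf.float32, shape=())
g = tf.get_default_graph()

# Ops created inside this scope use the overridden gradient.
with g.gradient_override_map({"Square": "StraightThroughSquare"}):
    y = tf.square(x)

dy_dx, = tf.gradients(y, x)
with tf.Session() as sess:
    print(sess.run(dy_dx, feed_dict={x: 3.0}))  # 1.0 instead of 6.0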
Example #1
Source File: utils_pytorch.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _py_func_with_gradient(func, inp, Tout, stateful=True, name=None,
                           grad_func=None):
    """
    PyFunc defined as given by Tensorflow
    :param func: Custom Function
    :param inp: Function Inputs
    :param Tout: Ouput Type of out Custom Function
    :param stateful: Calculate Gradients when stateful is True
    :param name: Name of the PyFunction
    :param grad: Custom Gradient Function
    :return:
    """
    # Generate a random name to avoid conflicts with built-in names
    # (getrandbits comes from Python's random module)
    rnd_name = 'PyFuncGrad-' + '%0x' % getrandbits(30 * 4)

    # Register Tensorflow Gradient
    tf.RegisterGradient(rnd_name)(grad_func)

    # Get current graph
    g = tf.get_default_graph()

    # Add gradient override map
    with g.gradient_override_map(
            {"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
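A hypothetical usage sketch, assuming TensorFlow 1.x; the numpy forward function and the analytic gradient below are illustrative, not part of the project:

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

def _np_square(x):
    # Forward pass runs as plain numpy code.
    return np.square(x).astype(np.float32)

def _square_grad(op, grad):
    # Analytic gradient of x**2, expressed with TF ops: dy/dx = 2x.
    return grad * 2.0 * op.inputs[0]

x = tf.constant([1.0, 2.0, 3.0])
y = _py_func_with_gradient(_np_square, [x], tf.float32,
                           grad_func=_square_grad)
dy_dx, = tf.gradients(y, x)
with tf.Session() as sess:
    print(sess.run(dy_dx))  # [2. 4. 6.]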
Example #2
Source File: saliency-maps.py    From tensorpack with Apache License 2.0
@contextmanager  # from contextlib; makes the yield below usable in a with-statement
def guided_relu():
    """
    Returns:
        A context where the gradient of :meth:`tf.nn.relu` is replaced by
        guided back-propagation, as described in the paper:
        `Striving for Simplicity: The All Convolutional Net
        <https://arxiv.org/abs/1412.6806>`_
    """
    from tensorflow.python.ops import gen_nn_ops   # noqa

    @tf.RegisterGradient("GuidedReLU")
    def GuidedReluGrad(op, grad):
        return tf.where(0. < grad,
                        gen_nn_ops.relu_grad(grad, op.outputs[0]),
                        tf.zeros(grad.get_shape()))

    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': 'GuidedReLU'}):
        yield 
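A usage sketch, assuming TensorFlow 1.x and that contextmanager has been imported from contextlib; the dense layer is illustrative:

import tensorflow as tf  # TensorFlow 1.x

x = tf.placeholder(tf.float32, shape=(None, 10))
with guided_relu():
    # Relu ops created inside the context backpropagate with the guided rule.
    h = tf.nn.relu(tf.layers.dense(x, 5))
saliency, = tf.gradients(tf.reduce_sum(h), x)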
Example #3
Source File: utils_pytorch.py    From cleverhans with MIT License
def _py_func_with_gradient(func, inp, Tout, stateful=True, name=None,
                           grad_func=None):
  """
  PyFunc defined as given by Tensorflow
  :param func: Custom Function
  :param inp: Function Inputs
  :param Tout: Ouput Type of out Custom Function
  :param stateful: Calculate Gradients when stateful is True
  :param name: Name of the PyFunction
  :param grad: Custom Gradient Function
  :return:
  """
  # Generate a random name to avoid conflicts with built-in names
  # (getrandbits comes from Python's random module)
  rnd_name = 'PyFuncGrad-' + '%0x' % getrandbits(30 * 4)

  # Register Tensorflow Gradient
  tf.RegisterGradient(rnd_name)(grad_func)

  # Get current graph
  g = tf.get_default_graph()

  # Add gradient override map
  with g.gradient_override_map({"PyFunc": rnd_name,
                                "PyFuncStateless": rnd_name}):
    return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
Example #4
Source File: recurrent.py    From neupy with MIT License
def clip_gradient(value, clip_value):
    if not hasattr(clip_gradient, 'added_gradients'):
        clip_gradient.added_gradients = set()

    session = tensorflow_session()
    graph = session.graph
    operation_name = "ClipGradient-" + str(clip_value)

    if operation_name not in clip_gradient.added_gradients:
        # Make sure that we won't create the same operation twice.
        # Otherwise tensorflow will trigger an exception.
        @tf.RegisterGradient(operation_name)
        def clip_gradient_grad(op, grad):
            return tf.clip_by_value(grad, -clip_value, clip_value)

        clip_gradient.added_gradients.add(operation_name)

    with graph.gradient_override_map({"Identity": operation_name}):
        return tf.identity(value) 
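A usage sketch, assuming TensorFlow 1.x and that neupy's tensorflow_session() helper is importable; the values are illustrative:

import tensorflow as tf  # TensorFlow 1.x

x = tf.Variable(3.0)
y = clip_gradient(x ** 2, clip_value=0.1)

# The gradient of 1.0 arriving at the identity node is clipped to 0.1,
# then flows back through x**2: 0.1 * 2 * 3 = 0.6 instead of 6.0.
dy_dx, = tf.gradients(y, x)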
Example #5
Source File: flipGradientTF.py    From gradient_reversal_keras_tf with GNU General Public License v3.0
def reverse_gradient(X, hp_lambda):
    '''Flips the sign of the incoming gradient during training.'''
    try:
        reverse_gradient.num_calls += 1
    except AttributeError:
        reverse_gradient.num_calls = 1

    grad_name = "GradientReversal%d" % reverse_gradient.num_calls

    @tf.RegisterGradient(grad_name)
    def _flip_gradients(op, grad):
        return [tf.negative(grad) * hp_lambda]

    g = K.get_session().graph
    with g.gradient_override_map({'Identity': grad_name}):
        y = tf.identity(X)

    return y 
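A usage sketch, assuming TensorFlow 1.x with a Keras session available through K.get_session(); the values are illustrative:

import tensorflow as tf  # TensorFlow 1.x

x = tf.constant(2.0)
y = reverse_gradient(x ** 2, hp_lambda=0.5)

# The forward value is unchanged (4.0); the backward pass is negated and
# scaled: dy/dx = -0.5 * 2 * 2 = -2.0 instead of 4.0.
dy_dx, = tf.gradients(y, x)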
Example #6
Source File: tf.py    From deep500 with BSD 3-Clause "New" or "Revised" License
def _create_op_handle(compiled_op):
    op = compiled_op.op
    handle = int(compiled_op.lib.create_new_op(
        _to_c_array(op.inputs), len(op.inputs),
        _to_c_array(op.outputs), len(op.outputs)))
    
    # Forward
    def op_functor(*args, **kwargs):
        return compiled_op.op_func(*args, op_handle_ptr=handle, **kwargs)
    
    # Backward
    def op_grad_functor(tfop, *args, **kwargs):
        return compiled_op.op_grad_func(
            *(args + tuple(tfop.inputs) + tuple(tfop.outputs)),
            op_handle_ptr=handle, **kwargs)

    try:
        tf.RegisterGradient('TfOp' + op.name)(op_grad_functor)
    except KeyError:
        print("Warning: Gradient already registered to another handle")

    return op_functor, compiled_op.lib, handle 
Example #7
Source File: model.py    From ICPR_TextDection with GNU General Public License v3.0
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
Example #8
Source File: utils.py    From aitom with GNU General Public License v3.0
def py_func(func, inp, Tout, stateful=True, name=None, grad_func=None):
    rand_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rand_name)(grad_func)
    g = tf.get_default_graph()
    with g.gradient_override_map({'PyFunc': rand_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #9
Source File: tensorflow.py    From deepx with MIT License
def cholesky(self, A, lower=True, warn=True, correct=False):
        assert lower is True

        # Gradient through py_func adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342
        def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
            rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
            tf.RegisterGradient(rnd_name)(grad)
            g = tf.get_default_graph()
            with g.gradient_override_map({'PyFunc': rnd_name, 'PyFuncStateless': rnd_name}):
                return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

        def correction(A):
            A_new, del_ = A.copy(), 1e-4
            while True:
                try:
                    np.linalg.cholesky(A_new)
                    break
                except np.linalg.linalg.LinAlgError:
                    if warn:
                        logging.warn('[Cholesky] singular matrix, adding diagonal {}'.format(del_))
                    A_new = A + del_ * np.eye(A.shape[-1]).astype(self.floatx())
                    del_ *= 2
            return A_new

        def _correction_grad(op, grad):
            # Treat the diagonal correction as constant: pass the
            # upstream gradient through unchanged.
            A = op.inputs[0]
            return grad

        if correct:
            shape = A.get_shape()
            A = py_func(correction, [A], A.dtype, grad=_correction_grad)
            A.set_shape(shape)
        return tf.cholesky(A)

Example #10
Source File: tensorflow_backend.py    From keras-vis with MIT License
def _register_guided_gradient(name):
    if name not in ops._gradient_registry._registry:
        @tf.RegisterGradient(name)
        def _guided_backprop(op, grad):
            dtype = op.outputs[0].dtype
            gate_g = tf.cast(grad > 0., dtype)
            gate_y = tf.cast(op.outputs[0] > 0., dtype)
            return gate_y * gate_g * grad 
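These registration helpers only define the gradient under the given name; applying it still needs a gradient_override_map scope, roughly as sketched below. The sketch assumes `from tensorflow.python.framework import ops` is available for the registry check above, and the name 'GuidedBackProp' is illustrative:

import tensorflow as tf  # TensorFlow 1.x

_register_guided_gradient('GuidedBackProp')

g = tf.get_default_graph()
with g.gradient_override_map({'Relu': 'GuidedBackProp'}):
    x = tf.placeholder(tf.float32, shape=(None, 4))
    h = tf.nn.relu(x)
saliency, = tf.gradients(tf.reduce_sum(h), x)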
Example #11
Source File: tensorflow_backend.py    From keras-vis with MIT License
def _register_rectified_gradient(name):
    if name not in ops._gradient_registry._registry:
        @tf.RegisterGradient(name)
        def _relu_backprop(op, grad):
            dtype = op.outputs[0].dtype
            gate_g = tf.cast(grad > 0., dtype)
            return gate_g * grad

Example #12
Source File: ops.py    From stylegan_reimplementation with Apache License 2.0
def pixel_norm(x):
    return x * tf.rsqrt(tf.reduce_mean(tf.square(x), keep_dims=True, axis=-1)+1e-8)


#@tf.RegisterGradient("DepthwiseConv2dNativeBackpropInput")
#def _DepthwiseConv2dNativeBackpropInputGrad(op, grad):
#    return None, None, tf.zeros_like(op.inputs[2]) 
Example #13
Source File: saliency.py    From will-people-like-your-image with GNU Lesser General Public License v3.0
@contextmanager  # from contextlib; makes the yield below usable in a with-statement
def guided_relu():
    from tensorflow.python.ops import gen_nn_ops   # noqa

    @tf.RegisterGradient("GuidedReLU")
    def GuidedReluGrad(op, grad):
        return tf.where(0. < grad,
                        gen_nn_ops._relu_grad(grad, op.outputs[0]),  # spelled relu_grad in newer TF 1.x (see Example #2)
                        tf.zeros(grad.get_shape()))

    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': 'GuidedReLU'}):
        yield 
Example #14
Source File: model_1.py    From ICPR_TextDection with GNU General Public License v3.0
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
Example #15
Source File: im_rotate.py    From ambient-gan with MIT License
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):

    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
Example #16
Source File: differentiable_tls.py    From SPFN with MIT License
def register_custom_svd_gradient():
    tf.RegisterGradient('CustomSvd')(custom_gradient_svd) 
Example #17
Source File: utils.py    From cleverhans with MIT License
def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None):
  """Custom py_func with gradient support

  """
  # Need to generate a unique name to avoid duplicates:
  rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

  tf.RegisterGradient(rnd_name)(grad)
  g = tf.get_default_graph()
  with g.gradient_override_map({"PyFunc": rnd_name,
                                "PyFuncStateless": rnd_name}):
    return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
Example #18
Source File: utils.py    From parsimonious-blackbox-attack with MIT License
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    global _py_func_id

    rnd_name = 'PyFuncGrad' + '%08d' % _py_func_id
    _py_func_id += 1

    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
Example #19
Source File: utils.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None):
    """Custom py_func with gradient support

    """
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({
            "PyFunc": rnd_name,
            "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name) 
Example #20
Source File: utilities.py    From safe_learning with MIT License
def make_tf_fun(return_type, gradient=None, stateful=True):
    """Convert a python function to a tensorflow function.

    Parameters
    ----------
    return_type : list
        A list of tensorflow return types. Needs to match with the gradient.
    gradient : callable, optional
        A function that provides the gradient. It takes `op` and one gradient
        per output of the function as inputs and returns one gradient for each
        input of the function. If stateful is `False` then tensorflow does not
        seem to compute gradients at all.

    Returns
    -------
    A tensorflow function with gradients registered.
    """
    def wrap(function):
        """Create a new function."""
        # Function name with stripped leading underscore (not allowed by tensorflow)
        name = function.__name__.lstrip('_')

        # Without gradients we can take the short route here
        if gradient is None:
            @wraps(function)
            def wrapped_function(self, *args, **kwargs):
                method = partial(function, self, **kwargs)
                return tf.py_func(method, args, return_type,
                                  stateful=stateful, name=name)

            return wrapped_function

        # Name for the gradient operation
        grad_name = name + '_gradient'

        @wraps(function)
        def wrapped_function(self, *args):
            # Overwrite the gradient
            graph = tf.get_default_graph()

            # Make sure the name we specify is unique
            unique_grad_name = graph.unique_name(grad_name)

            # Register the new gradient method with tensorflow
            tf.RegisterGradient(unique_grad_name)(gradient)

            # Remove self: Tensorflow does not allow for non-tensor inputs
            method = partial(function, self)

            with graph.gradient_override_map({"PyFunc": unique_grad_name}):
                return tf.py_func(method, args, return_type,
                                  stateful=stateful, name=name)

        return wrapped_function
    return wrap 
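A hypothetical usage sketch: the Doubler class and its gradient are illustrative, and make_tf_fun itself also relies on wraps and partial being imported from functools in the original module:

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

def _double_grad(op, grad):
    # One gradient per input of the wrapped function: d(2x)/dx = 2.
    return 2.0 * grad

class Doubler(object):
    @make_tf_fun([tf.float64], gradient=_double_grad)
    def _double(self, x):
        return np.asarray(2.0 * x)

x = tf.constant([1.0, 2.0], dtype=tf.float64)
y, = Doubler()._double(x)
dy_dx, = tf.gradients(y, x)  # evaluates to [2., 2.]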
Example #21
Source File: tf.py    From deep500 with BSD 3-Clause "New" or "Revised" License
def custom_op(op: Union[CustomOp, CompilableOp, TFCompiledOp], stateful=True, name=None,
              use_autodiff=False, compile_only=False, return_handle=False):
    """
        Registers a custom Tensorflow operator from `CustomOp`, 
        `CompilableOp`, or `TFCompiledOp` objects.
        @param op The custom operator. If numpy is not used, automatic 
                    differentiation via Tensorflow applies.
        @param stateful True if the operation is not a pure function (enables
                        sub-expression elimination optimizations if False).
        @param name Specify a custom name for this operation.
        @param use_autodiff If true, uses tensorflow tensors, otherwise 
                            assumes numpy arrays.
        @param compile_only If true, returns a TFCompiledOp instead of an instantiated op
        @param return_handle (for C++ ops) If true, also returns a direct handle
                             to the operator object and library as a 3-tuple:
                             (operator, library, handle).
        @return A tf.Operation object (or a function) that calls the custom operator.
    """
    if isinstance(op, CompilableOp):
        result = _custom_cpp_op(op, stateful, name)
        if compile_only:
            return result
        else:
            op = result
    if isinstance(op, TFCompiledOp):
        result = _create_op_handle(op)
        if return_handle:
            return result
        else:
            return result[0]
    elif isinstance(op, CustomOp):
        if use_autodiff:
            return op.forward

        def _fwd(*inputs):
            return op.forward(*inputs)
        def _bwd(tfop, *grads):
            def _actual_bwd(*args):
                return op.backward(args[:len(grads)], 
                                     args[len(grads):(len(grads)+len(tfop.inputs))], 
                                     args[(len(grads)+len(tfop.inputs)):])
            return tf.py_func(_actual_bwd, 
                              (list(grads) + list(tfop.inputs) + list(tfop.outputs)), 
                              [inp.dtype for inp in op.input_descriptors], 
                              stateful=stateful)

        # Gradient replacement adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342

        # Generate a unique name to avoid duplicates
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
        tf.RegisterGradient(rnd_name)(_bwd)

        def result(*inputs):
            g = tf.get_default_graph()
            with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
                return tf.py_func(_fwd, inputs, 
                                  [out.dtype for out in op.output_descriptors],
                                  stateful=stateful, name=name)
        return result