Python theano.gof.Op() Examples

The following are 30 code examples of theano.gof.Op(), extracted from open-source projects. The source file and the project each example comes from are noted above it. You may also want to check out all available functions/classes of the module theano.gof, or try the search function.
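Most of the examples below follow the same basic pattern: subclass theano.gof.Op, build a graph node in make_node, and compute values in perform (or emit C code). As a minimal orientation sketch (the DoubleOp name and its behavior are illustrative only, not taken from any project below):

import theano
import theano.tensor as T
from theano import gof


class DoubleOp(gof.Op):
    # A toy Op that multiplies its input by 2.
    __props__ = ()

    def make_node(self, x):
        # Wrap the input as a symbolic variable and declare one output
        # of the same type.
        x = T.as_tensor_variable(x)
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # Runtime implementation: inputs are numeric values, and results
        # are written into the output storage cells.
        (x,) = inputs
        (out,) = output_storage
        out[0] = x * 2


x = T.dvector('x')
f = theano.function([x], DoubleOp()(x))
print(f([1, 2, 3]))  # -> [2. 4. 6.]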
Example #1
Source File: elemwise.py    From attention-lvcsr with MIT License
def c_code_cache_version_apply(self, node):
        version = [12]  # the version corresponding to the c code in this Op

        # now we insert versions for the ops on which we depend...
        scalar_node = Apply(
            self.scalar_op,
            [get_scalar_type(dtype=input.type.dtype).make_variable()
             for input in node.inputs],
            [get_scalar_type(dtype=output.type.dtype).make_variable()
             for output in node.outputs])
        version.append(self.scalar_op.c_code_cache_version_apply(scalar_node))
        for i in node.inputs + node.outputs:
            version.append(get_scalar_type(dtype=i.type.dtype).c_code_cache_version())
        version.append(('openmp', self.openmp))
        if all(version):
            return tuple(version)
        else:
            return () 
Example #2
Source File: test_gradient.py    From D-VAE with MIT License
def test_dxdx():

    # Tests that the gradient of a scalar with respect to itself is 1
    # I use an integer in this case because people keep changing this
    # gradient to be 0 on integers but according to our interpretation
    # of the gradient as defined in the Op contract, it should be 1.
    # If you feel the need to change this unit test you are probably
    # modifying the Op contract and should definitely get the approval
    # of multiple people on theano-dev.

    x = theano.tensor.iscalar()
    g = theano.tensor.grad(x, x)

    g = g.eval({x: 12})

    assert np.allclose(g, 1.) 
Example #3
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def test_aliased_outputs_ok_output(self):
        # here aliased outputs is ok because they are both outputs of the
        # function as a whole and thus not destroy-able
        class CustomOp(gof.Op):
            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return gof.Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                r = a * 2
                c[0] = r
                d[0] = r[1:]

        x = theano.tensor.dvector()
        y = theano.tensor.dvector()
        f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')

        r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])

        assert numpy.all(r0 == [2, 4, 6, 8])
        assert numpy.all(r1 == [4, 6, 8]) 
Example #4
Source File: test_gradient.py    From D-VAE with MIT License
def test_unimplemented_grad_grad(self):
        # tests that unimplemented grads are caught in the grad method

        class DummyOp(gof.Op):
            __props__ = ()

            def make_node(self, x):
                return gof.Apply(self, [x], [x.type()])

            def grad(self, inputs, output_grads):
                return [theano.gradient.grad_not_implemented(self, 0, inputs[0])]

        a = theano.tensor.scalar()
        b = DummyOp()(a)

        self.assertRaises(TypeError, theano.gradient.grad, b, a) 
Example #5
Source File: test_gradient.py    From D-VAE with MIT License
def test_Nin_Nout(self):
        """Test grad is called correctly for a many-to-many op"""
        gval0 = theano.tensor.matrix()
        gval1 = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                outputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval0, gval1
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval0)
        self.assertTrue(g[a1.inputs[1]] is gval1) 
Example #6
Source File: test_gradient.py    From D-VAE with MIT License
def test_1in_Nout(self):
        """Test grad is called correctly for a 1-to-many op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.scalar(), theano.tensor.scalar()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x, = inp
                gz1, gz2 = grads
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval) 
Example #7
Source File: test_gradient.py    From D-VAE with MIT License
def test_1in_1out(self):
        """Test grad is called correctly for a 1-to-1 op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval) 
Example #8
Source File: test_gradient.py    From D-VAE with MIT License
def test_wrong_rval_len1(self):
        """Test that it is not ok to return the wrong number of gradient terms
        """
        class retOne(gof.op.Op):
            __props__ = ()

            def make_node(self, *inputs):
                outputs = [theano.tensor.vector()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inputs, grads):
                return [inputs[0].zeros_like()]

        i = theano.tensor.vector()
        j = theano.tensor.vector()
        a1 = retOne().make_node(i)
        grad_sources_inputs([(a1.out, one)], None)
        a2 = retOne().make_node(i, j)
        self.assertRaises(ValueError, grad_sources_inputs, [(a2.out, one)], None) 
Example #9
Source File: test_gradient.py    From D-VAE with MIT License
def test_retNone1(self):
        """Test that it is not ok to return None from op.grad()"""
        class retNone(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.vector()]
                outputs = [theano.tensor.vector()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x, = inp
                gz, = grads
                pass
        a = retNone().make_node()
        self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None) 
Example #10
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def test_aliased_outputs_ok_shadow(self):
        # here the alias between outputs is ok because one of them is not used
        # for subsequent computation.  This is like the case where we use one
        # output as a memory buffer to serve another output.
        class CustomOp(gof.Op):
            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return gof.Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                r = a * 1
                c[0] = r
                d[0] = r[1:]

        x = theano.tensor.dvector('x')
        y = theano.tensor.dvector('y')
        f = theano.function([x, y], CustomOp()(x, y)[0] * 2, mode='DEBUG_MODE')

        r0 = f([1, 2, 3, 4], [5, 6, 7, 8])

        assert numpy.all(r0 == [2, 4, 6, 8]) 
Example #11
Source File: ops.py    From attention-lvcsr with MIT License
def register_shape_c_code(type, code, version=()):
    """
    Tell Shape Op how to generate C code for a Theano Type.

    Parameters
    ----------
    type : Theano type
        It must be the Theano class itself and not an instance of the class.
    code : C code
        Returns a vector representing the shape for the Theano type 'type'.
        Use %(iname)s and %(oname)s for the input and output C variable names
        respectively.
    version
        A number indicating the version of the code, for the cache.

    """
    Shape.c_code_and_version[type] = (code, version) 
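For context, a registration call might look like the following sketch, modeled loosely on Theano's own registration for TensorType; treat the exact C body and the import path as illustrative rather than canonical:

from theano.compile.ops import register_shape_c_code
from theano.tensor import TensorType

# Illustrative sketch: the C snippet writes the input's dimensions into a
# 1-D int64 output vector.
register_shape_c_code(
    TensorType,
    """
    npy_intp shape[] = {PyArray_NDIM(%(iname)s)};
    if (%(oname)s == NULL || PyArray_DIMS(%(oname)s)[0] != shape[0]) {
        Py_XDECREF(%(oname)s);
        %(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);
    }
    for (int i = 0; i < shape[0]; i++) {
        ((npy_int64*) PyArray_GETPTR1(%(oname)s, i))[0] =
            PyArray_DIMS(%(iname)s)[i];
    }
    """,
    version=(1,))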
Example #12
Source File: test_gradient.py    From D-VAE with MIT License
def test_undefined_cost_grad():

        # Tests that if we say the cost is not differentiable via the
        # known_grads mechanism, it is treated as such by the rest of the
        # system.
        # This is so that Ops that are built around minigraphs like OpFromGraph
        # and scan can implement Op.grad by passing ograds to known_grads

        x = theano.tensor.iscalar()
        y = theano.tensor.iscalar()
        cost = x + y
        assert cost.dtype in theano.tensor.discrete_dtypes
        try:
            theano.tensor.grad(cost, [x, y], known_grads={cost: NullType()()})
        except theano.gradient.NullTypeGradError:
            return
        raise AssertionError("An undefined gradient has been ignored.") 
Example #13
Source File: test_debugmode.py    From D-VAE with MIT License
def test_aliased_outputs_ok_shadow(self):
        # here the alias between outputs is ok because one of them is not used
        # for subsequent computation.  This is like the case where we use one
        # output as a memory buffer to serve another output.
        class CustomOp(gof.Op):
            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return gof.Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                r = a * 1
                c[0] = r
                d[0] = r[1:]

        x = theano.tensor.dvector('x')
        y = theano.tensor.dvector('y')
        f = theano.function([x, y], CustomOp()(x, y)[0] * 2, mode='DEBUG_MODE')

        r0 = f([1, 2, 3, 4], [5, 6, 7, 8])

        assert numpy.all(r0 == [2, 4, 6, 8]) 
Example #14
Source File: test_debugmode.py    From D-VAE with MIT License
def test_aliased_outputs_ok_output(self):
        # here aliased outputs is ok because they are both outputs of the
        # function as a whole and thus not destroy-able
        class CustomOp(gof.Op):
            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return gof.Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                r = a * 2
                c[0] = r
                d[0] = r[1:]

        x = theano.tensor.dvector()
        y = theano.tensor.dvector()
        f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')

        r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])

        assert numpy.all(r0 == [2, 4, 6, 8])
        assert numpy.all(r1 == [4, 6, 8]) 
Example #15
Source File: test_debugmode.py    From D-VAE with MIT License
def test_aliased_outputs_ok(self):
        # here aliased outputs is ok because they are both aliased to an input
        # as well
        class CustomOp(gof.Op):
            view_map = {0: [0], 1: [0]}

            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return gof.Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                c[0] = a
                d[0] = a[1:]

        x = theano.tensor.dvector('x')
        y = theano.tensor.dvector('y')
        f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')

        r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])

        assert numpy.all(r0 == [1, 2, 3, 4])
        assert numpy.all(r1 == [2, 3, 4]) 
Example #16
Source File: elemwise.py    From attention-lvcsr with MIT License
def c_code_cache_version_apply(self, node):
        # the version corresponding to the c code in this Op; a list, so the
        # dependency versions below can be appended
        version = [6]

        # now we insert versions for the ops on which we depend...
        scalar_node = Apply(
            self.scalar_op,
            [get_scalar_type(dtype=input.type.dtype).make_variable()
             for input in node.inputs],
            [get_scalar_type(dtype=output.type.dtype).make_variable()
             for output in node.outputs])
        version.append(self.scalar_op.c_code_cache_version_apply(scalar_node))
        for i in node.inputs + node.outputs:
            version.append(get_scalar_type(dtype=i.type.dtype).c_code_cache_version())
        if all(version):
            return tuple(version)
        else:
            return () 
Example #17
Source File: test_gradient.py    From attention-lvcsr with MIT License
def test_wrong_rval_len1(self):
        """Test that it is not ok to return the wrong number of gradient terms
        """
        class retOne(gof.op.Op):
            __props__ = ()

            def make_node(self, *inputs):
                outputs = [theano.tensor.vector()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inputs, grads):
                return [inputs[0].zeros_like()]

        i = theano.tensor.vector()
        j = theano.tensor.vector()
        a1 = retOne().make_node(i)
        grad_sources_inputs([(a1.out, one)], None)
        a2 = retOne().make_node(i, j)
        self.assertRaises(ValueError, grad_sources_inputs,
                          [(a2.out, one)], None)
Example #18
Source File: ops.py    From D-VAE with MIT License
def register_shape_c_code(type, code, version=()):
    """
    Tell Shape Op how to generate C code for a Theano Type.

    Parameters
    ----------
    type : Theano type
        It must be the Theano class itself and not an instance of the class.
    code : C code
        Returns a vector representing the shape for the Theano type 'type'.
        Use %(iname)s and %(oname)s for the input and output C variable names
        respectively.
    version
        A number indicating the version of the code, for the cache.

    """
    Shape.c_code_and_version[type] = (code, version) 
Example #19
Source File: test_gradient.py    From attention-lvcsr with MIT License
def test_1in_1out(self):
        """Test grad is called correctly for a 1-to-1 op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval) 
Example #20
Source File: test_gradient.py    From attention-lvcsr with MIT License
def test_1in_Nout(self):
        """Test grad is called correctly for a 1-to-many op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.scalar(), theano.tensor.scalar()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x, = inp
                gz1, gz2 = grads
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval) 
Example #21
Source File: test_gradient.py    From attention-lvcsr with MIT License
def test_Nin_Nout(self):
        """Test grad is called correctly for a many-to-many op"""
        gval0 = theano.tensor.matrix()
        gval1 = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                outputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval0, gval1
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval0)
        self.assertTrue(g[a1.inputs[1]] is gval1) 
Example #22
Source File: test_gradient.py    From attention-lvcsr with MIT License
def test_unimplemented_grad_grad(self):
        # tests that unimplemented grads are caught in the grad method

        class DummyOp(gof.Op):
            __props__ = ()

            def make_node(self, x):
                return gof.Apply(self, [x], [x.type()])

            def grad(self, inputs, output_grads):
                return [theano.gradient.grad_not_implemented(
                            self, 0, inputs[0])]

        a = theano.tensor.scalar()
        b = DummyOp()(a)

        self.assertRaises(TypeError, theano.gradient.grad, b, a) 
Example #23
Source File: test_gradient.py    From attention-lvcsr with MIT License
def test_dxdx():

    # Tests that the gradient of a scalar with respect to itself is 1
    # I use an integer in this case because people keep changing this
    # gradient to be 0 on integers but according to our interpretation
    # of the gradient as defined in the Op contract, it should be 1.
    # If you feel the need to change this unit test you are probably
    # modifying the Op contract and should definitely get the approval
    # of multiple people on theano-dev.

    x = theano.tensor.iscalar()
    g = theano.tensor.grad(x, x)

    g = g.eval({x: 12})

    assert np.allclose(g, 1.) 
Example #24
Source File: test_gradient.py    From attention-lvcsr with MIT License
def test_undefined_cost_grad():

        # Tests that if we say the cost is not differentiable via the
        # known_grads mechanism, it is treated as such by the rest of the
        # system.
        # This is so that Ops that are built around minigraphs like OpFromGraph
        # and scan can implement Op.grad by passing ograds to known_grads

        x = theano.tensor.iscalar()
        y = theano.tensor.iscalar()
        cost = x + y
        assert cost.dtype in theano.tensor.discrete_dtypes
        try:
            theano.tensor.grad(cost, [x, y], known_grads={cost: NullType()()})
        except theano.gradient.NullTypeGradError:
            return
        raise AssertionError("An undefined gradient has been ignored.") 
Example #25
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def test_aliased_outputs_ok(self):
        # here aliased outputs is ok because they are both aliased to an input
        # as well
        class CustomOp(gof.Op):
            view_map = {0: [0], 1: [0]}

            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return gof.Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                c[0] = a
                d[0] = a[1:]

        x = theano.tensor.dvector('x')
        y = theano.tensor.dvector('y')
        f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')

        r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])

        assert numpy.all(r0 == [1, 2, 3, 4])
        assert numpy.all(r1 == [2, 3, 4]) 
Example #26
Source File: elemwise.py    From D-VAE with MIT License
def c_code_cache_version_apply(self, node):
        # the version corresponding to the c code in this Op; a list, so the
        # dependency versions below can be appended
        version = [6]

        # now we insert versions for the ops on which we depend...
        scalar_node = Apply(
            self.scalar_op,
            [get_scalar_type(dtype=input.type.dtype).make_variable()
             for input in node.inputs],
            [get_scalar_type(dtype=output.type.dtype).make_variable()
             for output in node.outputs])
        version.append(self.scalar_op.c_code_cache_version_apply(scalar_node))
        for i in node.inputs + node.outputs:
            version.append(get_scalar_type(dtype=i.type.dtype).c_code_cache_version())
        if all(version):
            return tuple(version)
        else:
            return () 
Example #27
Source File: elemwise.py    From D-VAE with MIT License
def c_code_cache_version_apply(self, node):
        version = [12]  # the version corresponding to the c code in this Op

        # now we insert versions for the ops on which we depend...
        scalar_node = Apply(
            self.scalar_op,
            [get_scalar_type(dtype=input.type.dtype).make_variable()
             for input in node.inputs],
            [get_scalar_type(dtype=output.type.dtype).make_variable()
             for output in node.outputs])
        version.append(self.scalar_op.c_code_cache_version_apply(scalar_node))
        for i in node.inputs + node.outputs:
            version.append(get_scalar_type(dtype=i.type.dtype).c_code_cache_version())
        version.append(('openmp', self.openmp))
        if all(version):
            return tuple(version)
        else:
            return () 
Example #28
Source File: abstract_conv.py    From attention-lvcsr with MIT License
def infer_shape(self, node, input_shapes):
        # We use self.imshp (that was passed when creating the Op) if possible,
        # or fall back to the `shape` input of the node.
        # TODO: when there is no subsampling, try to infer the image shape
        # from the shapes of inputs.
        kshp = input_shapes[0]
        topshp = input_shapes[1]
        imshp = self.imshp[:] if self.imshp is not None else [None] * 4
        fallback_imshp = [topshp[0], kshp[1], node.inputs[2][0],
                          node.inputs[2][1]]
        imshp = [fallback_imshp[i] if imshp[i] is None else imshp[i]
                 for i in range(4)]
        return [imshp] 
Example #29
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def __init__(self, py_offset):
        gof.Op.__init__(self)
        self.py_offset = py_offset 
Example #30
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def test_aliased_outputs_bad(self):
        # here the alias between outputs is not ok because destroying one
        # destroys the other, but there's no way to warn theano about it
        # through the view_map mechanism.
        class CustomOp(gof.Op):
            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return gof.Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                r = a * 1
                c[0] = r[:-1]
                d[0] = r[1:]

        custom_op = CustomOp()

        x = theano.tensor.dvector()
        y = theano.tensor.dvector()
        bad_xy0, bad_xy1 = custom_op(x, y)
        out = bad_xy0 * 2 + bad_xy1 * 2
        f = theano.function([x, y], out, mode='DEBUG_MODE')

        try:
            f([1, 2, 3, 4], [5, 6, 7, 8])
            assert False  # DebugMode should have caught the error
        except debugmode.BadViewMap:
            pass

        # the situation can be rescued by picking one of the inputs and
        # pretending that it is aliased to both the outputs.
        # This unfairly disables any destructive operations on the
        # input, but guarantees correctness.
        # custom_op.view_map = {0: [0], 1: [0]}
        # f([1, 2, 3, 4], [5, 6, 7, 8])