Python theano.OpFromGraph() Examples

The following are 23 code examples of theano.OpFromGraph(), drawn from open-source projects. The source file, project, and license are noted above each example's code. You may also want to check out all available functions/classes of the module theano, or try the search function.
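Before the project examples, here is a minimal self-contained sketch of the basic pattern (the variable names are illustrative, not taken from any project below): theano.OpFromGraph packs a list of input variables and a list of output expressions into a reusable Op, and applying that Op splices a copy of the wrapped graph into the caller's graph.

import theano
import theano.tensor as T

# inner graph: two scalar inputs, one output expression
x, y = T.scalars('x', 'y')
op = theano.OpFromGraph([x, y], [x * y + T.exp(x)])

# apply the op to fresh variables and compile as usual
a, b = T.scalars('a', 'b')
f = theano.function([a, b], op(a, b))
print(f(2.0, 3.0))   # 2*3 + e**2 ~= 13.39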
Example #1
Source File: theano_op.py    From gempy with GNU Lesser General Public License v3.0
def test_gradient(self, output: str, wrt: str):
        theano.config.compute_test_value = 'ignore'
        interpolator = self.model._interpolator
        out = self.get_output(output)
        wrt_ = self.get_wrt(wrt)

        geo_model_T = theano.OpFromGraph(interpolator.theano_graph.input_parameters_loop,
                                         [theano.grad(out[0], wrt_)],
                                         inline=True,
                                         on_unused_input='ignore',
                                         name='test_'+output)

        i = interpolator.get_python_input_block()
        th_f = theano.function([], geo_model_T(*i), on_unused_input='warn')

        interpolator.theano_graph.sig_slope.set_value(20)

        return th_f() 
Example #2
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __call__(self, *args):
        # constants need to be manually converted to tensors
        def try_convert_tensor(arg):
            if treeano.utils.is_variable(arg):
                return arg
            else:
                return T.constant(arg, dtype=fX)

        args = list(map(try_convert_tensor, args))  # list(): map() is lazy on Python 3
        # OpFromGraph is opaque to Theano optimizations, so we need to move
        # things to GPU ourselves if needed.
        if theano.sandbox.cuda.cuda_enabled:
            maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
        else:
            maybe_to_gpu = lambda x: x
        # move the inputs to GPU if needed.
        args = list(map(maybe_to_gpu, args))
        # note the tensor type of the input variable to the fn
        # (mainly dimensionality and dtype); we need to create a fitting Op.
        tensor_types = tuple([arg.type for arg in args])
        # create a suitable Op if not yet done
        if tensor_types not in self.ops:
            # create an input variable of the correct type
            inps = [tensor_type() for tensor_type in tensor_types]
            # pass it through the fn (and move to GPU if needed)
            outp = maybe_to_gpu(self.fn(*inps))
            # fix the forward expression
            op = theano.OpFromGraph(inps, [outp])
            # keep a reference to previous gradient
            op.overwritten_grad = op.grad
            # replace the gradient with our own
            op.grad = self.grad
            # Finally, we memoize the new Op
            self.ops[tensor_types] = op
        # apply the memoized Op to the input we got
        return self.ops[tensor_types](*args) 
Example #3
Source File: theano_op.py    From gempy with GNU Lesser General Public License v3.0
def set_th_op(self, output):
        interpolator = self.model._interpolator
        out = self.get_output(output)

        i = interpolator.get_python_input_block()
        theano.config.compute_test_value = 'ignore'
        self.th_op = theano.OpFromGraph(interpolator.theano_graph.input_parameters_loop,
                                        [out],
                                        inline=False,
                                        on_unused_input='ignore',
                                        name=output)
        return self.th_op 
Example #4
Source File: utils.py    From treeano with Apache License 2.0
def __call__(self, *args):
        # constants need to be manually converted to tensors
        def try_convert_tensor(arg):
            if treeano.utils.is_variable(arg):
                return arg
            else:
                return T.constant(arg, dtype=fX)

        args = list(map(try_convert_tensor, args))  # list(): map() is lazy on Python 3
        # OpFromGraph is opaque to Theano optimizations, so we need to move
        # things to GPU ourselves if needed.
        if theano.sandbox.cuda.cuda_enabled:
            maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
        else:
            maybe_to_gpu = lambda x: x
        # move the inputs to GPU if needed.
        args = list(map(maybe_to_gpu, args))
        # note the tensor type of the input variable to the fn
        # (mainly dimensionality and dtype); we need to create a fitting Op.
        tensor_types = tuple([arg.type for arg in args])
        # create a suitable Op if not yet done
        if tensor_types not in self.ops:
            # create an input variable of the correct type
            inps = [tensor_type() for tensor_type in tensor_types]
            # pass it through the fn (and move to GPU if needed)
            outp = maybe_to_gpu(self.fn(*inps))
            # fix the forward expression
            op = theano.OpFromGraph(inps, [outp])
            # keep a reference to previous gradient
            op.overwritten_grad = op.grad
            # replace the gradient with our own
            op.grad = self.grad
            # Finally, we memoize the new Op
            self.ops[tensor_types] = op
        # apply the memoized Op to the input we got
        return self.ops[tensor_types](*args) 
Example #5
Source File: utils.py    From treeano with Apache License 2.0
def __init__(self, fn):
        self.fn = fn
        # memoizes an OpFromGraph instance per tensor type
        self.ops = {} 
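Taken together, the __call__ of Example #4 and this __init__ form a small wrapper that memoizes one OpFromGraph per input tensor signature and substitutes a user-supplied gradient. A condensed sketch of that pattern, with hypothetical names, fX assumed to be theano.config.floatX, and the GPU-transfer step omitted:

import theano
import theano.tensor as T

fX = theano.config.floatX

class GradOverrideOp(object):
    """Hypothetical condensed form of Examples #4/#5."""

    def __init__(self, fn, grad):
        self.fn = fn
        self.grad = grad
        # memoizes an OpFromGraph instance per tensor type
        self.ops = {}

    def __call__(self, *args):
        # constants need to be converted to tensors by hand
        args = [a if isinstance(a, theano.Variable)
                else T.constant(a, dtype=fX) for a in args]
        tensor_types = tuple(a.type for a in args)
        if tensor_types not in self.ops:
            inps = [t() for t in tensor_types]
            op = theano.OpFromGraph(inps, [self.fn(*inps)])
            # Theano calls op.grad(inputs, output_grads) and expects one
            # gradient expression per input, so a plain function works here
            op.grad = self.grad
            self.ops[tensor_types] = op
        return self.ops[tensor_types](*args)

# usage: identity forward pass whose gradient is clipped to [-1, 1]
clipped_identity = GradOverrideOp(
    fn=lambda x: x,
    grad=lambda inputs, out_grads: [T.clip(out_grads[0], -1.0, 1.0)])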
Example #6
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __call__(self, *args):
        # constants need to be manually converted to tensors
        def try_convert_tensor(arg):
            if treeano.utils.is_variable(arg):
                return arg
            else:
                return T.constant(arg, dtype=fX)

        args = list(map(try_convert_tensor, args))  # list(): map() is lazy on Python 3
        # OpFromGraph is opaque to Theano optimizations, so we need to move
        # things to GPU ourselves if needed.
        if theano.sandbox.cuda.cuda_enabled:
            maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
        else:
            maybe_to_gpu = lambda x: x
        # move the inputs to GPU if needed.
        args = list(map(maybe_to_gpu, args))
        # note the tensor type of the input variable to the fn
        # (mainly dimensionality and dtype); we need to create a fitting Op.
        tensor_types = tuple([arg.type for arg in args])
        # create a suitable Op if not yet done
        if tensor_types not in self.ops:
            # create an input variable of the correct type
            inps = [tensor_type() for tensor_type in tensor_types]
            # pass it through the fn (and move to GPU if needed)
            outp = maybe_to_gpu(self.fn(*inps))
            # fix the forward expression
            op = theano.OpFromGraph(inps, [outp])
            # keep a reference to previous gradient
            op.overwritten_grad = op.grad
            # replace the gradient with our own
            op.grad = self.grad
            # Finally, we memoize the new Op
            self.ops[tensor_types] = op
        # apply the memoized Op to the input we got
        return self.ops[tensor_types](*args) 
Example #7
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __init__(self, fn):
        self.fn = fn
        # memoizes an OpFromGraph instance per tensor type
        self.ops = {} 
Example #8
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __call__(self, *args):
        # constants need to be manually converted to tensors
        def try_convert_tensor(arg):
            if treeano.utils.is_variable(arg):
                return arg
            else:
                return T.constant(arg, dtype=fX)

        args = list(map(try_convert_tensor, args))  # list(): map() is lazy on Python 3
        # OpFromGraph is opaque to Theano optimizations, so we need to move
        # things to GPU ourselves if needed.
        if theano.sandbox.cuda.cuda_enabled:
            maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
        else:
            maybe_to_gpu = lambda x: x
        # move the inputs to GPU if needed.
        args = list(map(maybe_to_gpu, args))
        # note the tensor type of the input variable to the fn
        # (mainly dimensionality and dtype); we need to create a fitting Op.
        tensor_types = tuple([arg.type for arg in args])
        # create a suitable Op if not yet done
        if tensor_types not in self.ops:
            # create an input variable of the correct type
            inps = [tensor_type() for tensor_type in tensor_types]
            # pass it through the fn (and move to GPU if needed)
            outp = maybe_to_gpu(self.fn(*inps))
            # fix the forward expression
            op = theano.OpFromGraph(inps, [outp])
            # keep a reference to previous gradient
            op.overwritten_grad = op.grad
            # replace the gradient with our own
            op.grad = self.grad
            # Finally, we memoize the new Op
            self.ops[tensor_types] = op
        # apply the memoized Op to the input we got
        return self.ops[tensor_types](*args) 
Example #9
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __init__(self, fn):
        self.fn = fn
        # memoizes an OpFromGraph instance per tensor type
        self.ops = {} 
Example #10
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __init__(self, fn):
        self.fn = fn
        # memoizes an OpFromGraph instance per tensor type
        self.ops = {} 
Example #11
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __call__(self, *args):
        # constants need to be manually converted to tensors
        def try_convert_tensor(arg):
            if treeano.utils.is_variable(arg):
                return arg
            else:
                return T.constant(arg, dtype=fX)

        args = list(map(try_convert_tensor, args))  # list(): map() is lazy on Python 3
        # OpFromGraph is opaque to Theano optimizations, so we need to move
        # things to GPU ourselves if needed.
        if theano.sandbox.cuda.cuda_enabled:
            maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
        else:
            maybe_to_gpu = lambda x: x
        # move the inputs to GPU if needed.
        args = list(map(maybe_to_gpu, args))
        # note the tensor type of the input variable to the fn
        # (mainly dimensionality and dtype); we need to create a fitting Op.
        tensor_types = tuple([arg.type for arg in args])
        # create a suitable Op if not yet done
        if tensor_types not in self.ops:
            # create an input variable of the correct type
            inps = [tensor_type() for tensor_type in tensor_types]
            # pass it through the fn (and move to GPU if needed)
            outp = maybe_to_gpu(self.fn(*inps))
            # fix the forward expression
            op = theano.OpFromGraph(inps, [outp])
            # keep a reference to previous gradient
            op.overwritten_grad = op.grad
            # replace the gradient with our own
            op.grad = self.grad
            # Finally, we memoize the new Op
            self.ops[tensor_types] = op
        # apply the memoized Op to the input we got
        return self.ops[tensor_types](*args) 
Example #12
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __init__(self, fn):
        self.fn = fn
        # memoizes an OpFromGraph instance per tensor type
        self.ops = {} 
Example #13
Source File: builders.py    From D-VAE with MIT License
def make_thunk(self, node, storage_map, compute_map, no_recycling):
        ret = super(OpFromGraph, self).make_thunk(node, storage_map,
                                                  compute_map, no_recycling)
        if not hasattr(self, "fn"):
            self.fn = orig_function(self.new_inputs,
                                    self.new_outputs,
                                    **self.kwargs)
        return ret 
Example #14
Source File: utils.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __init__(self, fn):
        self.fn = fn
        # memoizes an OpFromGraph instance per tensor type
        self.ops = {} 
Example #15
Source File: models.py    From attention-lvcsr with MIT License
def __init__(self):
        x, y, z = T.scalars('xyz')
        e = T.nnet.sigmoid((x + y + z)**2)
        op = th.OpFromGraph([x, y, z], [e])
        e2 = op(x, y, z) + op(z, y, x)

        self.inputs = [x, y, z]
        self.outputs = [e2] 
Example #16
Source File: models.py    From attention-lvcsr with MIT License
def __init__(self):
        x, y, z = T.scalars('xyz')
        e = x * y
        op = th.OpFromGraph([x, y], [e])
        e2 = op(x, y) + z
        op2 = th.OpFromGraph([x, y, z], [e2])
        e3 = op2(x, y, z) + z

        self.inputs = [x, y, z]
        self.outputs = [e3] 
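Graphs built from nested or reused OpFromGraph instances compile like any other Theano expression. A minimal standalone sketch (repeating the nested-op construction from Example #16) of compiling and evaluating it:

import theano
import theano.tensor as T

x, y, z = T.scalars('xyz')
op = theano.OpFromGraph([x, y], [x * y])
op2 = theano.OpFromGraph([x, y, z], [op(x, y) + z])
e3 = op2(x, y, z) + z

f = theano.function([x, y, z], e3)
print(f(2.0, 3.0, 4.0))   # (2*3 + 4) + 4 = 14.0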
Example #17
Source File: test_scan_utils.py    From attention-lvcsr with MIT License
def test_opfromgraph(self):
        # as with the scan tests above, insert foreign inputs into the
        # inner graph.
        outer = tensor.scalar("outer")
        shared = theano.shared(
            numpy.array(1., dtype=theano.config.floatX),
            name="shared")
        constant = tensor.constant(1., name="constant")
        z = outer * (shared + constant)

        # construct the inner graph
        a = tensor.scalar()
        b = tensor.scalar()
        r = a + b
        r.tag.replacement = z * (a - b)

        # construct the outer graph
        c = tensor.scalar()
        d = tensor.scalar()
        u = theano.OpFromGraph([a, b], [r])(c, d)
        t = z * u
        v, = map_variables(self.replacer, [t])
        t2 = z * v

        f = theano.function([c, d, outer], [t, t2])
        for m, n in itertools.combinations(range(10), 2):
            assert f(m, n, outer=0.5) == [m + n, m - n]

        # test that the unsupported case of replacement with a shared
        # variable with updates crashes
        shared.update = shared + 1
        self.assertRaises(NotImplementedError,
                          map_variables, self.replacer, [t]) 
Example #18
Source File: builders.py    From attention-lvcsr with MIT License
def grad(self, inputs, output_grads):
        if hasattr(self, "grad_ops"):
            grad_ops = self.grad_ops
        else:
            gs = theano.gradient.grad(cost=None,
                                      known_grads=dict(izip(self.new_outputs,
                                                            output_grads)),
                                      wrt=self.new_inputs,
                                      disconnected_inputs='ignore')

            grad_ops = []
            for g in gs:
                if g is None:
                    grad_ops.append(lambda *args: None)
                else:
                    # It is normal if some inputs are not needed in order
                    # to compute the gradient, so we ignore them.
                    grad_ops.append(OpFromGraph(self.new_inputs + output_grads,
                                                [g],
                                                on_unused_input='ignore'))
            self.grad_ops = grad_ops

        return [go(*(inputs + output_grads)) for go in grad_ops]

# Since OpFromGraph contains a Theano compiled function, we should let
# DebugMode know about it 
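Because OpFromGraph implements grad this way, theano.grad can differentiate straight through an application of the op. A minimal standalone sketch, not tied to the builders.py internals above:

import theano
import theano.tensor as T

x, y = T.scalars('x', 'y')
op = theano.OpFromGraph([x, y], [x * y + x ** 2])

a, b = T.scalars('a', 'b')
cost = op(a, b)
# grad() recurses into the wrapped graph via the op's grad method
ga, gb = theano.grad(cost, [a, b])
f = theano.function([a, b], [ga, gb])
print(f(2.0, 3.0))   # [b + 2*a, a] -> [7.0, 2.0]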
Example #19
Source File: builders.py    From attention-lvcsr with MIT License
def make_thunk(self, node, storage_map, compute_map, no_recycling):
        ret = super(OpFromGraph, self).make_thunk(node, storage_map,
                                                  compute_map, no_recycling)
        if not hasattr(self, "fn"):
            self.fn = orig_function(self.new_inputs,
                                    self.new_outputs,
                                    **self.kwargs)
        return ret 
Example #20
Source File: models.py    From D-VAE with MIT License
def __init__(self):
        x, y, z = T.scalars('xyz')
        e = T.nnet.sigmoid((x + y + z)**2)
        op = th.OpFromGraph([x, y, z], [e])
        e2 = op(x, y, z) + op(z, y, x)

        self.inputs = [x, y, z]
        self.outputs = [e2] 
Example #21
Source File: models.py    From D-VAE with MIT License
def __init__(self):
        x, y, z = T.scalars('xyz')
        e = x * y
        op = th.OpFromGraph([x, y], [e])
        e2 = op(x, y) + z
        op2 = th.OpFromGraph([x, y, z], [e2])
        e3 = op2(x, y, z) + z

        self.inputs = [x, y, z]
        self.outputs = [e3] 
Example #22
Source File: test_scan_utils.py    From D-VAE with MIT License
def test_opfromgraph(self):
        # as with the scan tests above, insert foreign inputs into the
        # inner graph.
        outer = tensor.scalar("outer")
        shared = theano.shared(
            numpy.array(1., dtype=theano.config.floatX),
            name="shared")
        constant = tensor.constant(1., name="constant")
        z = outer * (shared + constant)

        # construct the inner graph
        a = tensor.scalar()
        b = tensor.scalar()
        r = a + b
        r.tag.replacement = z * (a - b)

        # construct the outer graph
        c = tensor.scalar()
        d = tensor.scalar()
        u = theano.OpFromGraph([a, b], [r])(c, d)
        t = z * u
        v, = map_variables(self.replacer, [t])
        t2 = z * v

        f = theano.function([c, d, outer], [t, t2])
        for m, n in itertools.combinations(range(10), 2):
            assert f(m, n, outer=0.5) == [m + n, m - n]

        # test that the unsupported case of replacement with a shared
        # variable with updates crashes
        shared.update = shared + 1
        self.assertRaises(NotImplementedError,
                          map_variables, self.replacer, [t]) 
Example #23
Source File: builders.py    From D-VAE with MIT License
def grad(self, inputs, output_grads):
        if hasattr(self, "grad_ops"):
            grad_ops = self.grad_ops
        else:
            gs = theano.gradient.grad(cost=None,
                                      known_grads=dict(izip(self.new_outputs,
                                                            output_grads)),
                                      wrt=self.new_inputs,
                                      disconnected_inputs='ignore')

            grad_ops = []
            for g in gs:
                if g is None:
                    grad_ops.append(lambda *args: None)
                else:
                    # It is normal if some inputs are not needed in order
                    # to compute the gradient, so we ignore them.
                    grad_ops.append(OpFromGraph(self.new_inputs + output_grads,
                                                [g],
                                                on_unused_input='ignore'))
            self.grad_ops = grad_ops

        return [go(*(inputs + output_grads)) for go in grad_ops]

# Since OpFromGraph contains a Theano compiled function, we should let
# DebugMode know about it