Python theano.compile Examples

The following are 15 code examples of the theano.compile module, drawn from open-source projects. You can go to the original project or source file by following the link above each example, or check out all other available functions and classes of the theano module.
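Before diving in, here is a minimal orientation sketch (not taken from any of the projects below, and assuming only a working Theano install). theano.function, the main compilation entry point, is defined in the theano.compile subpackage, and a Mode bundles the optimizer and linker used during compilation:

import theano
import theano.tensor as T

# theano.function is re-exported from the theano.compile subpackage.
x = T.scalar('x')
f = theano.function([x], 2 * x + 1)
print(f(3.0))  # -> array(7.0)

# A Mode bundles an optimizer and a linker; several examples below fetch
# or tweak the default one.
mode = theano.compile.mode.get_default_mode()
print(mode)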
Example #1
Source File: test_subtensor.py    From D-VAE with MIT License
def __init__(self, name, shared=tensor._shared,
                 sub=tensor.Subtensor,
                 inc_sub=tensor.IncSubtensor,
                 adv_sub1=tensor.AdvancedSubtensor1,
                 adv_incsub1=tensor.AdvancedIncSubtensor1,
                 mode=None,
                 dtype=theano.config.floatX,
                 type=tensor.TensorType,
                 ignore_topo=DeepCopyOp):
        self.shared = shared
        self.sub = sub
        self.inc_sub = inc_sub
        self.adv_sub1 = adv_sub1
        self.adv_incsub1 = adv_incsub1
        if mode is None:
            mode = theano.compile.mode.get_default_mode()
        self.mode = mode
        self.dtype = dtype
        self.type = type
        self.ignore_topo = ignore_topo
        self.fast_compile = theano.config.mode == 'FAST_COMPILE'
        self.ops = (sub, inc_sub, adv_sub1, adv_incsub1)
        super(T_subtensor, self).__init__(name) 
Example #2
Source File: test_subtensor.py    From attention-lvcsr with MIT License
def __init__(self, name,
                 shared=tensor._shared,
                 sub=tensor.AdvancedSubtensor,
                 inc_sub=tensor.AdvancedIncSubtensor,
                 mode=None,
                 dtype=theano.config.floatX,
                 ignore_topo=DeepCopyOp):
        self.shared = shared
        self.sub = sub
        self.inc_sub = inc_sub
        if mode is None:
            mode = theano.compile.mode.get_default_mode()
        self.mode = mode
        self.dtype = dtype
        self.ignore_topo = ignore_topo
        super(TestAdvancedSubtensor, self).__init__(name) 
Example #3
Source File: __init__.py    From D-VAE with MIT License
def handle_shared_float32(tf):
    """
    Set the default shared type for float32 tensor to CudaNdarrayType.

    This function is intended to be called from use(gpu_index), not directly.

    """
    if tf:
        theano.compile.shared_constructor(float32_shared_constructor)
    else:
        theano.compile.shared_constructor(float32_shared_constructor, True)
        assert (float32_shared_constructor not in
                theano.compile.shared.constructors)

# We can't test the driver during import here, as that would cause a
# circular import dependency. So we also test it in theano/__init__.py.
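A hedged sketch of the registration mechanism used above. my_constructor is a made-up name; that raising TypeError makes theano.shared fall through to the next registered constructor, and that the second argument is the remove flag, are assumptions inferred from how this snippet calls the API (the same pattern is exercised by test_filter_float in Example #14 below):

import theano

# Hypothetical constructor; raising TypeError is assumed to make
# theano.shared fall through to the next registered constructor.
def my_constructor(value, name=None, strict=False,
                   allow_downcast=None, **kwargs):
    raise TypeError('my_constructor does not handle this value')

theano.compile.shared_constructor(my_constructor)
assert my_constructor in theano.compile.sharedvalue.shared.constructors
# The second argument (True, as in the snippet above) is assumed to
# un-register the constructor again.
theano.compile.shared_constructor(my_constructor, True)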
Example #4
Source File: __init__.py    From D-VAE with MIT License
def dnn_version():
    """Return the current cuDNN version we compile with.

    This returns a tuple with the header version and the library
    version we link with. For older cuDNN versions without version
    information, -1 is returned.

    """
    if not dnn_available():
        raise Exception(
            "We can't determine the cudnn version as it is not available",
            dnn_available.msg)

    if dnn_version.v is None:
        f = theano.function([], DnnVersion()(),
                            theano.Mode(optimizer=None),
                            profile=False)
        dnn_version.v = f()
    return dnn_version.v 
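A short usage sketch for the snippet above. The module path theano.sandbox.cuda.dnn is an assumption (it is where these helpers historically lived), and running it requires a configured CUDA device:

from theano.sandbox.cuda import dnn  # assumed module path

if dnn.dnn_available():
    # Per the docstring above: a (header_version, library_version) tuple.
    print(dnn.dnn_version())
else:
    # dnn_available.msg explains why cuDNN cannot be used.
    print(dnn.dnn_available.msg)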
Example #5
Source File: __init__.py    From attention-lvcsr with MIT License
def set_cuda_disabled():
    """
    Function used to disable cuda.

    A warning is displayed, so that the user is aware that cuda-based code is
    not going to work.
    Note that there is no point calling this function from outside of
    `cuda.__init__`, since it has no effect once the module is loaded.

    """
    global cuda_available, cuda_warning_is_displayed
    cuda_available = False

# cuda_ndarray compile and import 
Example #6
Source File: test_vm.py    From attention-lvcsr with MIT License
def test_reallocation():
    x = tensor.scalar('x')
    y = tensor.scalar('y')
    z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
    # The functionality is currently implemented only for the non-lazy, non-C VMs.
    for l in [vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
              vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False)]:
        m = theano.compile.get_mode(theano.Mode(linker=l))
        m = m.excluding('fusion', 'inplace')

        f = theano.function([x, y], z, name="test_reduce_memory",
                            mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            from theano.tensor.var import TensorConstant
            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map.keys())[:]
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if (storage_map[i][0] and
                                storage_map[i][0] is storage_map[o][0]):
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len(set(id(v) for v in
                       itervalues(storage_map))) < len(storage_map) 
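The mode plumbing above generalizes: a custom Mode can be built from a linker object or name, then have named optimizations excluded. A minimal self-contained sketch, using the standard 'vm' linker string instead of an explicit VM_Linker:

import theano
import theano.tensor as tensor

# Build a mode around the 'vm' linker and drop two optimization tags.
m = theano.compile.get_mode(theano.Mode(linker='vm'))
m = m.excluding('fusion', 'inplace')

x = tensor.scalar('x')
f = theano.function([x], x * 2, mode=m)
print(f(3.0))  # -> array(6.0)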
Example #7
Source File: test_pfunc.py    From attention-lvcsr with MIT License
def test_duplicate_inputs(self):
        x = theano.tensor.lscalar('x')
        self.assertRaises(theano.compile.UnusedInputError,
                theano.function, [x, x, x], x) 
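In other words, listing the same variable more than once in the inputs is rejected at compile time. A standalone sketch of the behaviour this test pins down:

import theano
import theano.tensor as T

x = T.lscalar('x')
try:
    theano.function([x, x, x], x)  # same variable listed three times
except theano.compile.UnusedInputError as e:
    print('rejected:', e)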
Example #8
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def test_check_isfinite(self):
        x = theano.tensor.vector()
        f = theano.function([x], (x + 2) * 5, mode='DEBUG_MODE')
        g = theano.function([x], theano.tensor.log(x), mode='DEBUG_MODE')

        # this should work
        f(numpy.log([3, 4, 5]).astype(config.floatX))

        # If TensorType.filter_checks_isfinite were true, passing an invalid
        # value as an input would raise ValueError. Since it is not, DebugMode
        # checks internally and raises InvalidValueError instead.
        self.assertRaises(debugmode.InvalidValueError, f,
                numpy.log([3, -4, 5]).astype(config.floatX))
        self.assertRaises(debugmode.InvalidValueError, f,
                (numpy.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
        self.assertRaises(debugmode.InvalidValueError, f,
                (numpy.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))

        # generating an invalid value internally should trigger
        # InvalidValueError
        self.assertRaises(debugmode.InvalidValueError, g,
                numpy.asarray([3, -4, 5], dtype=config.floatX))

        # this should disable the exception
        theano.tensor.TensorType.filter_checks_isfinite = False
        theano.compile.mode.predefined_modes[
                'DEBUG_MODE'].check_isfinite = False
        # insert several Inf
        f(numpy.asarray(numpy.asarray([1.0, 1.0, 1.0]) / 0,
            dtype=config.floatX)) 
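Rather than mutating the predefined 'DEBUG_MODE' instance as the test does, a DebugMode can plausibly be constructed with the check disabled; check_isfinite as a constructor keyword is an assumption, mirroring the attribute toggled above:

import numpy
import theano

# check_isfinite as a constructor keyword is assumed here.
mode = theano.compile.DebugMode(check_isfinite=False)
x = theano.tensor.vector()
f = theano.function([x], x + 1, mode=mode)
inf = numpy.asarray([numpy.inf], dtype=theano.config.floatX)
print(f(inf))  # non-finite values pass through without InvalidValueError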
Example #9
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def tearDown(self):
        theano.tensor.TensorType.filter_checks_isfinite = self.old_ts
        theano.compile.mode.predefined_modes[
                'DEBUG_MODE'].check_isfinite = self.old_dm 
Example #10
Source File: test_debugmode.py    From attention-lvcsr with MIT License
def setUp(self):
        self.old_ts = theano.tensor.TensorType.filter_checks_isfinite
        self.old_dm = theano.compile.mode.predefined_modes[
                'DEBUG_MODE'].check_isfinite 
Example #11
Source File: test_function_module.py    From attention-lvcsr with MIT License
def test_empty_givens_updates():
    """
    Regression test for bug fixed in 8625e03.
    """
    # Empty givens / updates dictionaries were not properly detected before,
    # triggering useless crashes at compile time.
    x = T.scalar()
    y = x * 2
    function([theano.In(x)], y, givens={})
    function([theano.In(x)], y, updates={}) 
Example #12
Source File: test_function_module.py    From attention-lvcsr with MIT License
def test_constant_output(self):
        # Test that if the output is a constant, we respect the theano memory interface
        f = theano.function([], theano.tensor.constant([4]))
        # print f.maker.fgraph.toposort()
        out = f()
        assert (out == 4).all()
        out[0] = 3
        out2 = f()
        # If the following 2 asserts fail, it means Theano broke its memory contract.
        assert out2 is not out
        assert (out2 == 4).all()

        # Test that if the output is a constant and borrow, we respect the theano memory interface
        f = theano.function([], Out(theano.tensor.constant([4]), borrow=True))
        # print f.maker.fgraph.toposort()
        out = f()
        assert (out == 4).all()
        out[0] = 3
        out2 = f()

        if isinstance(theano.compile.mode.get_default_mode(),
                      theano.compile.DebugMode):
            # In DebugMode, we don't implement optimization based on borrow on the output.
            assert (out2 == 4).all()
        else:
            assert out2 is out
            assert (out2 == 3).all() 
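A condensed sketch of the borrow semantics this test exercises, outside DebugMode (theano.Out is the same wrapper imported as Out above):

import theano

f = theano.function([], theano.Out(theano.tensor.constant([4]), borrow=True))
a = f()
b = f()
# With borrow=True Theano may hand back its internal buffer, so repeated
# calls can return the very same array object; callers must not mutate it.
print(a is b)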
Example #13
Source File: nanguardmode.py    From attention-lvcsr with MIT License
def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """ compile utility function used by contains_nan and contains_inf
    """
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector('nan_guard')
    cuda_compile_failed = False
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function(
                [guard_input], T.min(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function(
                [guard_input], T.max(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function(
                [guard_input], T.max(T.abs_(guard_input)),
                mode='FAST_RUN'
                )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True 
Example #14
Source File: test_type.py    From attention-lvcsr with MIT License
def test_filter_float():
    theano.compile.shared_constructor(gpuarray_shared_constructor)
    try:
        s = theano.shared(numpy.array(0.0, dtype='float32'),
                          target=test_ctx_name)
        theano.function([], updates=[(s, 0.0)])
    finally:
        del theano.compile.sharedvalue.shared.constructors[-1] 
Example #15
Source File: test_subtensor.py    From attention-lvcsr with MIT License
def test_1d_set_adv_selection(self):
        a = set_subtensor(self.v[self.adv1q], self.v[self.adv1q])

        assert a.type == self.v.type

        # TODO: compile a function and verify that the subtensor is removed
        #      completely, because the whole expression is redundant.

        f = theano.function([self.v, self.adv1q], a, allow_input_downcast=True)
        aval = f([.4, .9, .1], [1, 2])
        assert numpy.allclose(aval, [.4, 0.9, 0.1]) 