Python theano.ifelse.ifelse() Examples

The following are 30 code examples of theano.ifelse.ifelse(), collected from open-source projects. The source file and project each example comes from are noted above it. You may also want to check out the other available functions and classes of the theano.ifelse module.
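Before the examples, a minimal sketch of the call itself may help (variable names here are illustrative, not taken from any of the projects below): ifelse(condition, then_branch, else_branch) builds a lazy conditional, and only the branch selected by the runtime value of the condition is evaluated.

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

cond = T.iscalar('cond')   # any scalar can serve as the condition
x = T.vector('x')
y = T.vector('y')
# Both branches must have the same Theano type (here: floatX vectors).
f = theano.function([cond, x, y], ifelse(cond, x * 2, y + 1))

vx = np.ones(3, dtype=theano.config.floatX)
vy = np.zeros(5, dtype=theano.config.floatX)
print(f(1, vx, vy))   # [ 2.  2.  2.]  -- only the x * 2 branch runs
print(f(0, vx, vy))   # [ 1.  1.  1.  1.  1.]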
Example #1
Source File: theano_utils.py    From pysaliency with MIT License
def __init__(self, input, centerbias=None, alpha=1.0):
        self.input = input
        if centerbias is None:
            centerbias = np.ones(12)
        self.alpha = theano.shared(value=np.array(alpha).astype(theano.config.floatX), name='alpha')
        self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
        self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')

        height = T.cast(input.shape[0], theano.config.floatX)
        width = T.cast(input.shape[1], theano.config.floatX)
        x_coords = (T.arange(width) - 0.5*width) / (0.5*width)
        y_coords = (T.arange(height) - 0.5*height) / (0.5*height) + 0.0001  # We cannot have zeros in there because of grad

        x_coords = x_coords.dimshuffle('x', 0)
        y_coords = y_coords.dimshuffle(0, 'x')

        dists = T.sqrt(T.square(x_coords) + self.alpha*T.square(y_coords))
        self.max_dist = T.sqrt(1 + self.alpha)
        self.dists = dists/self.max_dist

        self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))

        apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
        self.output = ifelse(apply_centerbias, self.input+self.factors, self.input)
        self.params = [self.centerbias_ys, self.alpha] 
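Because ifelse is lazy, the nonlinearity producing self.factors is only evaluated when apply_centerbias is true at run time; with two or fewer centerbias parameters the layer passes its input through unchanged.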
Example #2
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_sparse_tensor_error(self):
        import theano.sparse
        if not theano.sparse.enable_sparse:
            raise SkipTest("Optimization temporarily disabled")
        rng = numpy.random.RandomState(utt.fetch_seed())
        data = rng.rand(2, 3).astype(self.dtype)
        x = self.shared(data)
        y = theano.sparse.matrix('csc', dtype=self.dtype, name='y')
        z = theano.sparse.matrix('csr', dtype=self.dtype, name='z')
        cond = theano.tensor.iscalar('cond')

        self.assertRaises(TypeError, ifelse, cond, x, y)
        self.assertRaises(TypeError, ifelse, cond, y, x)
        self.assertRaises(TypeError, ifelse, cond, x, z)
        self.assertRaises(TypeError, ifelse, cond, z, x)
        self.assertRaises(TypeError, ifelse, cond, y, z)
        self.assertRaises(TypeError, ifelse, cond, z, y) 
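Both branches of an ifelse must have exactly the same Theano type, so this test expects a TypeError whenever a dense tensor is mixed with a sparse matrix, and also when the two sparse branches use different formats (csc vs. csr).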
Example #3
Source File: theano_utils.py    From pysaliency with MIT License
def __init__(self, input, centerbias=None, alpha=1.0):
        self.input = input
        if centerbias is None:
            centerbias = np.ones(12)
        self.alpha = theano.shared(value=np.array(alpha).astype(theano.config.floatX), name='alpha')
        self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
        self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')

        height = T.cast(input.shape[0], theano.config.floatX)
        width = T.cast(input.shape[1], theano.config.floatX)
        x_coords = (T.arange(width) - 0.5*width) / (0.5*width)
        y_coords = (T.arange(height) - 0.5*height) / (0.5*height) + 0.0001  # We cannot have zeros in there because of grad

        x_coords = x_coords.dimshuffle('x', 0)
        y_coords = y_coords.dimshuffle(0, 'x')

        dists = T.sqrt(T.square(x_coords) + self.alpha*T.square(y_coords))
        self.max_dist = T.sqrt(1 + self.alpha)
        self.dists = dists/self.max_dist

        self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))

        apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
        self.output = ifelse(apply_centerbias, self.input*self.factors, self.input)
        self.params = [self.centerbias_ys, self.alpha] 
Example #4
Source File: test_vm.py    From D-VAE with MIT License
def test_c_thunks():
    a = tensor.scalars('a')
    b, c = tensor.vectors('bc')
    cases = [False]
    if theano.config.cxx:
        cases.append(True)
    for c_thunks in cases:
        f = function([a, b, c], ifelse(a, a * b, b * c),
                     mode=Mode(
                         optimizer=None,
                         linker=vm.VM_Linker(c_thunks=c_thunks,
                                             use_cloop=False)))
        f(1, [2], [3, 2])
        from nose.tools import assert_raises
        assert_raises(ValueError, f, 0, [2], [3, 4])
        assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks 
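With optimizer=None and a non-cloop VM_Linker, this checks that per-node C thunks appear exactly when c_thunks=True; the ValueError is expected because calling f(0, [2], [3, 4]) takes the b * c branch, whose operands have mismatched shapes.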
Example #5
Source File: DropoutHiddenLayer.py    From SurvivalNet with Apache License 2.0
def __init__(self, rng, input, n_in, n_out, is_train,
                 activation, dropout_rate, mask=None, W=None, b=None):
        super(DropoutHiddenLayer, self).__init__(
                rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
                activation=activation)

        self.dropout_rate = dropout_rate
        self.srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
        self.mask = mask
        self.layer = self.output

        # Compute outputs for the train and test phases, applying dropout when needed.
        train_output = self.layer * T.cast(self.mask, theano.config.floatX)
        test_output = self.output * (1 - dropout_rate)
        self.output = ifelse(T.eq(is_train, 1), train_output, test_output)
        return 
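The same train/test switch can be sketched standalone. This is a hypothetical, simplified version of the pattern above (not the SurvivalNet API): apply a binomial dropout mask at training time and rescale by the keep probability at test time.

import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.tensor.shared_randomstreams import RandomStreams

p_drop = 0.5
srng = RandomStreams(seed=1234)
h = T.matrix('h')
is_train = T.iscalar('is_train')
mask = T.cast(srng.binomial(n=1, p=1 - p_drop, size=h.shape),
              theano.config.floatX)
# Train: randomly zero units; test: scale by the keep probability.
out = ifelse(T.eq(is_train, 1), h * mask, h * (1 - p_drop))
dropout = theano.function([h, is_train], out)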
Example #6
Source File: test_ifelse.py    From D-VAE with MIT License
def test_lazy_if(self):
        # Tests that lazy if works even if the two results have different
        # shapes but the same type (i.e. both vectors, or matrices or
        # whatnot of same dtype)
        x = tensor.vector('x', dtype=self.dtype)
        y = tensor.vector('y', dtype=self.dtype)
        c = tensor.iscalar('c')
        f = theano.function([c, x, y], ifelse(c, x, y), mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(1))
        rng = numpy.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
        vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

        assert numpy.allclose(vx, f(1, vx, vy))
        assert numpy.allclose(vy, f(0, vx, vy)) 
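Note the contrast with tensor.switch: switch is an elementwise op that evaluates both branches and requires them to broadcast against each other, while ifelse lazily evaluates one whole branch, so the two branches only need the same type, not the same shape.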
Example #7
Source File: aggregation.py    From attention-lvcsr with MIT License
def get_aggregator(self):
        initialized = shared_like(0.)
        expression_acc = shared_like(self.expression)

        # Dummy default expression to use as the previously-accumulated
        # value, that has the same shape as the new result
        expression_zeros = tensor.as_tensor(self.expression).zeros_like()

        conditional_update_expr = self.expression + ifelse(initialized,
                                                           expression_acc,
                                                           expression_zeros)

        initialization_updates = [(expression_acc,
                                   tensor.zeros_like(expression_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(expression_acc,
                                 conditional_update_expr),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=expression_acc)
        return aggregator 
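The initialized flag makes the first accumulation well defined: while it is 0., ifelse substitutes a fresh zeros_like expression for the stale accumulator, and once accumulation_updates have run it stays at 1., so later steps add to the real accumulated value.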
Example #8
Source File: test_ifelse.py    From D-VAE with MIT License
def test_sparse_tensor_error(self):
        import theano.sparse
        if not theano.sparse.enable_sparse:
            raise SkipTest("Optimization temporarily disabled")
        rng = numpy.random.RandomState(utt.fetch_seed())
        data = rng.rand(2, 3).astype(self.dtype)
        x = self.shared(data)
        y = theano.sparse.matrix('csc', dtype=self.dtype, name='y')
        z = theano.sparse.matrix('csr', dtype=self.dtype, name='z')
        cond = theano.tensor.iscalar('cond')

        self.assertRaises(TypeError, ifelse, cond, x, y)
        self.assertRaises(TypeError, ifelse, cond, y, x)
        self.assertRaises(TypeError, ifelse, cond, x, z)
        self.assertRaises(TypeError, ifelse, cond, z, x)
        self.assertRaises(TypeError, ifelse, cond, y, z)
        self.assertRaises(TypeError, ifelse, cond, z, y) 
Example #9
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_lazy_if(self):
        # Tests that lazy if works even if the two results have different
        # shapes but the same type (i.e. both vectors, or matrices or
        # whatnot of same dtype)
        x = tensor.vector('x', dtype=self.dtype)
        y = tensor.vector('y', dtype=self.dtype)
        c = tensor.iscalar('c')
        f = theano.function([c, x, y], ifelse(c, x, y), mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(1))
        rng = numpy.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
        vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

        assert numpy.allclose(vx, f(1, vx, vy))
        assert numpy.allclose(vy, f(0, vx, vy)) 
Example #10
Source File: DropoutHiddenLayer.py    From Deep-Neural-Networks-HealthCare with MIT License
def __init__(self, rng, input, n_in, n_out, is_train,
                 activation, dropout_rate, mask=None, W=None, b=None):
        super(DropoutHiddenLayer, self).__init__(
                rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
                activation=activation)

        self.dropout_rate = dropout_rate
        self.srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
        self.mask = mask
        self.layer = self.output

        # Compute outputs for the train and test phases, applying dropout when needed.
        train_output = self.layer * T.cast(self.mask, theano.config.floatX)
        test_output = self.output * (1 - dropout_rate)
        self.output = ifelse(T.eq(is_train, 1), train_output, test_output)
        return 
Example #11
Source File: test_vm.py    From attention-lvcsr with MIT License
def test_c_thunks():
    a = tensor.scalars('a')
    b, c = tensor.vectors('bc')
    cases = [False]
    if theano.config.cxx:
        cases.append(True)
    for c_thunks in cases:
        f = function([a, b, c], ifelse(a, a * b, b * c),
                     mode=Mode(
                         optimizer=None,
                         linker=vm.VM_Linker(c_thunks=c_thunks,
                                             use_cloop=False)))
        f(1, [2], [3, 2])
        from nose.tools import assert_raises
        assert_raises(ValueError, f, 0, [2], [3, 4])
        assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks 
Example #12
Source File: layers.py    From gated_word_char_rlm with BSD 3-Clause "New" or "Revised" License
def gate_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """ 
    compute the forward pass for a gate layer

    Parameters
    ----------
    tparams        : OrderedDict of theano shared variables, {parameter name: value}
    X_word         : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char         : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options        : dictionary, {hyperparameter: value}
    prefix         : string, layer name
    pretrain_mode  : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ          : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X              : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)

    """      
    # compute gating values, Eq.(3)
    G = tensor.nnet.sigmoid(tensor.dot(X_word, tparams[p_name(prefix, 'v')]) + tparams[p_name(prefix, 'b')][0])
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),  
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               G[:, :, None] * X_char + (1. - G)[:, :, None] * X_word)   
    return eval(activ)(X) 
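The nested ifelse above is a three-way dispatch on pretrain_mode. A minimal sketch of just that dispatch, with hypothetical scalar stand-ins for the word and char tensors:

import numpy
import theano
import theano.tensor as T
from theano.ifelse import ifelse

pretrain_mode = theano.shared(numpy.float32(0.), name='pretrain_mode')
word = T.fscalar('word')
char = T.fscalar('char')
mixed = numpy.float32(0.5) * (word + char)   # stand-in for the gated mix

out = ifelse(T.le(pretrain_mode, numpy.float32(1.)),
             ifelse(T.eq(pretrain_mode, numpy.float32(0.)), word, char),
             mixed)
f = theano.function([word, char], out, allow_input_downcast=True)
print(f(1.0, 3.0))                     # 1.0: mode 0. takes the word branch
pretrain_mode.set_value(numpy.float32(2.))
print(f(1.0, 3.0))                     # 2.0: mode 2. takes the mixed branch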
Example #13
Source File: layers.py    From gated_word_char_rlm with BSD 3-Clause "New" or "Revised" License
def concat_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """ 
    compute the forward pass for a concat layer

    Parameters
    ----------
    tparams        : OrderedDict of theano shared variables, {parameter name: value}
    X_word         : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char         : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options        : dictionary, {hyperparameter: value}
    prefix         : string,  layer name
    pretrain_mode  : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ          : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X              : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)

    """
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               tensor.dot(tensor.concatenate([X_word, X_char], axis=2), tparams[p_name(prefix, 'W')]) + tparams[p_name(prefix, 'b')]) 
    return eval(activ)(X) 
Example #14
Source File: layer.py    From deep_srl with Apache License 2.0
def connect(self, inputs, is_train):
    """ Trick to speed up model compiling at decoding time.
        (Avoids building a complicated CG.)
    """
    if not self.fix_mask:
      self.generate_mask(inputs.shape, is_train)
     
    if self.fast_predict:
      return inputs * (1 - self.dropout_prob)

    return ifelse(is_train,
            inputs * self.dropout_mask,
            inputs * (1 - self.dropout_prob)) 
Example #15
Source File: linesearch.py    From TextDetector with GNU General Public License v3.0
def lazy_or(name='none', *args):
    """
    .. todo::

        WRITEME
    """
    def apply_me(args):
        if len(args) == 1:
            return args[0]
        else:
            rval = ifelse(args[0], true, apply_me(args[1:]),
                          name=name + str(len(args)))
            return rval
    return apply_me(args) 
Example #16
Source File: linesearch.py    From TextDetector with GNU General Public License v3.0
def lazy_and(name='node', *args):
    """
    .. todo::

        WRITEME
    """
    def apply_me(args):
        if len(args) == 1:
            return args[0]
        else:
            rval = ifelse(TT.eq(args[0], zero), false, apply_me(args[1:]),
                          name=name + str(len(args)))
            return rval
    return apply_me(args) 
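Both helpers fold their argument list into nested ifelse calls so later conditions are only evaluated when the earlier ones do not already decide the result; they rely on module-level constants true, false, and zero defined elsewhere in linesearch.py. A hedged standalone sketch of the same short-circuit idea, with those constants assumed as below:

import theano
import theano.tensor as TT
from theano.ifelse import ifelse

# Assumed module-level constants, as in linesearch.py:
true = TT.constant(1, dtype='int8')
false = TT.constant(0, dtype='int8')
zero = TT.constant(0, dtype='int8')

a = TT.iscalar('a')
b = TT.iscalar('b')
# Lazy "a or b": b's branch is only evaluated when a is 0.
a_or_b = ifelse(a, true, ifelse(b, true, false))
# Lazy "a and b": b's branch is only evaluated when a is nonzero.
a_and_b = ifelse(TT.eq(a, zero), false, ifelse(TT.eq(b, zero), false, true))
f = theano.function([a, b], [a_or_b, a_and_b])
print(f(1, 0))   # a or b -> 1, a and b -> 0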
Example #17
Source File: delayed_trainers.py    From deepy with MIT License
def learning_updates(self):
        batch_counter = theano.shared(np.array(0, dtype="int32"), "batch_counter")
        batch_size = self.batch_size
        to_update = batch_counter >= batch_size

        for param in self.network.parameters:
            # delta = self.learning_rate * T.grad(self.J, param)
            gsum = theano.shared(np.zeros(param.get_value().shape, dtype=FLOATX), "batch_gsum_%s" % param.name)
            yield gsum, ifelse(to_update, T.zeros_like(gsum), gsum + T.grad(self.cost, param))
            delta = self.learning_rate * gsum / batch_size
            yield param, ifelse(to_update, param - delta, param)

        yield batch_counter, ifelse(to_update, T.constant(0, dtype="int32"), batch_counter + 1) 
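Here the lazy conditionals implement gradient accumulation: gradients are summed into gsum for batch_size steps, and only when batch_counter reaches batch_size is the averaged update applied to the parameter, with the accumulator and the counter reset in the same step.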
Example #18
Source File: functions.py    From deepy with MIT License
def ifelse(condition, then_branch, else_branch):
    return theano_ifelse(condition, then_branch, else_branch) 
Example #19
Source File: runtime.py    From deepy with MIT License
def iftrain(self, then_branch, else_branch):
        """
        Execute `then_branch` when training.
        """
        return ifelse(self._training_flag, then_branch, else_branch, name="iftrain") 
Example #20
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_ndim_mismatch(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        data = rng.rand(5).astype(self.dtype)
        x = self.shared(data)
        y = tensor.col('y', self.dtype)
        cond = theano.tensor.iscalar('cond')

        self.assertRaises(TypeError, ifelse, cond, x, y)
        self.assertRaises(TypeError, ifelse, cond, y, x) 
Example #21
Source File: theano_utils.py    From pysaliency with MIT License
def __init__(self, input, sigma=20.0, window_radius=60):
        self.input = input
        self.sigma = theano.shared(value=np.array(sigma, dtype=theano.config.floatX), name='sigma')
        apply_blur = T.gt(self.sigma, 0.0)
        no_blur = T.le(self.sigma, 0.0)
        self.output = ifelse(no_blur, input, gaussian_filter(input.dimshuffle('x', 0, 1), self.sigma, window_radius)[0, :, :])
        self.params = [self.sigma] 
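Since ifelse is lazy, the whole gaussian_filter subgraph is skipped whenever sigma is not positive, so setting sigma to zero turns the layer into a cheap identity. (The apply_blur variable is computed but unused in this snippet.)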
Example #22
Source File: aggregation.py    From attention-lvcsr with MIT License
def get_aggregator(self):
        initialized = shared_like(0.)
        numerator_acc = shared_like(self.numerator)
        denominator_acc = shared_like(self.denominator)

        # Dummy default expression to use as the previously-accumulated
        # value, that has the same shape as the new result
        numerator_zeros = tensor.as_tensor(self.numerator).zeros_like()
        denominator_zeros = tensor.as_tensor(self.denominator).zeros_like()

        conditional_update_num = self.numerator + ifelse(initialized,
                                                         numerator_acc,
                                                         numerator_zeros)
        conditional_update_den = self.denominator + ifelse(initialized,
                                                           denominator_acc,
                                                           denominator_zeros)

        initialization_updates = [(numerator_acc,
                                   tensor.zeros_like(numerator_acc)),
                                  (denominator_acc,
                                   tensor.zeros_like(denominator_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(numerator_acc,
                                 conditional_update_num),
                                (denominator_acc,
                                 conditional_update_den),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(numerator_acc /
                                                  denominator_acc))
        return aggregator 
Example #23
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_grad_test_values(self):
        """
        Regression test for test values of `ifelse` gradient.
        """
        backup = theano.config.compute_test_value
        theano.config.compute_test_value = 'raise'
        try:
            x = tensor.scalar('x')
            x.tag.test_value = 1
            # Used to crash due to undefined test value.
            tensor.grad(ifelse(0, x, x), x)
        finally:
            theano.config.compute_test_value = backup 
Example #24
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_merge_ifs_true_false(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar('x1')
        x2 = tensor.scalar('x2')
        y1 = tensor.scalar('y1')
        y2 = tensor.scalar('y2')
        w1 = tensor.scalar('w1')
        w2 = tensor.scalar('w2')
        c = tensor.iscalar('c')

        out = ifelse(c,
                     ifelse(c, x1, x2) + ifelse(c, y1, y2) + w1,
                     ifelse(c, x1, x2) + ifelse(c, y1, y2) + w2)
        f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                            allow_input_downcast=True)
        assert len([x for x in f.maker.fgraph.toposort()
                    if isinstance(x.op, IfElse)]) == 1

        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                              vx1 + vy1 + vw1)
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                              vx2 + vy2 + vw2) 
Example #25
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_pushout2(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar('x1')
        x2 = tensor.scalar('x2')
        y1 = tensor.scalar('y1')
        y2 = tensor.scalar('y2')
        w1 = tensor.scalar('w1')
        w2 = tensor.scalar('w2')
        c = tensor.iscalar('c')
        x, y = ifelse(c, (x1, y1), (x2, y2), name='f1')
        z = ifelse(x > y, w1, w2, name='f2')
        out = x * z * y

        f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                            allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()
        if vx1 > vy1:
            vw = vw1
        else:
            vw = vw2
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                              vx1 * vy1 * vw)

        if vx2 > vy2:
            vw = vw1
        else:
            vw = vw2
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                              vx2 * vy2 * vw) 
Example #26
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_pushout1(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar('x1')
        x2 = tensor.scalar('x2')
        y1 = tensor.scalar('y1')
        y2 = tensor.scalar('y2')
        w1 = tensor.scalar('w1')
        w2 = tensor.scalar('w2')
        c = tensor.iscalar('c')
        x, y = ifelse(c, (x1, y1), (x2, y2), name='f1')
        z = ifelse(c, w1, w2, name='f2')
        out = x * z * y

        f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                            allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()

        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                              vx1 * vy1 * vw1)
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                              vx2 * vy2 * vw2) 
Example #27
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_remove_useless_inputs2(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.vector('x1')
        x2 = tensor.vector('x2')
        y1 = tensor.vector('y1')
        y2 = tensor.vector('y2')
        c = tensor.iscalar('c')
        z = ifelse(c, (x1, x1, x1, x2, x2), (y1, y1, y2, y2, y2))
        f = theano.function([c, x1, x2, y1, y2], z)

        ifnode = [x for x in f.maker.fgraph.toposort()
                  if isinstance(x.op, IfElse)][0]
        assert len(ifnode.outputs) == 3 
Example #28
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_remove_useless_inputs1(self):
        raise SkipTest("Optimization temporarily disabled")
        x = tensor.vector('x')
        y = tensor.vector('y')
        c = tensor.iscalar('c')
        z = ifelse(c, (x, x), (y, y))
        f = theano.function([c, x, y], z)

        ifnode = [n for n in f.maker.fgraph.toposort()
                  if isinstance(n.op, IfElse)][0]
        assert len(ifnode.inputs) == 3 
Example #29
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_merge(self):
        raise SkipTest("Optimization temporarily disabled")
        x = tensor.vector('x')
        y = tensor.vector('y')
        c = tensor.iscalar('c')
        z1 = ifelse(c, x + 1, y + 1)
        z2 = ifelse(c, x + 2, y + 2)
        z = z1 + z2
        f = theano.function([c, x, y], z)
        assert len([n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, IfElse)]) == 1 
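When enabled, this optimization merges IfElse nodes that share the same condition into one multi-output IfElse node, which is why the toposort is expected to contain exactly one such node.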
Example #30
Source File: test_ifelse.py    From attention-lvcsr with MIT License
def test_multiple_out_grad(self):
        # Tests that we can compute the gradients through lazy if
        x1 = tensor.vector('x1')
        x2 = tensor.vector('x2')
        y1 = tensor.vector('y1')
        y2 = tensor.vector('y2')
        c = tensor.iscalar('c')
        z = ifelse(c, (x1, x2), (y1, y2))
        grads = tensor.grad(z[0].sum() + z[1].sum(),
                            [x1, x2, y1, y2])

        f = theano.function([c, x1, x2, y1, y2], grads)
        rng = numpy.random.RandomState(utt.fetch_seed())

        lens = [rng.randint(200) for i in range(4)]
        values = [numpy.asarray(rng.uniform(size=(l,)), theano.config.floatX)
                  for l in lens]
        outs_1 = f(1, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
        assert numpy.all(outs_1[0] == 1.)
        assert numpy.all(outs_1[1] == 1.)
        assert numpy.all(outs_1[2] == 0.)
        assert numpy.all(outs_1[3] == 0.)

        outs_0 = f(0, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_0, lens)])
        assert numpy.all(outs_0[0] == 0.)
        assert numpy.all(outs_0[1] == 0.)
        assert numpy.all(outs_0[2] == 1.)
        assert numpy.all(outs_0[3] == 1.)
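The expected values follow from the lazy semantics: since the cost is a plain sum, the branch actually taken receives a gradient of ones, and the branch not taken receives zeros of the matching shape.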