Python theano.tensor.inc_subtensor() Examples

The following are 30 code examples of theano.tensor.inc_subtensor(), collected from open-source projects. The source file and project for each example are listed above it. You may also want to check out all available functions and classes of the module theano.tensor.
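As a quick refresher before the examples: T.inc_subtensor(x[idx], y) builds a new tensor equal to x with the selected sub-region incremented by y, while T.set_subtensor overwrites that region; neither modifies x in place. A minimal sketch (the variable names below are our own, not taken from any of the projects):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
# Add 1.0 to the second column; every other entry of x is left unchanged.
y = T.inc_subtensor(x[:, 1], 1.0)
f = theano.function([x], y)

print(f(np.zeros((2, 3), dtype=theano.config.floatX)))
# [[ 0.  1.  0.]
#  [ 0.  1.  0.]]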
Example #1
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
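The two inc_subtensor calls above implement the shift-and-add steps of the CTC forward recursion: adding _p_prev[:-1] into _p_prev[1:] moves probability mass from each position to its successor, and the second call adds the skip transitions at active_skip_idxs + 2. A tiny sketch of the shift-and-add step in isolation (names are ours, not from the original backend code):

import numpy as np
import theano
import theano.tensor as T

p = T.vector('p')
shifted = T.inc_subtensor(p[1:], p[:-1])   # result[i] = p[i] + p[i-1] for i >= 1
f = theano.function([p], shifted)
print(f(np.array([1., 2., 3.], dtype=theano.config.floatX)))   # [ 1.  3.  5.]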
Example #2
Source File: optimization.py    From rcnn with Apache License 2.0
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
    has_momentum = momentum.get_value() > 0.0
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            if has_momentum:
                acc_slices = get_similar_subtensor(acc, indexes, p)
                new_acc = acc_slices*momentum + g
                updates[acc] = T.set_subtensor(acc_slices, new_acc)
            else:
                new_acc = g
            updates[origin] = T.inc_subtensor(p, - lr * new_acc)
        else:
            if has_momentum:
                new_acc = acc*momentum + g
                updates[acc] = new_acc
            else:
                new_acc = g
            updates[p] = p - lr * new_acc 
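The is_subtensor_op branch above is the standard Theano pattern for sparse updates: when a "parameter" is really a slice of a larger shared variable (for example, the embedding rows used in a minibatch), the update is applied with inc_subtensor on the original shared variable so that only the touched rows are written. A minimal self-contained sketch of that pattern, using our own illustrative names (emb, idxs) rather than the project's is_subtensor_op / get_subtensor_op_inputs helpers:

import numpy as np
import theano
import theano.tensor as T

emb = theano.shared(np.zeros((1000, 50), dtype=theano.config.floatX), name='emb')
idxs = T.ivector('idxs')
sub = emb[idxs]                        # only the rows used in this minibatch
cost = T.sum(sub ** 2)                 # stand-in cost
g = T.grad(cost, wrt=sub)              # gradient w.r.t. the slice, not the full matrix
lr = np.asarray(0.01, dtype=theano.config.floatX)
updates = {emb: T.inc_subtensor(sub, -lr * g)}   # only the selected rows change
train = theano.function([idxs], cost, updates=updates)
train(np.array([3, 7, 42], dtype='int32'))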
Example #3
Source File: theano_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #4
Source File: optimization.py    From rcnn with Apache License 2.0
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            #acc_slices = acc[indexes]
            acc_slices = get_similar_subtensor(acc, indexes, p)
            new_acc = acc_slices + g**2
            updates[acc] = T.set_subtensor(acc_slices, new_acc)
            updates[origin] = T.inc_subtensor(p, \
                    - lr * (g / T.sqrt(new_acc + eps)))
        else:
            new_acc = acc + g**2
            updates[acc] = new_acc
            updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
            #updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
            # which one to use? 
Example #5
Source File: optimization.py    From rcnn with Apache License 2.0
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d 
Example #6
Source File: test_basic_ops.py    From D-VAE with MIT License
def test_inc_subtensor():
    shared = cuda.shared_constructor
    #shared = tensor.shared
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                      dtype='float32')
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10], [10, 10, 10]],
                      dtype='float32')
    expr = T.inc_subtensor(x[:, 1:3], y[:, 1:3])

    f = theano.function([x, y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                node.op.set_instead_of_inc == False
                for node in f.maker.fgraph.toposort()]) == 1
    utt.assert_allclose(f(xval, yval), [[1., 12., 13.],
                                        [4., 15., 16.], [7., 18., 19.]]) 
Example #7
Source File: test_opt.py    From D-VAE with MIT License
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost) 
Example #8
Source File: test_inc_subtensor.py    From D-VAE with MIT License
def test_wrong_broadcast(self):
        a = tt.col()
        increment = tt.vector()

        # These symbolic graphs are legitimate, as long as increment has exactly
        # one element. So it should fail at runtime, not at compile time.
        rng = numpy.random.RandomState(utt.fetch_seed())

        def rng_randX(*shape):
            return rng.rand(*shape).astype(theano.config.floatX)

        for op in (tt.set_subtensor, tt.inc_subtensor):
            for base in (a[:], a[0]):
                out = op(base, increment)
                f = theano.function([a, increment], out)
                # This one should work
                f(rng_randX(3, 1), rng_randX(1))
                # These ones should not
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(2))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(3))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(0)) 
Example #9
Source File: metrics.py    From ntm-one-shot with MIT License
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], \
        nb_classes=5, nb_samples_per_class=10, batch_size=1):
    accuracy_0 = theano.shared(np.zeros((batch_size, nb_samples_per_class), \
        dtype=theano.config.floatX))
    indices_0 = theano.shared(np.zeros((batch_size, nb_classes), \
        dtype=np.int32))
    batch_range = T.arange(batch_size)
    def step_(p, t, acc, idx):
        acc = T.inc_subtensor(acc[batch_range, idx[batch_range, t]], T.eq(p, t))
        idx = T.inc_subtensor(idx[batch_range, t], 1)
        return (acc, idx)
    (raw_accuracy, _), _ = theano.foldl(step_, sequences=[predictions.dimshuffle(1, 0), \
        targets.dimshuffle(1, 0)], outputs_info=[accuracy_0, indices_0])
    accuracy = T.mean(raw_accuracy / nb_classes, axis=0)

    return accuracy 
Example #10
Source File: model.py    From text_convnet with MIT License
def adagrad_update(self, cost, learning_rate, eps=1e-8):
        params = [ p if p != self.slices else self.EMB for p in self.params ]
        accumulators = [ theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                                dtype=theano.config.floatX))
                         for p in params ]
        gparams = [ T.grad(cost, param) for param in self.params ]
        self.gparams = gparams
        updates = [ ]
        for param, gparam, acc in zip(self.params, gparams, accumulators):
            if param == self.slices:
                acc_slices = acc[self.x.flatten()]
                new_acc_slices = acc_slices + gparam**2
                updates.append( (acc, T.set_subtensor(acc_slices, new_acc_slices)) )
                updates.append( (self.EMB, T.inc_subtensor(param,
                                 - learning_rate * gparam / T.sqrt(new_acc_slices+eps))) )
            else:
                new_acc = acc + gparam**2
                updates.append( (acc, new_acc) )
                updates.append( (param, param - learning_rate * gparam /
                                    T.sqrt(new_acc + eps)) )
        return updates 
Example #11
Source File: nbow.py    From text_convnet with MIT License
def adagrad_update(self, cost, learning_rate, eps=1e-8):
        params = [ p if p != self.slices else self.EMB for p in self.params ]
        accumulators = [ theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                                dtype=theano.config.floatX))
                         for p in params ]
        gparams = [ T.grad(cost, param) for param in self.params ]
        self.gparams = gparams
        updates = [ ]
        for param, gparam, acc in zip(self.params, gparams, accumulators):
            if param == self.slices:
                acc_slices = acc[self.x.flatten()]
                new_acc_slices = acc_slices + gparam**2
                updates.append( (acc, T.set_subtensor(acc_slices, new_acc_slices)) )
                updates.append( (self.EMB, T.inc_subtensor(param,
                                 - learning_rate * gparam / T.sqrt(new_acc_slices+eps))) )
            else:
                new_acc = acc + gparam**2
                updates.append( (acc, new_acc) )
                updates.append( (param, param - learning_rate * gparam /
                                    T.sqrt(new_acc + eps)) )
        return updates 
Example #12
Source File: rnn.py    From TextDetector with GNU General Public License v3.0
def fprop_step(self, state_below, state_before, U):
        """
        Scan function for case without masks

        Parameters
        ----------
        state_below : TheanoTensor
            todo
        """

        g_on = tensor.inc_subtensor(
            state_below[:, self.dim:],
            tensor.dot(state_before, U[:, self.dim:])
        )
        r_on = tensor.nnet.sigmoid(g_on[:, self.dim:2*self.dim])
        u_on = tensor.nnet.sigmoid(g_on[:, 2*self.dim:])

        z_t = tensor.tanh(
            g_on[:, :self.dim] +
            tensor.dot(r_on * state_before, U[:, :self.dim])
        )
        z_t = u_on * state_before + (1. - u_on) * z_t

        return z_t 
Example #13
Source File: theano_backend.py    From GraphicDesignPatternByPython with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #14
Source File: rnn.py    From TextDetector with GNU General Public License v3.0
def fprop_step_mask(self, state_below, mask, state_before, U):
        """
        Scan function for case using masks

        Parameters
        ----------
        state_below : TheanoTensor
            todo
        """

        g_on = tensor.inc_subtensor(
            state_below[:, self.dim:],
            tensor.dot(state_before, U[:, self.dim:])
        )
        r_on = tensor.nnet.sigmoid(g_on[:, self.dim:2*self.dim])
        u_on = tensor.nnet.sigmoid(g_on[:, 2*self.dim:])

        z_t = tensor.tanh(
            g_on[:, :self.dim] +
            tensor.dot(r_on * state_before, U[:, :self.dim])
        )
        z_t = u_on * state_before + (1. - u_on) * z_t
        z_t = mask[:, None] * z_t + (1 - mask[:, None]) * state_before

        return z_t 
Example #15
Source File: optimization.py    From aspect_adversarial with MIT License
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d 
Example #16
Source File: gru4rec.py    From sars_tutorial with MIT License
def rmsprop(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
        v1 = np.float32(self.adapt_params[0])
        v2 = np.float32(1.0 - self.adapt_params[0])
        acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
        if sample_idx is None:
            acc_new = v1 * acc + v2 * grad ** 2
            updates[acc] = acc_new
        else:
            acc_s = acc[sample_idx]
            #            acc_new = v1 * acc_s + v2 * grad ** 2 #Faster, but inaccurate when an index occurs multiple times
            #            updates[acc] = T.set_subtensor(acc_s, acc_new) #Faster, but inaccurate when an index occurs multiple times
            updates[acc] = T.inc_subtensor(T.set_subtensor(acc_s, acc_s * v1)[sample_idx],
                                           v2 * grad ** 2)  # Slower, but accurate when an index occurs multiple times
            acc_new = updates[acc][sample_idx]  # Slower, but accurate when an index occurs multiple times
        gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
        return grad / gradient_scaling 
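The commented-out alternative in this example is faster but, as its own comments note, inaccurate when sample_idx contains repeated indices; the inc_subtensor form is used because increments at repeated indices accumulate, whereas set_subtensor keeps only one of the writes. A small illustrative check (our own toy variables):

import numpy as np
import theano
import theano.tensor as T

v = T.vector('v')
idx = np.array([0, 0, 2])              # index 0 appears twice
ones = np.ones(3, dtype=theano.config.floatX)

f_inc = theano.function([v], T.inc_subtensor(v[idx], ones))
f_set = theano.function([v], T.set_subtensor(v[idx], ones))

z = np.zeros(4, dtype=theano.config.floatX)
print(f_inc(z))   # [ 2.  0.  1.  0.]  -- both increments at index 0 are applied
print(f_set(z))   # [ 1.  0.  1.  0.]  -- only one write survives at index 0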
Example #17
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #18
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #19
Source File: test_inc_subtensor.py    From attention-lvcsr with MIT License
def test_wrong_broadcast(self):
        a = tt.col()
        increment = tt.vector()

        # These symbolic graphs are legitimate, as long as increment has exactly
        # one element. So it should fail at runtime, not at compile time.
        rng = numpy.random.RandomState(utt.fetch_seed())

        def rng_randX(*shape):
            return rng.rand(*shape).astype(theano.config.floatX)

        for op in (tt.set_subtensor, tt.inc_subtensor):
            for base in (a[:], a[0]):
                out = op(base, increment)
                f = theano.function([a, increment], out)
                # This one should work
                f(rng_randX(3, 1), rng_randX(1))
                # These ones should not
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(2))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(3))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(0)) 
Example #20
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #21
Source File: test_opt.py    From attention-lvcsr with MIT License
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost) 
Example #22
Source File: test_basic_ops.py    From attention-lvcsr with MIT License
def test_inc_subtensor():
    shared = cuda.shared_constructor
    #shared = tensor.shared
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                      dtype='float32')
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10], [10, 10, 10]],
                      dtype='float32')
    expr = T.inc_subtensor(x[:, 1:3], y[:, 1:3])

    f = theano.function([x, y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                node.op.set_instead_of_inc == False
                for node in f.maker.fgraph.toposort()]) == 1
    utt.assert_allclose(f(xval, yval), [[1., 12., 13.],
                                        [4., 15., 16.], [7., 18., 19.]]) 
Example #23
Source File: theano_backend.py    From Att-ChemdNER with Apache License 2.0
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #24
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #25
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #26
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #27
Source File: theano_backend.py    From keras-lambda with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #28
Source File: theano_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def __count_sketch(h, s, v,  # Sequences
                   y, # Outputs info
                   ):
    return T.cast(T.inc_subtensor(y[:, h], T.dot(s, v)), 'float32')

# 1d Convolution 
Example #29
Source File: theano_graph_pro.py    From gempy with GNU Lesser General Public License v3.0
def get_boundary_voxels(self, unique_val):
        uv_3d = T.cast(T.round(unique_val[0, :T.prod(self.regular_grid_res)].reshape(self.regular_grid_res, ndim=3)),
                       'int32')

        uv_l = T.horizontal_stack(uv_3d[1:, :, :].reshape((1, -1)),
                                  uv_3d[:, 1:, :].reshape((1, -1)),
                                  uv_3d[:, :, 1:].reshape((1, -1)))

        uv_r = T.horizontal_stack(uv_3d[:-1, :, :].reshape((1, -1)),
                                  uv_3d[:, :-1, :].reshape((1, -1)),
                                  uv_3d[:, :, :-1].reshape((1, -1)))

        shift = uv_l - uv_r
        select_edges = T.neq(shift.reshape((1, -1)), 0)
        select_edges_dir = select_edges.reshape((3, -1))

        select_voxels = T.zeros_like(uv_3d)
        select_voxels = T.inc_subtensor(select_voxels[1:, :, :],
                                        select_edges_dir[0].reshape((self.regular_grid_res - np.array([1, 0, 0])),
                                                                    ndim=3))
        select_voxels = T.inc_subtensor(select_voxels[:-1, :, :],
                                        select_edges_dir[0].reshape((self.regular_grid_res - np.array([1, 0, 0])),
                                                                    ndim=3))

        select_voxels = T.inc_subtensor(select_voxels[:, 1:, :],
                                        select_edges_dir[1].reshape((self.regular_grid_res - np.array([0, 1, 0])),
                                                                    ndim=3))
        select_voxels = T.inc_subtensor(select_voxels[:, :-1, :],
                                        select_edges_dir[1].reshape((self.regular_grid_res - np.array([0, 1, 0])),
                                                                    ndim=3))

        select_voxels = T.inc_subtensor(select_voxels[:, :, 1:],
                                        select_edges_dir[2].reshape((self.regular_grid_res - np.array([0, 0, 1])),
                                                                    ndim=3))
        select_voxels = T.inc_subtensor(select_voxels[:, :, :-1],
                                        select_edges_dir[2].reshape((self.regular_grid_res - np.array([0, 0, 1])),
                                                                    ndim=3))

        return select_voxels

    # region Geometry 
Example #30
Source File: generic_utils.py    From pixelCNN with MIT License
def Skew(inputs, WIDTH, HEIGHT):
    """
    input.shape: (batch size, HEIGHT, WIDTH, num_channels)
    """
    buf = T.zeros(
        (inputs.shape[0], inputs.shape[1], 2*inputs.shape[2] - 1, inputs.shape[3]),
        theano.config.floatX
    )

    for i in xrange(HEIGHT):
        buf = T.inc_subtensor(buf[:, i, i:i+WIDTH, :], inputs[:,i,:,:])
    
    return buf
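
Each pass through the loop above uses inc_subtensor to write row i of the input into columns i .. i+WIDTH-1 of a wider all-zero buffer, producing a skewed layout in which row i is shifted right by i columns (this only fits when HEIGHT <= WIDTH, e.g. square images). A self-contained sketch of the same pattern on a tiny 2x2, single-channel input (the shapes are our own toy choice):

import numpy as np
import theano
import theano.tensor as T

HEIGHT, WIDTH = 2, 2
inputs = T.tensor4('inputs')           # (batch, HEIGHT, WIDTH, channels)
buf = T.zeros((inputs.shape[0], inputs.shape[1],
               2 * inputs.shape[2] - 1, inputs.shape[3]),
              theano.config.floatX)
for i in range(HEIGHT):
    buf = T.inc_subtensor(buf[:, i, i:i + WIDTH, :], inputs[:, i, :, :])

skew = theano.function([inputs], buf)
x = np.arange(4, dtype=theano.config.floatX).reshape((1, 2, 2, 1))
print(skew(x)[0, :, :, 0])
# [[ 0.  1.  0.]
#  [ 0.  2.  3.]]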