Python theano.tensor.transpose() Examples

The following are 15 code examples of theano.tensor.transpose(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module theano.tensor.
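Before the examples, a minimal sketch of the two calling conventions used throughout (my own illustration, not taken from any project below): with no axes argument, T.transpose reverses the axis order; with an explicit axes list it applies an arbitrary permutation, as Example #1 does for a kernel tensor.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
y = T.transpose(x)                        # reverse all axes: (m, n) -> (n, m)

w = T.ftensor4('w')
w_t = T.transpose(w, axes=[1, 0, 2, 3])   # swap only the first two axes

f = theano.function([x], y)
print(f(np.arange(6.).reshape(2, 3)))     # prints the 3 x 2 transpose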
Example #1
Source File: test_conv_cuda_ndarray.py    From attention-lvcsr with MIT License
def test_logical_shapes(self):
        # Logical shapes are not supported anymore, so we check that it
        # raises an Exception.
        for stride in range(1, 4):
            kshp = (10, 2, 10, 10)
            featshp = (3, 10, 11, 11)

            a = tensor.ftensor4()
            A = tensor.ftensor4()

            # Need to transpose first two dimensions of kernel, and reverse
            # index kernel image dims (for correlation)
            kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])

            featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
                               featshp[3] * stride)
            kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
            self.assertRaises(ValueError, tensor.nnet.conv2d,
                              a, kernel_rotated,
                              border_mode='full',
                              image_shape=featshp,
                              filter_shape=kshp_rotated,
                              imshp_logical=featshp_logical[1:],
                              kshp_logical=kshp[2:]) 
Example #2
Source File: sparse_gp_theano_internal.py    From sdvae with MIT License
def compute_log_ei(self, x, incumbent):

        Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + T.eye(self.z.shape[ 0 ]) * self.jitter * T.exp(self.lsf)
        KzzInv = T.nlinalg.MatrixInversePSD()(Kzz)
        LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost))
        covCavityInv = KzzInv + LLt * casting(self.n_points - self.set_for_training) / casting(self.n_points)
        covCavity = T.nlinalg.MatrixInversePSD()(covCavityInv)
        meanCavity = T.dot(covCavity, casting(self.n_points - self.set_for_training) / casting(self.n_points) * self.mParamPost)
        KzzInvcovCavity = T.dot(KzzInv, covCavity)
        KzzInvmeanCavity = T.dot(KzzInv, meanCavity)
        Kxz = compute_kernel(self.lls, self.lsf, x, self.z)
        B = T.dot(KzzInvcovCavity, KzzInv) - KzzInv 
        v_out = T.exp(self.lsf) + T.dot(Kxz * T.dot(Kxz, B), T.ones_like(self.z[ : , 0 : 1 ])) # + T.exp(self.lvar_noise)
        m_out = T.dot(Kxz, KzzInvmeanCavity)
        s = (incumbent - m_out) / T.sqrt(v_out)

        log_ei = T.log((incumbent - m_out) * ratio(s) + T.sqrt(v_out)) + log_n_pdf(s)

        return log_ei 
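A note on the last two lines: assuming the helpers behave as their names suggest (log_n_pdf(s) returning the log of the standard normal density phi(s), and ratio(s) returning Phi(s) / phi(s), the Gaussian CDF over its density), this is the usual expected improvement

    EI = (incumbent - m_out) * Phi(s) + sqrt(v_out) * phi(s)
       = phi(s) * ((incumbent - m_out) * ratio(s) + sqrt(v_out))

evaluated in log form, which avoids underflow when phi(s) is vanishingly small.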
Example #3
Source File: layers.py    From Neural-Photo-Editor with MIT License
def get_output_for(self, input, deterministic=False, **kwargs):
        def _phase_shift(input,r):
            bsize,c,a,b = input.shape[0],1,self.output_shape[2]//r,self.output_shape[3]//r
            X = T.reshape(input, (bsize,r,r,a,b))
            X = T.transpose(X, (0, 3,4,1,2))  # bsize, a, b, r2,r1
            X = T.split(x=X,splits_size=[1]*a,n_splits=a,axis=1)  # a, [bsize, b, r, r]
            X = [T.reshape(x,(bsize,b,r,r))for x in X]
            X = T.concatenate(X,axis=2)  # bsize, b, a*r, r 
            X = T.split(x=X,splits_size =[1]*b,n_splits=b,axis=1)  # b, [bsize, a*r, r]
            X = [T.reshape(x,(bsize,a*r,r))for x in X]
            X = T.concatenate(X,axis=2) # bsize, a*r, b*r 
            return X.dimshuffle(0,'x',1,2)
        Xc = T.split(x=input,splits_size =[input.shape[1]//self.c]*self.c,n_splits=self.c,axis=1)
        return T.concatenate([_phase_shift(xc,self.r) for xc in Xc],axis=1)        

# Multiscale Dilated Convolution Block
# This function (not a layer in and of itself, though you could make it one) returns a set of concatenated conv2d and dilatedconv2d layers.
# Each layer uses the same basic filter W, operating at a different dilation factor (or taken as the mean of W for the 1x1 conv).
# The channel-wise output of each layer is weighted by a set of coefficients, which are initialized to 1 / the total number of dilation scales,
# meaning that we're starting by taking an elementwise mean. These should be learnable parameters.

# NOTES: - I'm considering changing the variable names to be more descriptive, and look less like ridiculous academic code. It's on the to-do list.
#        - I keep the bias and nonlinearity out of the default definition for this layer, as I expect it to be batchnormed and nonlinearized in the model config. 
Example #4
Source File: gauss.py    From icml18-jtnn with MIT License
def compute_kernel(lls, lsf, x, z):

    ls = T.exp(lls)
    sf = T.exp(lsf)

    if x.ndim == 1:
        x = x[ None, : ]

    if z.ndim == 1:
        z = z[ None, : ]

    lsre = T.outer(T.ones_like(x[ :, 0 ]), ls)

    r2 = T.outer(T.sum(x * x / lsre, 1), T.ones_like(z[ : , 0 : 1 ])) - np.float32(2) * \
        T.dot(x / lsre, T.transpose(z)) + T.dot(np.float32(1.0) / lsre, T.transpose(z)**2)

    k = sf * T.exp(-np.float32(0.5) * r2)

    return k 
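The three outer/dot terms build the lengthscale-weighted pairwise squared distances without materialising an (n_x, n_z, d) intermediate. A small NumPy check of that expansion (my own sketch, not part of the project):

import numpy as np

x = np.random.randn(3, 2)
z = np.random.randn(4, 2)
ls = np.exp(np.random.randn(2))      # per-dimension lengthscales

# the same three terms as in compute_kernel above
r2 = (np.sum(x * x / ls, 1)[:, None]
      - 2.0 * np.dot(x / ls, z.T)
      + np.sum(z * z / ls, 1)[None, :])

# brute force: sum_d (x[i, d] - z[j, d])**2 / ls[d]
ref = ((x[:, None, :] - z[None, :, :]) ** 2 / ls).sum(-1)
assert np.allclose(r2, ref)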
Example #5
Source File: gauss.py    From icml18-jtnn with MIT License
def compute_psi1(lls, lsf, xmean, xvar, z):

    if xmean.ndim == 1:
        xmean = xmean[ None, : ]

    ls = T.exp(lls)
    sf = T.exp(lsf)
    lspxvar = ls + xvar
    constterm1 = ls / lspxvar
    constterm2 = T.prod(T.sqrt(constterm1), 1)
    r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1), T.ones_like(z[ : , 0 : 1 ])) \
        - np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)
    psi1 = sf * T.outer(constterm2, T.ones_like(z[ : , 0 : 1 ])) * T.exp(-np.float32(0.5) * r2_psi1)

    return psi1 
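For context: psi1[i, j] is the expectation of the kernel above under a Gaussian input, E[k(x_i, z_j)] with x_i ~ N(xmean_i, diag(xvar_i)). Integrating the squared exponential against the Gaussian inflates each lengthscale from ls to ls + xvar (hence lspxvar) and contributes the prod(sqrt(ls / (ls + xvar))) prefactor; the distance expansion itself is the same as in compute_kernel.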
Example #6
Source File: theano_backend.py    From keras-contrib with MIT License
def depth_to_space(input, scale, data_format=None):
    """Uses phase shift algorithm to convert
    channels/depth for spatial resolution
    """
    if data_format is None:
        data_format = image_data_format()
    data_format = data_format.lower()
    input = _preprocess_conv2d_input(input, data_format)

    b, k, row, col = input.shape
    out_channels = k // (scale ** 2)
    x = T.reshape(input, (b, scale, scale, out_channels, row, col))
    x = T.transpose(x, (0, 3, 4, 1, 5, 2))
    out = T.reshape(x, (b, out_channels, row * scale, col * scale))

    out = _postprocess_conv2d_output(out, input, None, None, None, data_format)
    return out 
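The reshape -> transpose -> reshape chain is the standard sub-pixel (phase shift) rearrangement. A pure-NumPy sketch of the same index gymnastics, useful for convincing yourself of the axis order (my own check, not part of the backend):

import numpy as np

def depth_to_space_np(x, scale):
    b, k, row, col = x.shape                  # channels-first input
    out_channels = k // (scale ** 2)
    x = x.reshape(b, scale, scale, out_channels, row, col)
    x = x.transpose(0, 3, 4, 1, 5, 2)         # interleave rows/cols with the two scale axes
    return x.reshape(b, out_channels, row * scale, col * scale)

x = np.arange(16).reshape(1, 4, 2, 2)
print(depth_to_space_np(x, 2).shape)          # (1, 1, 4, 4)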
Example #7
Source File: __init__.py    From TextDetector with GNU General Public License v3.0
def _format_as_impl(self, is_numeric, batch, space):
        if isinstance(space, VectorSpace):
            # We need to ensure that the resulting batch will always be
            # the same in `space`, no matter what the axes of `self` are.
            if self.axes != self.default_axes:
                # The batch index goes on the first axis
                assert self.default_axes[0] == 'b'
                batch = batch.transpose(*[self.axes.index(axis)
                                          for axis in self.default_axes])
            result = batch.reshape((batch.shape[0],
                                    self.get_total_dimension()))
            if space.sparse:
                result = _dense_to_sparse(result)

        elif isinstance(space, Conv2DSpace):
            result = Conv2DSpace.convert(batch, self.axes, space.axes)
        else:
            raise NotImplementedError("%s doesn't know how to format as %s"
                                      % (str(self), str(space)))

        return _cast(result, space.dtype) 
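The transpose argument is worth unpacking: for each position in default_axes it looks up where that axis currently sits in self.axes, producing the permutation that puts the batch axis first. A toy illustration with hypothetical axis layouts:

import numpy as np

axes = ('c', 0, 1, 'b')                         # hypothetical current layout
default_axes = ('b', 0, 1, 'c')                 # target layout: batch first
perm = [axes.index(axis) for axis in default_axes]
print(perm)                                     # [3, 1, 2, 0]

batch = np.zeros((16, 32, 32, 128))             # 16 channels, 128 examples
print(batch.transpose(*perm).shape)             # (128, 32, 32, 16)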
Example #8
Source File: encoders.py    From stick-breaking_dgms with MIT License
def __init__(self, rng, input, batch_size, in_size, latent_size, W_a = None, W_b = None, epsilon = 0.01):
        self.srng = theano.tensor.shared_randomstreams.RandomStreams(rng.randint(999999))
        self.input = input
        
        # setup variational params
        if W_a is None:
            W_values = np.asarray(0.01 * rng.standard_normal(size=(in_size, latent_size-1)), dtype=theano.config.floatX)
            W_a = theano.shared(value=W_values, name='W_a')
        if W_b is None:
            W_values = np.asarray(0.01 * rng.standard_normal(size=(in_size, latent_size-1)), dtype=theano.config.floatX)
            W_b = theano.shared(value=W_values, name='W_b')
        self.W_a = W_a
        self.W_b = W_b

        # compute Kumaraswamy samples                                                                                                                                                      
        uniform_samples = T.cast(self.srng.uniform(size=(batch_size, latent_size-1), low=0.01, high=0.99), theano.config.floatX)
        self.a = Softplus(T.dot(self.input, self.W_a))
        self.b = Softplus(T.dot(self.input, self.W_b))
        v_samples = (1-(uniform_samples**(1/self.b)))**(1/self.a)

        # setup variables for recursion                                                                                                                                   
        stick_segment = theano.shared(value=np.zeros((batch_size,), dtype=theano.config.floatX), name='stick_segment')
        remaining_stick = theano.shared(value=np.ones((batch_size,), dtype=theano.config.floatX), name='remaining_stick')

        def compute_latent_vars(i, stick_segment, remaining_stick, v_samples):
            # compute stick segment                                                                                                     
            stick_segment = v_samples[:,i] * remaining_stick
            remaining_stick *= (1-v_samples[:,i])
            return (stick_segment, remaining_stick)

        (stick_segments, remaining_sticks), updates = theano.scan(fn=compute_latent_vars,
                                                                  outputs_info=[stick_segment, remaining_stick],sequences=T.arange(latent_size-1),
                                                                  non_sequences=[v_samples], strict=True)

        self.avg_used_dims = T.mean(T.sum(remaining_sticks > epsilon, axis=0))
        self.latent_vars = T.transpose(T.concatenate([stick_segments, T.shape_padaxis(remaining_sticks[-1, :],axis=1).T], axis=0))
        
        self.params = [self.W_a, self.W_b] 
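The scan implements the usual stick-breaking construction: with fractions v_k drawn from the Kumaraswamy(a, b) posterior, the k-th latent weight is pi_k = v_k * prod_{j<k} (1 - v_j), and the column appended at the end of self.latent_vars is whatever stick remains, so each row of latent variables sums to one.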
Example #9
Source File: sparse_gp_theano_internal.py    From sdvae with MIT License
def getLogNormalizerPosterior(self):

        assert self.covPosterior is not None and self.meanPosterior is not None and self.covPosteriorInv is not None

        return casting(0.5 * self.n_inducing_points * np.log(2 * np.pi)) + casting(0.5) * T.nlinalg.LogDetPSD()(self.covPosterior) + \
            casting(0.5) * T.dot(T.dot(T.transpose(self.meanPosterior), self.covPosteriorInv), self.meanPosterior)

    ##
    # We return the contribution to the energy of the node (See last Eq. of Sec. 4 in http://arxiv.org/pdf/1602.04133.pdf v1)
    # 
Example #10
Source File: math.py    From starry with MIT License
def _cho_solve(cho_A, b):
    return _solve_upper(tt.transpose(cho_A), _solve_lower(cho_A, b)) 
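This is the standard two-triangular-solve route: assuming _solve_lower and _solve_upper are the triangular solves their names suggest, and cho_A is the lower Cholesky factor L of A = L L^T, then A^{-1} b = L^{-T} (L^{-1} b), i.e. a forward substitution followed by a back substitution, which is cheaper and better conditioned than forming A^{-1} explicitly.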
Example #11
Source File: sparse_gp_theano_internal.py    From sdvae with MIT License
def getLogNormalizerCavity(self):

        assert self.covCavity is not None  and self.meanCavity is not None and self.covCavityInv is not None 

        return casting(0.5 * self.n_inducing_points * np.log(2 * np.pi)) + casting(0.5) * T.nlinalg.LogDetPSD()(self.covCavity) + \
            casting(0.5) * T.dot(T.dot(T.transpose(self.meanCavity), self.covCavityInv), self.meanCavity) 
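Both log-normalizer methods evaluate the same closed form: for a Gaussian with mean m and covariance S, the log normalization constant in the natural (EP) parameterization is

    log Z = (n/2) * log(2 * pi) + (1/2) * log det(S) + (1/2) * m^T S^{-1} m

with n = self.n_inducing_points; the cavity and posterior versions differ only in which mean/covariance pair they plug in.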
Example #12
Source File: gauss.py    From sdvae with MIT License
def compute_psi2(lls, lsf, z, input_means, input_vars):

    ls = T.exp(lls)
    sf = T.exp(lsf)
    b = ls / casting(2.0)
    term_1 = T.prod(T.sqrt(b / (b + input_vars)), 1)

    scale = T.sqrt(4 * (2 * b[ None, : ] + 0 * input_vars))
    scaled_z = z[ None, : , : ] / scale[ : , None , : ]
    scaled_z_minus_m = scaled_z
    r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] - \
        2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
    term_2 = T.exp(-r2b)

    scale = T.sqrt(4 * (2 * b[ None, : ] + 2 * input_vars))
    scaled_z = z[ None, : , : ] / scale[ : , None , : ]
    scaled_m = input_means / scale
    scaled_m = T.tile(scaled_m[ : , None, : ], [ 1, z.shape[ 0 ], 1])
    scaled_z_minus_m = scaled_z - scaled_m
    r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] + \
        2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
    term_3 = T.exp(-r2b)
    
    psi2_computed = sf**casting(2.0) * term_1[ :, None, None ] * term_2 * term_3

    return T.transpose(psi2_computed, [ 1, 2, 0 ]) 
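Reading the two exponential factors against the closed form of E[k(z_j, x) k(x, z_j')] for the squared exponential kernel: term_2 equals exp(-||z_j - z_j'||^2 / (4 ls)) (per dimension), penalizing the separation of the two inducing points at a doubled lengthscale, while term_3 equals exp(-||(z_j + z_j') / 2 - input_means||^2 / (ls + 2 * input_vars)), penalizing the distance of their midpoint from the input mean; term_1 is the corresponding normalization prefactor.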
Example #13
Source File: paired_combinatorial_logit.py    From cs-ranking with Apache License 2.0
def _get_probabilities_np(self, utility, lambda_k):
        n_objects = self.n_objects_fit_
        nests_indices = self.nests_indices
        n_nests = self.n_nests
        temp_lambdas = np.ones((n_objects, n_objects), lambda_k.dtype)
        temp_lambdas[nests_indices[:, 0], nests_indices[:, 1]] = temp_lambdas.T[
            nests_indices[:, 0], nests_indices[:, 1]
        ] = lambda_k
        uti_per_nest = np.transpose((utility[:, None] / temp_lambdas), (0, 2, 1))
        ind = np.array([[[i1, i2], [i2, i1]] for i1, i2 in nests_indices])
        ind = ind.reshape(2 * n_nests, 2)
        x = uti_per_nest[:, ind[:, 0], ind[:, 1]].reshape(-1, 2)
        log_sum_exp_nest = npu.logsumexp(x).reshape(-1, n_nests)
        pnk = np.exp(
            log_sum_exp_nest * lambda_k - npu.logsumexp(log_sum_exp_nest * lambda_k)
        )
        p = np.zeros(tuple(utility.shape), dtype=float)
        for i in range(n_nests):
            i1, i2 = nests_indices[i]
            p[:, i1] += (
                np.exp(uti_per_nest[:, i1, i2] - log_sum_exp_nest[:, i]) * pnk[:, i]
            )
            p[:, i2] += (
                np.exp(uti_per_nest[:, i2, i1] - log_sum_exp_nest[:, i]) * pnk[:, i]
            )
        return p 
Example #14
Source File: theano_backend.py    From keras-lambda with MIT License
def transpose(x):
    y = T.transpose(x)
    if hasattr(x, '_keras_shape'):
        y._keras_shape = tuple(reversed(x._keras_shape))
    return y 
Example #15
Source File: gauss.py    From sdvae with MIT License
def compute_psi2_numpy(lls, lsf, z, input_means, input_vars):

    ls = np.exp(lls)
    sf = np.exp(lsf)
    b = ls / casting(2.0)
    term_1 = np.prod(np.sqrt(b / (b + input_vars)), 1)

    scale = np.sqrt(4 * (2 * b[ None, : ] + 0 * input_vars))
    scaled_z = z[ None, : , : ] / scale[ : , None , : ]
    scaled_z_minus_m = scaled_z
    r2b = np.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + np.sum(scaled_z_minus_m**2, 2)[ :, : , None ] - \
        2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
    term_2 = np.exp(-r2b)

    scale = np.sqrt(4 * (2 * b[ None, : ] + 2 * input_vars))
    scaled_z = z[ None, : , : ] / scale[ : , None , : ]
    scaled_m = input_means / scale
    scaled_m = np.tile(scaled_m[ : , None, : ], [ 1, z.shape[ 0 ], 1])
    scaled_z_minus_m = scaled_z - scaled_m
    r2b = np.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + np.sum(scaled_z_minus_m**2, 2)[ :, : , None ] + \
        2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
    term_3 = np.exp(-r2b)
    
    psi2_computed = sf**casting(2.0) * term_1[ :, None, None ] * term_2 * term_3
    psi2_computed = np.transpose(psi2_computed, [ 1, 2, 0 ])

    return psi2_computed