Python autograd.numpy.reshape() Examples

The following are 30 code examples of autograd.numpy.reshape(), drawn from open-source projects; the source file and project for each are noted above the example.
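
Before the examples, a minimal self-contained sketch of the function itself (illustrative, not from any of the projects below): autograd.numpy.reshape is a traced operation, so gradients flow through it and come back in the input's original shape.

import autograd.numpy as np
from autograd import grad

def loss(x):
    W = np.reshape(x, (2, 3))   # flat vector -> 2 x 3 matrix
    return np.sum(W ** 2)

g = grad(loss)(np.arange(6.0))
print(g)  # [ 0.  2.  4.  6.  8. 10.], i.e. 2*x, back in x's flat shape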
Example #1
Source File: deep_gaussian_process.py    From autograd with MIT License
def callback(params):
        print("Log marginal likelihood {}".format(log_marginal_likelihood(params)))

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(-5, 5, 300), (300,1))
        pred_mean, pred_cov = combined_predict_fun(params, X, y, plot_xs)
        plot_gp(ax_end_to_end, X, y, pred_mean, pred_cov, plot_xs)
        ax_end_to_end.set_title("X to y")

        layer1_params, layer2_params, hiddens = unpack_all_params(params)
        h_star_mean, h_star_cov = predict_layer_funcs[0](layer1_params, X, hiddens, plot_xs)
        y_star_mean, y_star_cov = predict_layer_funcs[0](layer2_params, np.atleast_2d(hiddens).T, y, plot_xs)

        plot_gp(ax_x_to_h, X, hiddens,                  h_star_mean, h_star_cov, plot_xs)
        ax_x_to_h.set_title("X to hiddens")

        plot_gp(ax_h_to_y, np.atleast_2d(hiddens).T, y, y_star_mean, y_star_cov, plot_xs)
        ax_h_to_y.set_title("hiddens to y")

        plt.draw()
        plt.pause(1.0/60.0)

Example #2
Source File: ar.py    From autohmm with BSD 2-Clause "Simplified" License
def _do_optim(self, p, optim_x0, gn, data, entries='all'):
        optim_bounds = [self.wrt_bounds[p] for k in
                        range(np.prod(self.wrt_dims[p]))]

        result = minimize(fun=self._optim_wrap, jac=True,
                          x0=np.array(optim_x0).reshape(-1),
                          args=(p,
                                {'wrt': p,
                                 'p': self.precision_,
                                 'm': self.mu_,
                                 'a': self.alpha_,
                                 'xn': data['obs'],
                                 'xln': data['lagged'],
                                 'gn': gn,  # post. uni. concat.
                                 'entries': entries
                                }),
                          bounds=optim_bounds,
                          method='TNC')
        new_value = result.x.reshape(self.wrt_dims[p])
        return new_value 
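
The flatten-before, reshape-after round trip above is the standard way to hand a matrix-valued parameter to scipy.optimize.minimize, whose x0 must be one-dimensional. A standalone sketch of just that pattern (names here are illustrative, not autohmm's):

import numpy as onp
from scipy.optimize import minimize

wrt_dims = (2, 3)

def objective(flat_w):
    W = flat_w.reshape(wrt_dims)          # recover the matrix inside the objective
    return onp.sum((W - 1.0) ** 2)

result = minimize(objective, x0=onp.zeros(wrt_dims).reshape(-1), method='TNC')
W_new = result.x.reshape(wrt_dims)        # back to the original shape
print(onp.round(W_new, 3))                # approximately all ones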
Example #3
Source File: util.py    From kernel-gof with MIT License
def outer_rows(X, Y):
    """
    Compute the outer product of each row in X with the corresponding row in Y.

    X: n x dx numpy array
    Y: n x dy numpy array

    Return an n x dx x dy numpy array.
    """

    # Matlab way to do this. According to Jonathan Huggins, this is not
    # efficient. Use einsum instead. See below.
    #n, dx = X.shape
    #dy = Y.shape[1]
    #X_col_rep = X[:, np.tile(range(dx), (dy, 1)).T.reshape(-1) ]
    #Y_tile = np.tile(Y, (1, dx))
    #Z = X_col_rep*Y_tile
    #return np.reshape(Z, (n, dx, dy))
    return np.einsum('ij,ik->ijk', X, Y) 
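
A quick shape check of that einsum (a standalone sketch, not part of kernel-gof): each row of the result is the outer product of the corresponding rows of X and Y.

import autograd.numpy as np
import autograd.numpy.random as npr

X = npr.randn(4, 3)                        # n=4, dx=3
Y = npr.randn(4, 2)                        # n=4, dy=2
Z = np.einsum('ij,ik->ijk', X, Y)
print(Z.shape)                             # (4, 3, 2)
assert np.allclose(Z[0], np.outer(X[0], Y[0]))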
Example #4
Source File: autograd_wrapper.py    From mici with MIT License
def jacobian_and_value(fun, x):
    """
    Makes a function that returns both the Jacobian and value of a function.

    Assumes that the function `fun` broadcasts along the first dimension of the
    input being differentiated with respect to such that a batch of outputs can
    be computed concurrently for a batch of inputs.
    """
    val = fun(x)
    v_vspace = vspace(val)
    x_vspace = vspace(x)
    x_rep = np.tile(x, (v_vspace.size,) + (1,) * x_vspace.ndim)
    vjp_rep, _ = make_vjp(fun, x_rep)
    jacobian_shape = v_vspace.shape + x_vspace.shape
    basis_vectors = np.array([b for b in v_vspace.standard_basis()])
    jacobian = vjp_rep(basis_vectors)
    return np.reshape(jacobian, jacobian_shape), val 
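
For a quick reference point, autograd also ships a built-in autograd.jacobian; the wrapper above exists to batch the VJP calls, but on small problems the two should agree. A sketch using only the built-in (independent of mici):

import autograd.numpy as np
from autograd import jacobian

def fun(x):
    return np.concatenate([np.sin(x), np.cos(x)])

J = jacobian(fun)(np.array([0.1, 0.2, 0.3]))
print(J.shape)   # (6, 3): one row per output, one column per input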
Example #5
Source File: autograd_wrapper.py    From mici with MIT License
def hessian_grad_and_value(fun, x):
    """
    Makes a function that returns the Hessian, gradient & value of a function.

    Assumes that the function `fun` broadcasts along the first dimension of the
    input being differentiated with respect to such that a batch of outputs can
    be computed concurrently for a batch of inputs.
    """
    def grad_fun(x):
        vjp, val = make_vjp(fun, x)
        return vjp(vspace(val).ones()), val
    x_vspace = vspace(x)
    x_rep = np.tile(x, (x_vspace.size,) + (1,) * x_vspace.ndim)
    vjp_grad, (grad, val) = make_vjp(lambda x: atuple(grad_fun(x)), x_rep)
    hessian_shape = x_vspace.shape + x_vspace.shape
    basis_vectors = np.array([b for b in x_vspace.standard_basis()])
    hessian = vjp_grad((basis_vectors, vspace(val).zeros()))
    return np.reshape(hessian, hessian_shape), grad[0], val[0] 
Example #6
Source File: NeuralNetworks.py    From DeepLearningTutorial with MIT License
def forward_pass(self, X, Q, hyp):
        H = X
        idx_3 = 0
        layers = Q.shape[0]   
        for layer in range(0,layers-2):        
            idx_1 = idx_3
            idx_2 = idx_1 + Q[layer]*Q[layer+1]
            idx_3 = idx_2 + Q[layer+1]
            A = np.reshape(hyp[idx_1:idx_2], (Q[layer],Q[layer+1]))
            b = np.reshape(hyp[idx_2:idx_3], (1,Q[layer+1]))
            H = activation(np.matmul(H,A) + b)
            
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        mu = np.matmul(H,A) + b
                
        return mu 
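
The reshape calls above carve one flat vector hyp into per-layer weight matrices A and bias rows b. The same indexing scheme as a standalone sketch for a tiny [2, 3, 1] architecture (illustrative only):

import autograd.numpy as np
import autograd.numpy.random as npr

Q = [2, 3, 1]                                   # layer sizes
n_params = sum(Q[i] * Q[i+1] + Q[i+1] for i in range(len(Q) - 1))
hyp = npr.randn(n_params)                       # one flat parameter vector

idx = 0
for layer in range(len(Q) - 1):
    A = np.reshape(hyp[idx:idx + Q[layer] * Q[layer+1]], (Q[layer], Q[layer+1]))
    idx += Q[layer] * Q[layer+1]
    b = np.reshape(hyp[idx:idx + Q[layer+1]], (1, Q[layer+1]))
    idx += Q[layer+1]
    print(A.shape, b.shape)                     # (2, 3) (1, 3), then (3, 1) (1, 1)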
Example #7
Source File: RecurrentNeuralNetworks.py    From DeepLearningTutorial with MIT License
def forward_pass(self, X, hyp):     
        Q = self.hidden_dim
        H = np.zeros((X.shape[1],Q))
        
        idx_1 = 0
        idx_2 = idx_1 + self.X_dim*Q
        idx_3 = idx_2 + Q
        idx_4 = idx_3 + Q*Q
        U = np.reshape(hyp[idx_1:idx_2], (self.X_dim,Q))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q))
        W = np.reshape(hyp[idx_3:idx_4], (Q,Q))
        
        for i in range(0, self.lags):
            H = activation(np.matmul(H,W) + np.matmul(X[i,:,:],U) + b)
                
        idx_1 = idx_4
        idx_2 = idx_1 + Q*self.Y_dim
        idx_3 = idx_2 + self.Y_dim
        V = np.reshape(hyp[idx_1:idx_2], (Q,self.Y_dim))
        c = np.reshape(hyp[idx_2:idx_3], (1,self.Y_dim))
        Y = np.matmul(H,V) + c
        
        return Y 
Example #8
Source File: wing.py    From autograd with MIT License
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
       using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_ys).astype(int)
    top_ix  = np.floor(center_xs).astype(int)
    rw = center_ys - left_ix              # Relative weight of right-hand cells.
    bw = center_xs - top_ix               # Relative weight of bottom cells.
    left_ix  = np.mod(left_ix,     rows)  # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix   = np.mod(top_ix,      cols)
    bot_ix   = np.mod(top_ix  + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix,  top_ix] + bw*f[left_ix,  bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
    return np.reshape(flat_f, (rows, cols)) 
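
A minimal call of the routine above (a sketch that assumes advect is in scope): a uniform velocity of one cell in x shifts the field one column to the right, wrapping at the edges.

import autograd.numpy as np

f = np.zeros((4, 4))
f[1, 1] = 1.0                         # a unit of "smoke" at row 1, column 1
vx, vy = np.ones((4, 4)), np.zeros((4, 4))
print(advect(f, vx, vy))              # the unit moves to row 1, column 2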
Example #9
Source File: kernel.py    From kernel-gof with MIT License
def gradX_y(self, X, y):
        """
        Compute the gradient with respect to X (the first argument of the
        kernel). Base class provides a default autograd implementation for convenience.
        Subclasses should override if this does not work.

        X: nx x d numpy array.
        y: numpy array of length d.

        Return a numpy array G of size nx x d, the derivative of k(X, y) with
        respect to X.
        """
        yrow = np.reshape(y, (1, -1))
        f = lambda X: self.eval(X, yrow)
        g = autograd.elementwise_grad(f)
        G = g(X)
        assert G.shape[0] == X.shape[0]
        assert G.shape[1] == X.shape[1]
        return G

# end class KSTKernel 
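
The same elementwise_grad pattern on a plain Gaussian kernel, outside any class (a standalone sketch, with sigma2 fixed for illustration):

import autograd.numpy as np
from autograd import elementwise_grad

sigma2 = 1.0

def kvec(X, yrow):
    D2 = np.sum((X - yrow) ** 2, axis=1)        # squared distance of each row to y
    return np.exp(-D2 / (2.0 * sigma2))

X = np.array([[0.0, 0.0], [1.0, 2.0]])
yrow = np.reshape(np.array([0.5, -0.5]), (1, -1))
G = elementwise_grad(lambda X: kvec(X, yrow))(X)
print(G.shape)                                  # (2, 2): d k(x_i, y) / d x_i per row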
Example #10
Source File: data.py    From autograd with MIT License
def plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),
                cmap=matplotlib.cm.binary, vmin=None, vmax=None):
    """Images should be a (N_images x pixels) matrix."""
    N_images = images.shape[0]
    N_rows = (N_images - 1) // ims_per_row + 1
    pad_value = np.min(images.ravel())
    concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,
                             (digit_dimensions[1] + padding) * ims_per_row + padding), pad_value)
    for i in range(N_images):
        cur_image = np.reshape(images[i, :], digit_dimensions)
        row_ix = i // ims_per_row
        col_ix = i % ims_per_row
        row_start = padding + (padding + digit_dimensions[0]) * row_ix
        col_start = padding + (padding + digit_dimensions[1]) * col_ix
        concat_images[row_start: row_start + digit_dimensions[0],
                      col_start: col_start + digit_dimensions[1]] = cur_image
    cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    return cax 
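
A hedged usage sketch (assuming plot_images above is in scope): seven random flattened 28 x 28 images tiled into two rows of five.

import matplotlib.pyplot as plt
import autograd.numpy.random as npr

fig, ax = plt.subplots()
images = npr.rand(7, 28 * 28)     # N_images x pixels
plot_images(images, ax)
plt.show()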
Example #11
Source File: kernel.py    From kernel-gof with MIT License
def eval(self, X, Y):
        """
        Evaluate the Gaussian kernel on the two 2d numpy arrays.

        Parameters
        ----------
        X : n1 x d numpy array
        Y : n2 x d numpy array

        Return
        ------
        K : a n1 x n2 Gram matrix.
        """
        #(n1, d1) = X.shape
        #(n2, d2) = Y.shape
        #assert d1==d2, 'Dimensions of the two inputs must be the same'
        sumx2 = np.reshape(np.sum(X**2, 1), (-1, 1))
        sumy2 = np.reshape(np.sum(Y**2, 1), (1, -1))
        D2 = sumx2 - 2*np.dot(X, Y.T) + sumy2
        K = np.exp(old_div(-D2,(2.0*self.sigma2)))
        return K 
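
The two reshapes implement the usual expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, broadcasting a column of row norms against a row of them. A standalone check of that step (not from kernel-gof):

import autograd.numpy as np
import autograd.numpy.random as npr

X, Y = npr.randn(5, 3), npr.randn(4, 3)
sumx2 = np.reshape(np.sum(X**2, 1), (-1, 1))    # 5 x 1 column
sumy2 = np.reshape(np.sum(Y**2, 1), (1, -1))    # 1 x 4 row
D2 = sumx2 - 2 * np.dot(X, Y.T) + sumy2         # 5 x 4 squared distances
ref = np.array([[np.sum((x - y) ** 2) for y in Y] for x in X])
assert np.allclose(D2, ref)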
Example #12
Source File: bayesian_optimization.py    From autograd with MIT License
def callback(X, y, predict_func, acquisition_function, next_point, new_value):
        plt.cla()

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(domain_min, domain_max, 300), (300,1))
        pred_mean, pred_std = predict_func(plot_xs)
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * pred_std,
                               (pred_mean + 1.96 * pred_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        ax.plot(X, y, 'kx')
        ax.plot(next_point, new_value, 'ro')

        alphas = acquisition_function(plot_xs)
        ax.plot(plot_xs, alphas, 'r')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        plt.pause(1) 
Example #13
Source File: jacobians.py    From ceviche with MIT License
def jacobian_numerical(fn, x, step_size=1e-7):
    """ numerically differentiate `fn` w.r.t. its argument `x` """
    in_array = float_2_array(x).flatten()
    out_array = float_2_array(fn(x)).flatten()

    m = in_array.size
    n = out_array.size
    shape = (n, m)
    jacobian = npa.zeros(shape)

    for i in range(m):
        input_i = in_array.copy()
        input_i[i] += step_size
        arg_i = input_i.reshape(in_array.shape)
        output_i = fn(arg_i).flatten()
        grad_i = (output_i - out_array) / step_size
        jacobian[:, i] = get_value_arr(get_value(grad_i))  # need to convert both the grad_i array and its contents to actual data.

    return jacobian 
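
The same forward-difference idea without ceviche's helpers (float_2_array, get_value, and get_value_arr above are ceviche utilities); a plain-numpy sketch:

import numpy as onp

def jacobian_fd(fn, x, step=1e-7):
    x = onp.asarray(x, dtype=float).ravel()
    y0 = onp.asarray(fn(x)).ravel()
    J = onp.zeros((y0.size, x.size))
    for i in range(x.size):
        xi = x.copy()
        xi[i] += step                          # perturb one input coordinate
        J[:, i] = (onp.asarray(fn(xi)).ravel() - y0) / step
    return J

# fn(x) = [x0*x1, x0 + x1] has Jacobian [[x1, x0], [1, 1]]:
print(jacobian_fd(lambda x: onp.array([x[0] * x[1], x[0] + x[1]]), [2.0, 3.0]))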
Example #14
Source File: ConditionalVariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def neural_net(self, X, Q, hyp):
        H = X
        idx_3 = 0
        layers = Q.shape[0]   
        for layer in range(0,layers-2):        
            idx_1 = idx_3
            idx_2 = idx_1 + Q[layer]*Q[layer+1]
            idx_3 = idx_2 + Q[layer+1]
            A = np.reshape(hyp[idx_1:idx_2], (Q[layer],Q[layer+1]))
            b = np.reshape(hyp[idx_2:idx_3], (1,Q[layer+1]))
            H = activation(np.matmul(H,A) + b)
            
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        mu = np.matmul(H,A) + b

        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        Sigma = np.exp(np.matmul(H,A) + b)
        
        return mu, Sigma 
Example #15
Source File: ar.py    From autohmm with BSD 2-Clause "Simplified" License
def _process_inputs(self, X, E=None, lengths=None):
        if self.n_features == 1:
            lagged = None
            if lengths is None:
                lagged = lagmat(X, maxlag=self.n_lags, trim='forward',
                                original='ex')
            else:
                lagged = np.zeros((len(X), self.n_lags))
                for i, j in iter_from_X_lengths(X, lengths):
                    lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
                                            trim='forward', original='ex')

            return {'obs': X.reshape(-1,1),
                    'lagged': lagged.reshape(-1, self.n_features, self.n_lags)}
        else:
            lagged = None
            lagged = np.zeros((X.shape[0], self.n_features, self.n_lags))
            if lengths is None:
                tem = lagmat(X, maxlag=self.n_lags, trim='forward',
                             original='ex')
                for sample in range(X.shape[0]):
                    lagged[sample] = np.reshape(
                        tem[sample], (self.n_features, self.n_lags), 'F')

            else:
                for i, j in iter_from_X_lengths(X, lengths):
                    lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
                                            trim='forward', original='ex')
                    lagged = lagged.reshape(-1, self.n_features, self.n_lags)

            return {'obs': X, 'lagged': lagged} 
Example #16
Source File: ar.py    From autohmm with BSD 2-Clause "Simplified" License
def _obj_grad(self, wrt, m, p, a, xn, xln, gn, entries='all', **kwargs):
        m = m.reshape(self.n_unique, self.n_features, 1)  # tm

        if wrt == 'm':
            wrt_num = 0
        elif wrt == 'p':
            wrt_num = 1
        elif wrt == 'a':
            wrt_num = 2
        else:
            raise ValueError('unknown parameter')
        res = grad(self._obj, wrt_num)(m, p, a, xn, xln, gn)

        if wrt == 'p' and self.n_features > 1:
            if entries == 'diag':
                res_new = \
                np.zeros((self.n_unique, self.n_features, self.n_features))
                for u in range(self.n_unique):
                    for f in range(self.n_features):
                        res_new[u,f,f] = res[u,f,f]
                res = np.copy(res_new)

            elif entries == 'offdiag':
                for u in range(self.n_unique):
                    for f in range(self.n_features):
                        res[u,f,f] = 0.

        res = np.array([res])
        return res 
Example #17
Source File: ar.py    From autohmm with BSD 2-Clause "Simplified" License
def _obj(self, m, p, a, xn, xln, gn, entries='all', **kwargs):

        ll = self._ll(m, p, a, xn, xln)

        mw = self.mu_weight_
        mp = self.mu_prior_
        pw = self.precision_weight_
        pp = self.precision_prior_
        m = m.reshape(self.n_unique, self.n_features, 1)  #tm
        mp = mp.reshape(self.n_unique, self.n_features, 1)  # tm

        prior = (pw-0.5) * np.log(p) - 0.5*p*(mw*(m-mp)**2 + 2*pp)
        res = -1*(np.sum(gn * ll) + np.sum(prior))

        return res 
Example #18
Source File: ar.py    From autohmm with BSD 2-Clause "Simplified" License
def _ll(self, m, p, a, xn, xln, **kwargs):
        """Computation of log likelihood

        Dimensions
        ----------
        m :  n_unique x n_features
        p :  n_unique x n_features x n_features
        a :  n_unique x n_lags (shared_alpha=F)
             OR     1 x n_lags (shared_alpha=T)
        xn:  N x n_features
        xln: N x n_features x n_lags
        """

        samples = xn.shape[0]
        xn = xn.reshape(samples, 1, self.n_features)
        m = m.reshape(1, self.n_unique, self.n_features)
        det = np.linalg.det(np.linalg.inv(p))
        det = det.reshape(1, self.n_unique)

        lagged = np.dot(xln, a.T)  # NFU
        lagged = np.swapaxes(lagged, 1, 2)  # NUF
        xm = xn-(lagged + m)
        tem = np.einsum('NUF,UFX,NUX->NU', xm, p, xm)

        res = (-self.n_features/2.0)*np.log(2*np.pi) - 0.5*tem - 0.5*np.log(det)

        return res 
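
The einsum string 'NUF,UFX,NUX->NU' above evaluates the quadratic form xm' P xm once per (sample, state) pair. A standalone check of that contraction:

import autograd.numpy as np
import autograd.numpy.random as npr

N, U, F = 3, 2, 4
xm, p = npr.randn(N, U, F), npr.randn(U, F, F)
tem = np.einsum('NUF,UFX,NUX->NU', xm, p, xm)
assert np.allclose(tem[1, 0], xm[1, 0] @ p[0] @ xm[1, 0])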
Example #19
Source File: goftest.py    From kernel-gof with MIT License
def feature_tensor(self, X):
        """
        Compute the feature tensor which is n x d x J.
        The feature tensor can be used to compute the statistic, and the
        covariance matrix for simulating from the null distribution.

        X: n x d data numpy array

        return an n x d x J numpy array
        """
        k = self.k
        J = self.V.shape[0]
        n, d = X.shape
        # n x d matrix of gradients
        grad_logp = self.p.grad_log(X)
        #assert np.all(util.is_real_num(grad_logp))
        # n x J matrix
        #print 'V'
        #print self.V
        K = k.eval(X, self.V)
        #assert np.all(util.is_real_num(K))

        list_grads = np.array([np.reshape(k.gradX_y(X, v), (1, n, d)) for v in self.V])
        stack0 = np.concatenate(list_grads, axis=0)
        # a numpy array G of size n x d x J such that G[:, :, j]
        # is the derivative of k(X, V_j) with respect to X.
        dKdV = np.transpose(stack0, (1, 2, 0))

        # n x d x J tensor
        grad_logp_K = util.outer_rows(grad_logp, K)
        #print 'grad_logp'
        #print grad_logp.dtype
        #print grad_logp
        #print 'K'
        #print K
        Xi = old_div((grad_logp_K + dKdV),np.sqrt(d*J))
        #Xi = (grad_logp_K + dKdV)
        return Xi 
Example #20
Source File: goftest.py    From kernel-gof with MIT License
def simulate(self, gof, dat, fea_tensor=None):
        """
        fea_tensor: n x d x J feature matrix
        """
        assert isinstance(gof, FSSD)
        n_simulate = self.n_simulate
        seed = self.seed
        if fea_tensor is None:
            _, fea_tensor = gof.compute_stat(dat, return_feature_tensor=True)

        J = fea_tensor.shape[2]
        X = dat.data()
        n = X.shape[0]
        # n x d*J
        Tau = fea_tensor.reshape(n, -1)
        # Make sure it is a matrix, i.e. np.cov returns a scalar when Tau is
        # 1d.
        cov = np.cov(Tau.T) + np.zeros((1, 1))
        #cov = Tau.T.dot(Tau/n)

        arr_nfssd, eigs = FSSD.list_simulate_spectral(cov, J, n_simulate,
                seed=self.seed)
        return {'sim_stats': arr_nfssd}

# end of FSSDH0SimCovObs
#----------------------- 
Example #21
Source File: goftest.py    From kernel-gof with MIT License
def simulate(self, gof, dat, fea_tensor=None):
        """
        fea_tensor: n x d x J feature matrix

        This method does not use dat.
        """
        dat = None
        #assert isinstance(gof, FSSD)
        # p = an UnnormalizedDensity
        p = gof.p
        ds = p.get_datasource()
        if ds is None:
            raise ValueError('DataSource associated with p must be available.')
        Xdraw = ds.sample(n=self.n_draw, seed=self.seed)
        _, fea_tensor = gof.compute_stat(Xdraw, return_feature_tensor=True)

        X = Xdraw.data()
        J = fea_tensor.shape[2]
        n = self.n_draw
        # n x d*J
        Tau = fea_tensor.reshape(n, -1)
        # Make sure it is a matrix, i.e. np.cov returns a scalar when Tau is
        # 1d.
        #cov = np.cov(Tau.T) + np.zeros((1, 1))
        cov = old_div(Tau.T.dot(Tau),n) + np.zeros((1, 1))
        n_simulate = self.n_simulate

        arr_nfssd, eigs = FSSD.list_simulate_spectral(cov, J, n_simulate,
                seed=self.seed)
        return {'sim_stats': arr_nfssd}

# end of FSSDH0SimCovDraw
#----------------------- 
Example #22
Source File: test_numpy.py    From autograd with MIT License
def test_reshape_method():
    A = npr.randn(5, 6, 4)
    def fun(x): return x.reshape((5 * 4, 6))
    check_grads(fun)(A) 
Example #23
Source File: wavelet.py    From scarlet with MIT License
def iuwt(starlet):
    """ Inverse starlet transform

    Parameters
    ----------
    starlet: Shapelet object
        Starlet to be inverted

    Returns
    -------
    cJ: array
        a 2D image that corresponds to the inverse transform of starlet.
    """
    lvl, n1, n2 = np.shape(starlet)
    n = np.size(h)
    # Coarse scale
    cJ = fft.Fourier(starlet[-1, :, :])
    for i in np.arange(1, lvl):
        newh = np.zeros((n + (n - 1) * (2 ** (lvl - i - 1) - 1), 1))
        newh[0::2 ** (lvl - i - 1), 0] = h
        newhT = fft.Fourier(newh.T)
        newh = fft.Fourier(newh)

        # Line convolution
        cnew = fft.convolve(cJ, newh, axes=[0])
        # Column convolution
        cnew = fft.convolve(cnew, newhT, axes=[1])

        cJ = fft.Fourier(cnew.image + starlet[lvl - 1 - i, :, :])

    return np.reshape(cJ.image, (n1, n2)) 
Example #24
Source File: VariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def neural_net(self, X, Q, hyp):
        H = X
        idx_3 = 0
        layers = Q.shape[0]   
        for layer in range(0,layers-2):        
            idx_1 = idx_3
            idx_2 = idx_1 + Q[layer]*Q[layer+1]
            idx_3 = idx_2 + Q[layer+1]
            A = np.reshape(hyp[idx_1:idx_2], (Q[layer],Q[layer+1]))
            b = np.reshape(hyp[idx_2:idx_3], (1,Q[layer+1]))
            H = activation(np.matmul(H,A) + b)
            
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        mu = np.matmul(H,A) + b

        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        Sigma = np.exp(np.matmul(H,A) + b)
        
        return mu, Sigma 
Example #25
Source File: jacobians.py    From ceviche with MIT License
def jacobian_forward(fun, x):
    """ Compute jacobian of fun with respect to x using forward mode differentiation"""
    jvp = make_jvp(fun, x)
    # ans = fun(x)
    val_grad = map(lambda b: jvp(b), vspace(x).standard_basis())
    vals, grads = zip(*val_grad)
    ans = npa.zeros((list(vals)[0].size,))  # fake answer so that we don't have to compute it twice
    m, n = _jac_shape(x, ans)
    if _iscomplex(x):
        grads_real = npa.array(grads[::2])
        grads_imag = npa.array(grads[1::2])
        grads = grads_real - 1j * grads_imag
    return npa.reshape(npa.stack(grads), (m, n)).T 
Example #26
Source File: jacobians.py    From ceviche with MIT License
def jacobian_reverse(fun, x):
    """ Compute jacobian of fun with respect to x using reverse mode differentiation"""
    vjp, ans = make_vjp(fun, x)
    grads = map(vjp, vspace(ans).standard_basis())
    m, n = _jac_shape(x, ans)
    return npa.reshape(npa.stack(grads), (n, m)) 
Example #27
Source File: fdfd.py    From ceviche with MIT License
def _vec_to_grid(self, vec):
        """ converts a vector quantity into an array of the shape of the FDFD simulation """
        return npa.reshape(vec, self.shape) 
Example #28
Source File: test_numpy.py    From autograd with MIT License
def test_reshape_method_nolist():
    # The reshape can be called in two different ways:
    # like A.reshape((5,4)) or A.reshape(5,4).
    # This test checks that we support the second way.
    A = npr.randn(5, 6, 4)
    def fun(x): return x.reshape(5 * 4, 6)
    check_grads(fun)(A) 
Example #29
Source File: test_numpy.py    From autograd with MIT License
def test_reshape_call():
    A = npr.randn(5, 6, 4)
    def fun(x): return np.reshape(x, (5 * 4, 6))
    check_grads(fun)(A) 
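
All three tests pass the same check because the adjoint of reshape is simply a reshape of the incoming gradient back to the input's shape; a direct sketch:

import autograd.numpy as np
from autograd import grad

def fun(x):
    return np.sum(np.sin(np.reshape(x, (20, 6))))

print(grad(fun)(np.ones(120)).shape)   # (120,): the gradient comes back in x's shape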
Example #30
Source File: ar.py    From autohmm with BSD 2-Clause "Simplified" License
def _set_alpha(self, alpha_val):
        # the new value needs to have shape n_unique x n_lags;
        # if shared_alpha is true, a shape of 1 x n_lags is possible, too.
        # Internally, alpha is stored as n_components x n_lags.
        alpha_new = np.zeros((self.n_components, self.n_lags))

        if alpha_val is not None:
            if alpha_val.ndim == 1:
                alpha_val = alpha_val.reshape(-1, 1)  # make sure 2nd dim exists

            if alpha_val.shape[1] != self.n_lags:
                raise ValueError("shape[1] does not match n_lags")

            if self.shared_alpha == False:
                # alpha is not shared
                if alpha_val.shape[0] != self.n_unique:
                    raise ValueError("shape[0] does not match n_unique")
                for u in range(self.n_unique):
                    for t in range(1+self.n_tied):
                        alpha_new[u*(1+self.n_tied)+t, :] = alpha_val[u, :].copy()
            else:
                # alpha is shared ...
                if alpha_val.shape[0] != self.n_unique and \
                  alpha_val.shape[0] != 1:
                    # ... the shape should either be 1 x L or U x L
                    raise ValueError("shape[0] is neither 1 nor does it match n_unique")
                if alpha_val.shape[0] == self.n_unique and \
                  not (alpha_val == alpha_val[0,:]).all():
                    # .. in case of U x L the rows need to be identical
                    raise ValueError("rows not identical (shared_alpha)")
                for u in range(self.n_unique):
                    for t in range(1+self.n_tied):
                        alpha_new[u*(1+self.n_tied)+t, :] = alpha_val[0, :].copy()

        self._alpha_ = alpha_new