Python autograd.numpy.concatenate() Examples

The following are 30 code examples of autograd.numpy.concatenate(), drawn from open-source projects; the source file, project, and license are listed above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
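As a quick orientation before the examples: autograd.numpy mirrors the NumPy API while recording operations so that gradients can flow through them, and np.concatenate is differentiable with respect to its array arguments. The following minimal sketch is not taken from any of the projects below; the function name f and the input x are illustrative only.

import autograd.numpy as np
from autograd import grad

def f(x):
    # Concatenate x with a constant block, then reduce to a scalar
    # so that grad() can differentiate with respect to x.
    y = np.concatenate([x, np.ones(3)])
    return np.sum(y ** 2)

x = np.array([1.0, 2.0, 3.0])
print(grad(f)(x))  # expected: 2 * x -> [2. 4. 6.]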
Example #1
Source File: ConditionalVariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def initialize_NN(self, Q):
        hyp = np.array([])
        layers = Q.shape[0]
        for layer in range(0,layers-2):
            A = -np.sqrt(6.0/(Q[layer]+Q[layer+1])) + 2.0*np.sqrt(6.0/(Q[layer]+Q[layer+1]))*np.random.rand(Q[layer],Q[layer+1])
            b = np.zeros((1,Q[layer+1]))
            hyp = np.concatenate([hyp, A.ravel(), b.ravel()])

        A = -np.sqrt(6.0/(Q[-2]+Q[-1])) + 2.0*np.sqrt(6.0/(Q[-2]+Q[-1]))*np.random.rand(Q[-2],Q[-1])
        b = np.zeros((1,Q[-1]))
        hyp = np.concatenate([hyp, A.ravel(), b.ravel()])
        
        A = -np.sqrt(6.0/(Q[-2]+Q[-1])) + 2.0*np.sqrt(6.0/(Q[-2]+Q[-1]))*np.random.rand(Q[-2],Q[-1])
        b = np.zeros((1,Q[-1]))
        hyp = np.concatenate([hyp, A.ravel(), b.ravel()])
        
        return hyp 
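The expression -sqrt(6/(fan_in+fan_out)) + 2*sqrt(6/(fan_in+fan_out))*rand(fan_in, fan_out) used above appears to be a Glorot-style uniform initialization, and the two identical trailing blocks presumably initialize two output heads of the network (for example the mean and variance of a variational autoencoder). A minimal equivalent sketch, with hypothetical names fan_in and fan_out:

import autograd.numpy as np

def glorot_uniform(fan_in, fan_out):
    # Same distribution as: -limit + 2 * limit * rand(fan_in, fan_out)
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    return np.random.uniform(-limit, limit, size=(fan_in, fan_out))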
Example #2
Source File: zdt.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_points=100, flatten=True):
        regions = [[0, 0.0830015349],
                   [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041],
                   [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pf = []

        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pf.append(anp.array([x1, x2]).T)

        if not flatten:
            pf = anp.concatenate([p[None, ...] for p in pf])
        else:
            pf = anp.row_stack(pf)

        return pf 
Example #3
Source File: size_history.py    From momi2 with GNU General Public License v3.0
def sfs(self, n):
        if n == 0:
            return np.array([0.])
        Et_jj = self.etjj(n)
        #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

        ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

        before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
        # ignore branch length above untruncated TMRCA
        if self.tau == float('inf'):
            before_tmrca = 0.0

        ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
        return ret

    # def transition_prob(self, v, axis=0):
    #     return moran_model.moran_action(self.scaled_time, v, axis=axis) 
Example #4
Source File: VariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def initialize_NN(self, Q):
        hyp = np.array([])
        layers = Q.shape[0]
        for layer in range(0,layers-2):
            A = -np.sqrt(6.0/(Q[layer]+Q[layer+1])) + 2.0*np.sqrt(6.0/(Q[layer]+Q[layer+1]))*np.random.rand(Q[layer],Q[layer+1])
            b = np.zeros((1,Q[layer+1]))
            hyp = np.concatenate([hyp, A.ravel(), b.ravel()])

        A = -np.sqrt(6.0/(Q[-2]+Q[-1])) + 2.0*np.sqrt(6.0/(Q[-2]+Q[-1]))*np.random.rand(Q[-2],Q[-1])
        b = np.zeros((1,Q[-1]))
        hyp = np.concatenate([hyp, A.ravel(), b.ravel()])
        
        A = -np.sqrt(6.0/(Q[-2]+Q[-1])) + 2.0*np.sqrt(6.0/(Q[-2]+Q[-1]))*np.random.rand(Q[-2],Q[-1])
        b = np.zeros((1,Q[-1]))
        hyp = np.concatenate([hyp, A.ravel(), b.ravel()])
        
        return hyp 
Example #5
Source File: train.py    From tree-regularization-public with MIT License
def get_ith_minibatch_ixs_fences(b_i, batch_size, fences):
    """Split timeseries data of uneven sequence lengths into batches.
    This is how we handle different sized sequences.
    
    @param b_i: integer
                iteration index
    @param batch_size: integer
                       size of batch
    @param fences: numpy array of integers
                   cumulative sequence boundaries, so fences[i]:fences[i+1] spans sequence i
    @return idx: slice object
                 slice selecting the sequences in this minibatch
    @return batch_slice: numpy array of integers
                         concatenated timestep indices for the selected sequences
    """
    num_data = len(fences) - 1
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)  # integer division (Python 3)
    b_i = b_i % num_minibatches
    idx = slice(b_i * batch_size, (b_i+1) * batch_size)
    batch_i = np.arange(num_data)[idx]
    batch_slice = np.concatenate([range(i, j) for i, j in 
                                  zip(fences[batch_i], fences[batch_i+1])])
    return idx, batch_slice 
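A hedged usage sketch for the helper above (the sequence lengths below are made up for illustration): fences is assumed to be the cumulative sum of the sequence lengths, prefixed with 0, so that fences[i]:fences[i+1] spans the i-th sequence.

import autograd.numpy as np

seq_lengths = np.array([3, 5, 2, 4, 6])                   # hypothetical lengths of five sequences
fences = np.concatenate([[0], np.cumsum(seq_lengths)])    # [0, 3, 8, 10, 14, 20]

idx, batch_slice = get_ith_minibatch_ixs_fences(0, 2, fences)
# idx is slice(0, 2), selecting sequences 0 and 1;
# batch_slice holds their timestep indices 0..7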
Example #6
Source File: natural_gradient_black_box_svi.py    From autograd with MIT License
def optimize_and_lls(optfun):
        num_iters = 200
        elbos     = []
        def callback(params, t, g):
            elbo_val = -objective(params, t)
            elbos.append(elbo_val)
            if t % 50 == 0:
                print("Iteration {} lower bound {}".format(t, elbo_val))

        init_mean    = -1 * np.ones(D)
        init_log_std = -5 * np.ones(D)
        init_var_params = np.concatenate([init_mean, init_log_std])
        variational_params = optfun(num_iters, init_var_params, callback)
        return np.array(elbos)

    # let's optimize this with a few different step sizes 
Example #7
Source File: test_numpy.py    From autograd with MIT License
def test_cast_to_int():
    inds = np.ones(5)[:,None]

    def fun(W):
        # glue W and inds together
        glued_together = np.concatenate((W, inds), axis=1)

        # separate W and inds back out
        new_W = W[:,:-1]
        new_inds = np.int64(W[:,-1])

        assert new_inds.dtype == np.int64
        return new_W[new_inds].sum()

    W = np.random.randn(5, 10)
    check_grads(fun)(W) 
Example #8
Source File: bayesian_optimization.py    From autograd with MIT License
def callback(X, y, predict_func, acquisition_function, next_point, new_value):
        plt.cla()

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(domain_min, domain_max, 300), (300,1))
        pred_mean, pred_std = predict_func(plot_xs)
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * pred_std,
                               (pred_mean + 1.96 * pred_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        ax.plot(X, y, 'kx')
        ax.plot(next_point, new_value, 'ro')

        alphas = acquisition_function(plot_xs)
        ax.plot(plot_xs, alphas, 'r')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        plt.pause(1) 
Example #9
Source File: deep_gaussian_process.py    From autograd with MIT License
def plot_gp(ax, X, y, pred_mean, pred_cov, plot_xs):
        ax.cla()
        marg_std = np.sqrt(np.diag(pred_cov))
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * marg_std,
                               (pred_mean + 1.96 * marg_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        # Show samples from posterior.
        rs = npr.RandomState(0)
        sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
        ax.plot(plot_xs, sampled_funcs.T)
        ax.plot(X, y, 'kx')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([]) 
Example #10
Source File: util.py    From pymop with Apache License 2.0
def uniform_reference_directions(self, n_partitions, n_dim):
        ref_dirs = []
        ref_dir = anp.full(n_dim, anp.inf)
        self.__uniform_reference_directions(ref_dirs, ref_dir, n_partitions, n_partitions, 0)
        return anp.concatenate(ref_dirs, axis=0) 
Example #11
Source File: test_systematic.py    From autograd with MIT License
def test_concatenate_3d():    combo_check(np.concatenate, [0])([(R(2, 2, 2), R(2, 2, 2))], axis=[0, 1, 2]) 
Example #12
Source File: test_numpy.py    From autograd with MIT License
def test_simple_concatenate():
    A = npr.randn(5, 6, 4)
    B = npr.randn(4, 6, 4)
    def fun(x): return np.concatenate((A, x))
    check_grads(fun)(B) 
Example #13
Source File: test_numpy.py    From autograd with MIT License
def test_concatenate_axis_0():
    A = npr.randn(5, 6, 4)
    B = npr.randn(5, 6, 4)
    def fun(x): return np.concatenate((B, x, B))
    check_grads(fun)(A) 
Example #14
Source File: test_numpy.py    From autograd with MIT License
def test_concatenate_axis_1():
    A = npr.randn(5, 6, 4)
    B = npr.randn(5, 6, 4)
    def fun(x): return np.concatenate((B, x, B), axis=1)
    check_grads(fun)(A) 
Example #15
Source File: test_linalg.py    From autograd with MIT License
def test_slogdet_3d():
    fun = lambda x: np.sum(np.linalg.slogdet(x)[1])
    mat = np.concatenate([(rand_psd(5) + 5*np.eye(5))[None,...] for _ in range(3)])
    check_grads(fun)(mat) 
Example #16
Source File: test_linalg.py    From autograd with MIT License
def test_cholesky_broadcast():
    fun = lambda A: np.linalg.cholesky(A)
    A = np.concatenate([rand_psd(6)[None, :, :] for i in range(3)], axis=0)
    check_symmetric_matrix_grads(fun)(A) 
Example #17
Source File: autoptim.py    From autoptim with MIT License
def _vectorize(optim_vars):
    shapes = [var.shape for var in optim_vars]
    x = np.concatenate([var.ravel() for var in optim_vars])
    return x, shapes 
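The helper above flattens a list of arrays into a single vector and records their shapes. A possible inverse, not shown in this snippet and sketched here only for illustration, splits the vector back at the cumulative element counts:

import autograd.numpy as np

def _unvectorize(x, shapes):
    # Split the flat vector at the cumulative element counts and
    # restore each chunk to its recorded shape.
    sizes = [int(np.prod(shape)) for shape in shapes]
    bounds = np.cumsum(sizes)[:-1]
    return [chunk.reshape(shape)
            for chunk, shape in zip(np.split(x, bounds), shapes)]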
Example #18
Source File: goftest.py    From kernel-gof with MIT License
def feature_tensor(self, X):
        """
        Compute the feature tensor which is n x d x J.
        The feature tensor can be used to compute the statistic, and the
        covariance matrix for simulating from the null distribution.

        X: n x d data numpy array

        return an n x d x J numpy array
        """
        k = self.k
        J = self.V.shape[0]
        n, d = X.shape
        # n x d matrix of gradients
        grad_logp = self.p.grad_log(X)
        #assert np.all(util.is_real_num(grad_logp))
        # n x J matrix
        #print 'V'
        #print self.V
        K = k.eval(X, self.V)
        #assert np.all(util.is_real_num(K))

        list_grads = np.array([np.reshape(k.gradX_y(X, v), (1, n, d)) for v in self.V])
        stack0 = np.concatenate(list_grads, axis=0)
        #a numpy array G of size n x d x J such that G[:, :, J]
        #    is the derivative of k(X, V_j) with respect to X.
        dKdV = np.transpose(stack0, (1, 2, 0))

        # n x d x J tensor
        grad_logp_K = util.outer_rows(grad_logp, K)
        #print 'grad_logp'
        #print grad_logp.dtype
        #print grad_logp
        #print 'K'
        #print K
        Xi = old_div((grad_logp_K + dKdV),np.sqrt(d*J))
        #Xi = (grad_logp_K + dKdV)
        return Xi 
Example #19
Source File: test_scipy.py    From autograd with MIT License
def test_mvn_logpdf_sing_cov(): combo_check(mvn.logpdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True]) 
Example #20
Source File: zdt.py    From pymop with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        regions = [[0, 0.0830015349],
                   [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041],
                   [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pareto_front = anp.array([]).reshape((-1, 2))
        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_pareto_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pareto_front = anp.concatenate((pareto_front, anp.array([x1, x2]).T), axis=0)
        return pareto_front 
Example #21
Source File: VariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def __init__(self, Y, layers_encoder, layers_decoder, 
                 max_iter = 2000, N_batch = 1, monitor_likelihood = 10, lrate = 1e-3):
        self.Y = Y
        self.Y_dim = Y.shape[1]
        self.Z_dim = layers_encoder[-1]
        self.layers_encoder = layers_encoder
        self.layers_decoder = layers_decoder
                
        self.max_iter = max_iter
        self.N_batch = N_batch
        self.monitor_likelihood = monitor_likelihood
        
        # Initialize encoder
        hyp =  self.initialize_NN(layers_encoder)
        self.idx_encoder = np.arange(hyp.shape[0])
        
        # Initialize decoder
        hyp = np.concatenate([hyp, self.initialize_NN(layers_decoder)])
        self.idx_decoder = np.arange(self.idx_encoder[-1]+1, hyp.shape[0])
                
        self.hyp = hyp
        
        # Adam optimizer parameters
        self.mt_hyp = np.zeros(hyp.shape)
        self.vt_hyp = np.zeros(hyp.shape)
        self.lrate = lrate
        
        print("Total number of parameters: %d" % (hyp.shape[0])) 
Example #22
Source File: LongShortTermMemoryNetworks.py    From DeepLearningTutorial with MIT License
def initialize_LSTM(self):
        hyp = np.array([])
        Q = self.hidden_dim
        
        # Forget Gate
        U_f = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_f = np.zeros((1,Q))
        W_f = np.eye(Q)
        hyp = np.concatenate([hyp, U_f.ravel(), b_f.ravel(), W_f.ravel()])
        
        # Input Gate
        U_i = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_i = np.zeros((1,Q))
        W_i = np.eye(Q)
        hyp = np.concatenate([hyp, U_i.ravel(), b_i.ravel(), W_i.ravel()])

        # Update Cell State
        U_s = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_s = np.zeros((1,Q))
        W_s = np.eye(Q)
        hyp = np.concatenate([hyp, U_s.ravel(), b_s.ravel(), W_s.ravel()])

        # Output Gate
        U_o = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_o = np.zeros((1,Q))
        W_o = np.eye(Q)
        hyp = np.concatenate([hyp, U_o.ravel(), b_o.ravel(), W_o.ravel()])

        V = -np.sqrt(6.0/(Q+self.Y_dim)) + 2.0*np.sqrt(6.0/(Q+self.Y_dim))*np.random.rand(Q,self.Y_dim)
        c = np.zeros((1,self.Y_dim))
        hyp = np.concatenate([hyp, V.ravel(), c.ravel()])
    
        return hyp 
Example #23
Source File: RecurrentNeuralNetworks.py    From DeepLearningTutorial with MIT License
def initialize_RNN(self):
        hyp = np.array([])
        Q = self.hidden_dim
            
        U = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b = np.zeros((1,Q))
        W = np.eye(Q)
        hyp = np.concatenate([hyp, U.ravel(), b.ravel(), W.ravel()])            
        
        V = -np.sqrt(6.0/(Q+self.Y_dim)) + 2.0*np.sqrt(6.0/(Q+self.Y_dim))*np.random.rand(Q,self.Y_dim)
        c = np.zeros((1,self.Y_dim))
        hyp = np.concatenate([hyp, V.ravel(), c.ravel()])
    
        return hyp 
Example #24
Source File: NeuralNetworks.py    From DeepLearningTutorial with MIT License
def initialize_NN(self, Q):
        hyp = np.array([])
        layers = Q.shape[0]
        for layer in range(0,layers-1):
            A = -np.sqrt(6.0/(Q[layer]+Q[layer+1])) + 2.0*np.sqrt(6.0/(Q[layer]+Q[layer+1]))*np.random.rand(Q[layer],Q[layer+1])
            b = np.zeros((1,Q[layer+1]))
            hyp = np.concatenate([hyp, A.ravel(), b.ravel()])
        
        return hyp 
Example #25
Source File: linear_models.py    From MLAlgorithms with MIT License
def _add_intercept(X):
        b = np.ones([X.shape[0], 1])
        return np.concatenate([b, X], axis=1) 
Example #26
Source File: test_systematic.py    From autograd with MIT License
def test_concatenate_1ist():  combo_check(np.concatenate, [0])([(R(1), R(3))],             axis=[0]) 
Example #27
Source File: test_scipy.py    From autograd with MIT License
def test_mvn_pdf_sing_cov(): combo_check(mvn.pdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True]) 
Example #28
Source File: flatten.py    From autograd with MIT License
def _concatenate(lst):
    lst = list(lst)
    return np.concatenate(lst) if lst else np.array([]) 
Example #29
Source File: bayesian_neural_net.py    From autograd with MIT License
def build_toy_dataset(n_data=40, noise_std=0.1):
    D = 1
    rs = npr.RandomState(0)
    inputs  = np.concatenate([np.linspace(0, 2, num=n_data // 2),   # integer counts for linspace (Python 3)
                              np.linspace(6, 8, num=n_data // 2)])
    targets = np.cos(inputs) + rs.randn(n_data) * noise_std
    inputs = (inputs - 4.0) / 4.0
    inputs  = inputs.reshape((len(inputs), D))
    targets = targets.reshape((len(targets), D))
    return inputs, targets 
Example #30
Source File: black_box_svi.py    From autograd with MIT License
def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
        x = np.linspace(*xlimits, num=numticks)
        y = np.linspace(*ylimits, num=numticks)
        X, Y = np.meshgrid(x, y)
        zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
        Z = zs.reshape(X.shape)
        plt.contour(X, Y, Z)
        ax.set_yticks([])
        ax.set_xticks([])

    # Set up figure.