Python autograd.numpy.arange() Examples
The following are 30 code examples of autograd.numpy.arange(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
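Before the examples, here is a minimal, self-contained sketch of why arange from autograd.numpy (rather than plain NumPy) matters: it lets index and power vectors participate in traced, differentiable computations. The function and values below are illustrative, not taken from any project on this page.

import autograd.numpy as np
from autograd import grad

def poly(x):
    powers = np.arange(4.0)     # [0., 1., 2., 3.] -- constants w.r.t. x
    return np.sum(x ** powers)  # 1 + x + x^2 + x^3

dpoly = grad(poly)
print(dpoly(2.0))               # 1 + 2*2 + 3*4 = 17.0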
Example #1
Source File: train.py From tree-regularization-public with MIT License | 6 votes |
def average_path_length(tree, X):
    """Compute average path length: cost of simulating the average
    example; this is used in the objective function.

    @param tree: DecisionTreeClassifier instance
    @param X: NumPy array (D x N)
              D := number of dimensions
              N := number of examples
    @return path_length: float
                         average path length
    """
    leaf_indices = tree.apply(X)
    leaf_counts = np.bincount(leaf_indices)
    leaf_i = np.arange(tree.tree_.node_count)
    path_length = np.dot(leaf_i, leaf_counts) / float(X.shape[0])
    return path_length
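A hypothetical usage sketch (assumes scikit-learn's DecisionTreeClassifier; note that tree.apply takes samples as rows, i.e. an (N x D) array, despite the (D x N) note in the docstring, and that applying it to the training data makes every leaf appear in the bincount):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.random.rand(50, 4)            # toy data: 50 examples, 4 features
y = (X[:, 0] > 0.5).astype(int)
tree = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
print(average_path_length(tree, X))  # scalar used in the objective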
Example #2
Source File: train.py From tree-regularization-public with MIT License | 6 votes |
def get_ith_minibatch_ixs_fences(b_i, batch_size, fences):
    """Split timeseries data of uneven sequence lengths into batches.
    This is how we handle different-sized sequences.

    @param b_i: integer
                iteration index
    @param batch_size: integer
                       size of batch
    @param fences: NumPy array of integers
                   sequence of cutoff indices
    @return idx: slice object
    @return batch_slice: NumPy array of indices
    """
    num_data = len(fences) - 1
    # integer division so the slice bounds stay ints under Python 3
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)
    b_i = b_i % num_minibatches
    idx = slice(b_i * batch_size, (b_i + 1) * batch_size)
    batch_i = np.arange(num_data)[idx]
    batch_slice = np.concatenate([range(i, j)
                                  for i, j in zip(fences[batch_i], fences[batch_i + 1])])
    return idx, batch_slice
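A usage sketch with made-up fence values (fences must be a NumPy array, since it is fancy-indexed by batch_i): three sequences of lengths 3, 2 and 4, with batch_size 2, so iteration 0 covers the first two sequences.

import numpy as np

fences = np.array([0, 3, 5, 9])
idx, batch_slice = get_ith_minibatch_ixs_fences(0, 2, fences)
print(idx)          # slice(0, 2, None)
print(batch_slice)  # [0 1 2 3 4]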
Example #3
Source File: fdfd.py From ceviche with MIT License | 6 votes |
def _make_A(self, eps_vec):
    eps_vec_xx, eps_vec_yy = self._grid_average_2d(eps_vec)
    eps_vec_xx_inv = 1 / (eps_vec_xx + 1e-5)  # the 1e-5 is for numerical stability;
    eps_vec_yy_inv = 1 / (eps_vec_yy + 1e-5)  # autograd throws 'divide by zero' errors otherwise.

    indices_diag = npa.vstack((npa.arange(self.N), npa.arange(self.N)))

    entries_DxEpsy, indices_DxEpsy = spsp_mult(self.entries_Dxb, self.indices_Dxb, eps_vec_yy_inv, indices_diag, self.N)
    entries_DxEpsyDx, indices_DxEpsyDx = spsp_mult(entries_DxEpsy, indices_DxEpsy, self.entries_Dxf, self.indices_Dxf, self.N)

    entries_DyEpsx, indices_DyEpsx = spsp_mult(self.entries_Dyb, self.indices_Dyb, eps_vec_xx_inv, indices_diag, self.N)
    entries_DyEpsxDy, indices_DyEpsxDy = spsp_mult(entries_DyEpsx, indices_DyEpsx, self.entries_Dyf, self.indices_Dyf, self.N)

    entries_d = 1 / EPSILON_0 * npa.hstack((entries_DxEpsyDx, entries_DyEpsxDy))
    indices_d = npa.hstack((indices_DxEpsyDx, indices_DyEpsxDy))

    entries_diag = MU_0 * self.omega**2 * npa.ones(self.N)

    entries_a = npa.hstack((entries_d, entries_diag))
    indices_a = npa.hstack((indices_d, indices_diag))

    return entries_a, indices_a
Example #4
Source File: compute_sfs.py From momi2 with GNU General Public License v3.0 | 6 votes |
def expected_tmrca(demography, sampled_pops=None, sampled_n=None):
    """The expected time to most recent common ancestor of the sample.

    Parameters
    ----------
    demography : Demography

    Returns
    -------
    tmrca : float-like

    See Also
    --------
    expected_deme_tmrca : tmrca of subsample within a deme
    expected_sfs_tensor_prod : compute general class of summary statistics
    """
    vecs = [np.ones(n + 1) for n in demography.sampled_n]
    n0 = len(vecs[0]) - 1.0
    vecs[0] = np.arange(n0 + 1) / n0
    return np.squeeze(expected_sfs_tensor_prod(vecs, demography))
Example #5
Source File: utils.py From ceviche with MIT License | 6 votes |
def make_IO_matrices(indices, N):
    """Makes matrices that relate the sparse matrix entries to their locations in the matrix.

    The kth column of I is a 'one hot' vector specifying the k-th entry's row index into A.
    The kth column of J is a 'one hot' vector specifying the k-th entry's column index into A.
    O = J^T is for notational convenience.

    Armed with a vector of M entries 'a', we can construct the sparse matrix 'A' as:
        A = I @ diag(a) @ O
    where 'diag(a)' is a (M x M) matrix with vector 'a' along its diagonal.
    In index notation: A_ij = I_ik * a_k * O_kj

    In the opposite direction, given sparse matrix 'A' we can strip out the entries 'a'
    using the IO matrices as follows:
        a = diag(I^T @ A @ O^T)
    In index notation: a_k = I_ik * A_ij * O_kj
    """
    M = indices.shape[1]                                 # number of indices in the matrix
    entries_1 = npa.ones(M)                              # M entries of all 1's
    ik, jk = indices                                     # separate i and j components of the indices
    indices_I = npa.vstack((ik, npa.arange(M)))          # indices into the I matrix
    indices_J = npa.vstack((jk, npa.arange(M)))          # indices into the J matrix
    I = make_sparse(entries_1, indices_I, shape=(N, M))  # construct the I matrix
    J = make_sparse(entries_1, indices_J, shape=(N, M))  # construct the J matrix
    O = J.T                                              # make O = J^T matrix for consistency with my notes
    return I, O
Example #6
Source File: sfs.py From momi2 with GNU General Public License v3.0 | 6 votes |
def resample(self):
    """Create a new SFS by resampling blocks with replacement.

    Note the resampled SFS is assumed to have the same length in base pairs
    as the original SFS, which may be a poor assumption if the blocks are not
    of equal length.

    :returns: Resampled SFS
    :rtype: :class:`Sfs`
    """
    loci = np.random.choice(
        np.arange(self.n_loci), size=self.n_loci, replace=True)
    mat = self.freqs_matrix[:, loci]
    to_keep = np.asarray(mat.sum(axis=1) > 0).squeeze()
    to_keep = np.arange(len(self.configs))[to_keep]
    mat = mat[to_keep, :]
    configs = _ConfigList_Subset(self.configs, to_keep)

    return self.from_matrix(mat, configs, self.folded, self.length)
Example #7
Source File: sources.py From ceviche with MIT License | 6 votes |
def compute_f(theta, lambda0, dL, shape):
    """Compute the 'vacuum' field vector"""

    # get plane wave k vector components (in units of grid cells)
    k0 = 2 * npa.pi / lambda0 * dL
    kx = k0 * npa.sin(theta)
    ky = -k0 * npa.cos(theta)  # negative because downwards

    # array to write into
    f_src = npa.zeros(shape, dtype=npa.complex128)

    # get coordinates
    Nx, Ny = shape
    xpoints = npa.arange(Nx)
    ypoints = npa.arange(Ny)
    xv, yv = npa.meshgrid(xpoints, ypoints, indexing='ij')

    # compute values and insert into array
    x_PW = npa.exp(1j * xpoints * kx)[:, None]
    y_PW = npa.exp(1j * ypoints * ky)[:, None]

    f_src[xv, yv] = npa.outer(x_PW, y_PW)

    return f_src.flatten()
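A call sketch with illustrative physical values (none of these numbers come from the ceviche project itself): a normally-incident plane wave on a 100 x 100 grid.

import autograd.numpy as npa

f = compute_f(theta=0.0, lambda0=1550e-9, dL=20e-9, shape=(100, 100))
print(f.shape)  # (10000,) -- one complex amplitude per grid cell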
Example #8
Source File: size_history.py From momi2 with GNU General Public License v3.0 | 6 votes |
def sfs(self, n):
    if n == 0:
        return np.array([0.])
    Et_jj = self.etjj(n)
    #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

    ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

    before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
    # ignore branch length above untruncated TMRCA
    if self.tau == float('inf'):
        before_tmrca = 0.0

    ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
    return ret

# def transition_prob(self, v, axis=0):
#     return moran_model.moran_action(self.scaled_time, v, axis=axis)
Example #9
Source File: fluidsim.py From autograd with MIT License | 6 votes |
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
       using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_ys, cell_xs = np.meshgrid(np.arange(rows), np.arange(cols))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_xs).astype(int)
    top_ix = np.floor(center_ys).astype(int)
    rw = center_xs - left_ix              # Relative weight of right-hand cells.
    bw = center_ys - top_ix               # Relative weight of bottom cells.
    left_ix = np.mod(left_ix, rows)       # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix = np.mod(top_ix, cols)
    bot_ix = np.mod(top_ix + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])

    return np.reshape(flat_f, (rows, cols))
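A toy call (values made up). Note that, as written, the meshgrid outputs only line up with vx and vy when the grid is square, so this sketch uses a 4 x 4 field.

import numpy as np

f = np.zeros((4, 4))
f[1, 1] = 1.0                 # a unit bump
vx = np.ones((4, 4))          # uniform unit velocity in x
vy = np.ones((4, 4))          # uniform unit velocity in y
print(advect(f, vx, vy))      # the bump at (1, 1) reappears at (2, 2), with wraparound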
Example #10
Source File: wing.py From autograd with MIT License | 6 votes |
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
       using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_ys).astype(int)
    top_ix = np.floor(center_xs).astype(int)
    rw = center_ys - left_ix              # Relative weight of right-hand cells.
    bw = center_xs - top_ix               # Relative weight of bottom cells.
    left_ix = np.mod(left_ix, rows)       # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix = np.mod(top_ix, cols)
    bot_ix = np.mod(top_ix + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])

    return np.reshape(flat_f, (rows, cols))
Example #11
Source File: modified_beta_geo_fitter.py From lifetimes with MIT License | 5 votes |
def probability_of_n_purchases_up_to_time(self, t, n):
    r"""
    Compute the probability of n purchases up to time t.

    .. math::  P( N(t) = n | \text{model} )

    where N(t) is the number of repeat purchases a customer makes in t
    units of time.

    Parameters
    ----------
    t: float
        number of units of time
    n: int
        number of purchases

    Returns
    -------
    float:
        Probability to have n purchases up to t units of time
    """
    r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

    _j = np.arange(0, n)

    first_term = (
        beta(a, b + n + 1)
        / beta(a, b)
        * gamma(r + n)
        / gamma(r)
        / gamma(n + 1)
        * (alpha / (alpha + t)) ** r
        * (t / (alpha + t)) ** n
    )
    finite_sum = (gamma(r + _j) / gamma(r) / gamma(_j + 1) * (t / (alpha + t)) ** _j).sum()
    second_term = beta(a + 1, b + n) / beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum)

    return first_term + second_term
Example #12
Source File: zakharov.py From pymoo with Apache License 2.0 | 5 votes |
def _evaluate(self, x, out, *args, **kwargs):
    a = anp.sum(0.5 * anp.arange(1, self.n_var + 1) * x, axis=1)
    out["F"] = anp.sum(anp.square(x), axis=1) + anp.square(a) + anp.power(a, 4)
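For reference, this _evaluate implements the standard Zakharov benchmark function (the formula below is the textbook definition, not copied from pymoo); arange supplies the index weights i:

f(\mathbf{x}) = \sum_{i=1}^{n} x_i^2 + \Big(\sum_{i=1}^{n} 0.5\, i\, x_i\Big)^2 + \Big(\sum_{i=1}^{n} 0.5\, i\, x_i\Big)^4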
Example #13
Source File: methods.py From tf-quant-finance with Apache License 2.0 | 5 votes |
def taylor_approx(target, stencil, values):
    """Use Taylor series to approximate up to second order derivatives.

    Args:
      target: An array of shape (..., n), a batch of n-dimensional points
        where one wants to approximate function value and derivatives.
      stencil: An array of shape broadcastable to (..., k, n), for each
        target point a set of k = triangle(n + 1) points to use on its
        approximation.
      values: An array of shape broadcastable to (..., k), the function
        value at each of the stencil points.

    Returns:
      An array of shape (..., k), for each target point the approximated
      function value, gradient and hessian evaluated at that point
      (flattened and in the same order as returned by derivative_names).
    """
    # Broadcast arrays to their required shape.
    batch_shape, ndim = target.shape[:-1], target.shape[-1]
    stencil = np.broadcast_to(stencil, batch_shape + (triangular(ndim + 1), ndim))
    values = np.broadcast_to(values, stencil.shape[:-1])

    # Subtract target from each stencil point.
    delta_x = stencil - np.expand_dims(target, axis=-2)
    delta_xy = np.matmul(
        np.expand_dims(delta_x, axis=-1), np.expand_dims(delta_x, axis=-2))
    i = np.arange(ndim)
    j, k = np.triu_indices(ndim, k=1)

    # Build coefficients for the Taylor series equations, namely:
    # f(stencil) = coeffs @ [f(target), df/d0(target), ...]
    coeffs = np.concatenate([
        np.ones(delta_x.shape[:-1] + (1,)),  # f(target)
        delta_x,                             # df/di(target)
        delta_xy[..., i, i] / 2,             # d^2f/di^2(target)
        delta_xy[..., j, k],                 # d^2f/{dj dk}(target)
    ], axis=-1)

    # Then: [f(target), df/d0(target), ...] = coeffs^{-1} @ f(stencil)
    return np.squeeze(
        np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1)
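A one-dimensional sanity check with a made-up stencil (this assumes the module's triangular helper returns the n-th triangular number, so n = 1 needs k = triangular(2) = 3 points): fitting f(x) = x^2 at 0 should recover value 0, gradient 0, and second derivative 2.

import numpy as np

target = np.array([0.0])                       # approximate at x = 0
stencil = np.array([[-1.0], [0.0], [1.0]])     # k = 3 stencil points
values = stencil[:, 0] ** 2                    # f(x) = x^2 at the stencil
print(taylor_approx(target, stencil, values))  # ~[0. 0. 2.]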
Example #14
Source File: rnn.py From autograd with MIT License | 5 votes |
def string_to_one_hot(string, maxchar):
    """Converts an ASCII string to a one-of-k encoding."""
    ascii = np.array([ord(c) for c in string]).T
    return np.array(ascii[:, None] == np.arange(maxchar)[None, :], dtype=int)
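The one-hot trick here, broadcasting a column of character codes against a row built with arange, deserves a quick illustration (values below are illustrative):

one_hot = string_to_one_hot("ab", maxchar=128)
print(one_hot.shape)         # (2, 128)
print(one_hot[0, ord("a")])  # 1 -- row 0 is hot at index 97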
Example #15
Source File: data.py From autograd with MIT License | 5 votes |
def load_mnist():
    partial_flatten = lambda x: np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, k: np.array(x[:, None] == np.arange(k)[None, :], dtype=int)
    train_images, train_labels, test_images, test_labels = data_mnist.mnist()
    train_images = partial_flatten(train_images) / 255.0
    test_images = partial_flatten(test_images) / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]

    return N_data, train_images, train_labels, test_images, test_labels
Example #16
Source File: data.py From autograd with MIT License | 5 votes |
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:, 0])
    rotations = np.stack([np.cos(angles), -np.sin(angles),
                          np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations)
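A quick call sketch, given the function and imports above (the parameter values are arbitrary): three spiral arms of 100 points each come back as a (300, 2) coordinate array, with labels repeated per class via arange.

data = make_pinwheel(radial_std=0.3, tangential_std=0.05,
                     num_classes=3, num_per_class=100, rate=0.25)
print(data.shape)  # (300, 2)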
Example #17
Source File: run_synthetic_example.py From ParetoMTL with MIT License | 5 votes |
def get_d_paretomtl_init(grads, value, weights, i):
    # calculate the gradient direction for Pareto MTL initialization
    nobj, dim = grads.shape

    # check active constraints
    normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, (i), axis=0) / \
        np.linalg.norm(np.delete(weights, (i), axis=0), axis=1, keepdims=True)
    w = normalized_rest_weights - normalized_current_weight

    gx = np.dot(w, value / np.linalg.norm(value))
    idx = gx > 0

    if np.sum(idx) <= 0:
        return np.zeros(nobj)
    if np.sum(idx) == 1:
        sol = np.ones(1)
    else:
        vec = np.dot(w[idx], grads)
        sol, nd = MinNormSolver.find_min_norm_element(vec)

    # calculate the weights
    weight0 = np.sum(np.array([sol[j] * w[idx][j, 0] for j in np.arange(0, np.sum(idx))]))
    weight1 = np.sum(np.array([sol[j] * w[idx][j, 1] for j in np.arange(0, np.sum(idx))]))
    weight = np.stack([weight0, weight1])

    return weight
Example #18
Source File: utils.py From ceviche with MIT License | 5 votes |
def get_spectrum(series, dt):
    """Get FFT of series"""

    steps = len(series)
    times = np.arange(steps) * dt

    # reshape to be able to multiply by hamming window
    series = series.reshape((steps, -1))

    # multiply with hamming window to get rid of numerical errors
    hamming_window = np.hamming(steps).reshape((steps, 1))
    signal_f = my_fft(hamming_window * series)

    freqs = np.fft.fftfreq(steps, d=dt)
    return freqs, signal_f
Example #19
Source File: fdfd.py From ceviche with MIT License | 5 votes |
def _make_A(self, eps_vec):

    C = - 1 / MU_0 * self.Dxf.dot(self.Dxb) \
        - 1 / MU_0 * self.Dyf.dot(self.Dyb)
    entries_c, indices_c = get_entries_indices(C)

    # indices into the diagonal of a sparse matrix
    entries_diag = - EPSILON_0 * self.omega**2 * eps_vec
    indices_diag = npa.vstack((npa.arange(self.N), npa.arange(self.N)))

    entries_a = npa.hstack((entries_diag, entries_c))
    indices_a = npa.hstack((indices_diag, indices_c))

    return entries_a, indices_a
Example #20
Source File: zakharov.py From pymop with Apache License 2.0 | 5 votes |
def _evaluate(self, x, out, *args, **kwargs):
    a = np.sum(0.5 * np.arange(1, self.n_var + 1) * x, axis=1)
    out["F"] = np.sum(np.square(x), axis=1) + np.square(a) + np.power(a, 4)
Example #21
Source File: dtlz.py From pymop with Apache License 2.0 | 5 votes |
def get_scale(n, scale_factor):
    return anp.power(anp.full(n, scale_factor), anp.arange(n))
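Read: get_scale(n, s) builds the geometric sequence [1, s, s^2, ..., s^(n-1)], since arange(n) supplies the exponents. A quick check with illustrative values:

import autograd.numpy as anp

print(get_scale(4, 10.0))  # [   1.   10.  100. 1000.]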
Example #22
Source File: VariationalAutoencoders.py From DeepLearningTutorial with MIT License | 5 votes |
def __init__(self, Y, layers_encoder, layers_decoder,
             max_iter=2000, N_batch=1, monitor_likelihood=10, lrate=1e-3):
    self.Y = Y
    self.Y_dim = Y.shape[1]
    self.Z_dim = layers_encoder[-1]
    self.layers_encoder = layers_encoder
    self.layers_decoder = layers_decoder
    self.max_iter = max_iter
    self.N_batch = N_batch
    self.monitor_likelihood = monitor_likelihood

    # Initialize encoder
    hyp = self.initialize_NN(layers_encoder)
    self.idx_encoder = np.arange(hyp.shape[0])

    # Initialize decoder
    hyp = np.concatenate([hyp, self.initialize_NN(layers_decoder)])
    self.idx_decoder = np.arange(self.idx_encoder[-1] + 1, hyp.shape[0])

    self.hyp = hyp

    # Adam optimizer parameters
    self.mt_hyp = np.zeros(hyp.shape)
    self.vt_hyp = np.zeros(hyp.shape)
    self.lrate = lrate

    print("Total number of parameters: %d" % (hyp.shape[0]))
Example #23
Source File: ConditionalVariationalAutoencoders.py From DeepLearningTutorial with MIT License | 5 votes |
def __init__(self, X, Y, layers_encoder_0, layers_encoder_1, layers_decoder,
             max_iter=2000, N_batch=1, monitor_likelihood=10, lrate=1e-3):
    self.X = X
    self.Y = Y
    self.Y_dim = Y.shape[1]
    self.Z_dim = layers_encoder_0[-1]
    self.layers_encoder_0 = layers_encoder_0
    self.layers_encoder_1 = layers_encoder_1
    self.layers_decoder = layers_decoder
    self.max_iter = max_iter
    self.N_batch = N_batch
    self.monitor_likelihood = monitor_likelihood

    # Initialize encoder_0
    hyp = self.initialize_NN(layers_encoder_0)
    self.idx_encoder_0 = np.arange(hyp.shape[0])

    # Initialize encoder_1
    hyp = np.concatenate([hyp, self.initialize_NN(layers_encoder_1)])
    self.idx_encoder_1 = np.arange(self.idx_encoder_0[-1] + 1, hyp.shape[0])

    # Initialize decoder
    hyp = np.concatenate([hyp, self.initialize_NN(layers_decoder)])
    self.idx_decoder = np.arange(self.idx_encoder_1[-1] + 1, hyp.shape[0])

    self.hyp = hyp

    # Adam optimizer parameters
    self.mt_hyp = np.zeros(hyp.shape)
    self.vt_hyp = np.zeros(hyp.shape)
    self.lrate = lrate

    print("Total number of parameters: %d" % (hyp.shape[0]))
Example #24
Source File: convnet.py From MLAlgorithms with MIT License | 5 votes |
def backward_pass(self, delta):
    delta = delta.transpose(0, 2, 3, 1)

    pool_size = self.pool_shape[0] * self.pool_shape[1]
    y_max = np.zeros((delta.size, pool_size))
    y_max[np.arange(self.arg_max.size), self.arg_max.flatten()] = delta.flatten()
    y_max = y_max.reshape(delta.shape + (pool_size,))

    dcol = y_max.reshape(y_max.shape[0] * y_max.shape[1] * y_max.shape[2], -1)
    return column_to_image(dcol, self.last_input.shape, self.pool_shape,
                           self.stride, self.padding)
Example #25
Source File: configurations.py From momi2 with GNU General Public License v3.0 | 5 votes |
def _vecs_and_idxs(self, folded):
    augmented_configs = self._augmented_configs(folded)
    augmented_idxs = self._augmented_idxs(folded)

    # construct the vecs
    vecs = [np.zeros((len(augmented_configs), n + 1))
            for n in self.sampled_n]

    for i in range(len(vecs)):
        n = self.sampled_n[i]
        derived = np.einsum(
            "i,j->ji", np.ones(len(augmented_configs)), np.arange(n + 1))
        curr = scipy.stats.hypergeom.pmf(
            k=augmented_configs[:, i, 1], M=n, n=derived,
            N=augmented_configs[:, i].sum(1)
        )
        assert not np.any(np.isnan(curr))
        vecs[i] = np.transpose(curr)

    # copy augmented_idxs to make it safe
    return vecs, dict(augmented_idxs)

# def _config_str_iter(self):
#     for c in self.value:
#         yield _config2hashable(c)
Example #26
Source File: dtlz.py From pymoo with Apache License 2.0 | 5 votes |
def get_scale(n, scale_factor):
    return anp.power(anp.full(n, scale_factor), anp.arange(n))
Example #27
Source File: clutch.py From pymoo with Apache License 2.0 | 5 votes |
def __init__(self):
    # note: anp.int is just the builtin int; the alias was removed in NumPy >= 1.24
    super().__init__(n_var=5, n_obj=2, n_constr=19, type_var=anp.int)

    # ri, ro, t, F, Z
    # self.xl = anp.array([60, 90, 1, 600, 2])
    self.xl = anp.array([0, 0, 0, 0, 0])
    self.xu = anp.array([20, 20, 4, 400, 7])

    self.x1 = anp.arange(60, 81)
    self.x2 = anp.arange(90, 111)
    self.x3 = anp.arange(1, 3.5, 0.5)
    self.x4 = anp.arange(600, 1001)
    self.x5 = anp.arange(2, 11)
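One detail worth flagging in the block above (a general NumPy fact, not pymoo-specific): arange with a float step excludes the stop value, so the thickness grid comes out as the five values below.

import autograd.numpy as anp

print(anp.arange(1, 3.5, 0.5))  # [1.  1.5 2.  2.5 3. ]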
Example #28
Source File: psf.py From scarlet with MIT License | 5 votes |
def moffat(y, x, alpha=4.7, beta=1.5, bbox=None):
    r"""Symmetric 2D Moffat function

    .. math::

        (1+\frac{(x-x0)^2+(y-y0)^2}{\alpha^2})^{-\beta}

    Parameters
    ----------
    y: float
        Vertical coordinate of the center
    x: float
        Horizontal coordinate of the center
    alpha: float
        Core width
    beta: float
        Power-law index
    bbox: Box
        Bounding box over which to evaluate the function

    Returns
    -------
    result: array
        A 2D Moffat profile sampled at the coordinates `(y_i, x_j)`
        for all i and j in `shape`.
    """
    Y = np.arange(bbox.shape[1]) + bbox.origin[1]
    X = np.arange(bbox.shape[2]) + bbox.origin[2]
    X, Y = np.meshgrid(X, Y)
    # TODO: has no pixel-integration formula
    return ((1 + ((X - x) ** 2 + (Y - y) ** 2) / alpha ** 2) ** -beta)[None, :, :]
Example #29
Source File: psf.py From scarlet with MIT License | 5 votes |
def gaussian(y, x, sigma=1, integrate=True, bbox=None):
    """Circular Gaussian Function

    Parameters
    ----------
    y: float
        Vertical coordinate of the center
    x: float
        Horizontal coordinate of the center
    sigma: float
        Standard deviation of the gaussian
    integrate: bool
        Whether pixel integration is performed
    bbox: Box
        Bounding box over which to evaluate the function

    Returns
    -------
    result: array
        A 2D circular gaussian sampled at the coordinates `(y_i, x_j)`
        for all i and j in `shape`.
    """
    Y = np.arange(bbox.shape[1]) + bbox.origin[1]
    X = np.arange(bbox.shape[2]) + bbox.origin[2]

    def f(X):
        if not integrate:
            return np.exp(-(X ** 2) / (2 * sigma ** 2))
        else:
            sqrt2 = np.sqrt(2)
            return (
                np.sqrt(np.pi / 2)
                * sigma
                * (
                    scipy.special.erf((0.5 - X) / (sqrt2 * sigma))
                    + scipy.special.erf((2 * X + 1) / (2 * sqrt2 * sigma))
                )
            )

    return (f(Y - y)[:, None] * f(X - x)[None, :])[None, :, :]
Example #30
Source File: wavelet.py From scarlet with MIT License | 5 votes |
def iuwt(starlet):
    """Inverse starlet transform

    Parameters
    ----------
    starlet: Shapelet object
        Starlet to be inverted

    Returns
    -------
    cJ: array
        a 2D image that corresponds to the inverse transform of the starlet.
    """
    lvl, n1, n2 = np.shape(starlet)
    n = np.size(h)  # h: the scaling filter defined at module level

    # Coarse scale
    cJ = fft.Fourier(starlet[-1, :, :])
    for i in np.arange(1, lvl):
        newh = np.zeros((n + (n - 1) * (2 ** (lvl - i - 1) - 1), 1))
        newh[0::2 ** (lvl - i - 1), 0] = h
        newhT = fft.Fourier(newh.T)
        newh = fft.Fourier(newh)

        # Line convolution
        cnew = fft.convolve(cJ, newh, axes=[0])
        # Column convolution
        cnew = fft.convolve(cnew, newhT, axes=[1])

        cJ = fft.Fourier(cnew.image + starlet[lvl - 1 - i, :, :])

    return np.reshape(cJ.image, (n1, n2))