Python autograd.numpy.where() Examples
The following are 12 code examples of autograd.numpy.where().
You can go to the original project or source file by following the links above each example. You may also want to check out the other available functions and classes of the autograd.numpy module.
Example #1
Source File: util.py From kernel-gof with MIT License
def one_of_K_code(arr):
    """
    Make a one-of-K coding out of the numpy array.
    For example, if arr = ([0, 1, 0, 2]), then return a 2d array of the form
    [[1, 0, 0],
     [0, 1, 0],
     [1, 0, 0],
     [0, 0, 1]]
    """
    U = np.unique(arr)
    n = len(arr)
    nu = len(U)
    X = np.zeros((n, nu))
    for i, u in enumerate(U):
        Ii = np.where(np.abs(arr - u) < 1e-8)
        # ni = len(Ii)
        X[Ii[0], i] = 1
    return X
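This example uses np.where in its single-argument form, which returns a tuple of index arrays where the condition holds. A minimal, self-contained sketch of the same one-of-K pattern (assuming only autograd.numpy; the array values are illustrative):

import autograd.numpy as np

arr = np.array([0, 1, 0, 2])
U = np.unique(arr)                         # unique labels: [0, 1, 2]
X = np.zeros((len(arr), len(U)))
for i, u in enumerate(U):
    Ii = np.where(np.abs(arr - u) < 1e-8)  # indices where arr equals label u
    X[Ii[0], i] = 1
# X is [[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]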
Example #2
Source File: wavelet.py From scarlet with MIT License
def filter(self, niter=20, k=5):
    """ Applies wavelet iterative filtering to denoise the image

    Parameters
    ----------
    niter: int
        number of iterations
    k: float
        threshold in units of noise levels below which coefficients are thresholded
    lvl: int
        number of wavelet scales to use in the decomposition

    Returns
    -------
    filtered: array
        the filtered image
    """
    if self._coeffs is None:
        self.coefficients
    if self._image is None:
        self.image()
    sigma = k * mad_wavelet(self._image)[:, None] * self.norm[None, :]

    filtered = 0
    image = self._image
    wavelet = self._coeffs

    support = np.where(np.abs(wavelet[:, :-1, :, :]) <
                       sigma[:, :-1, None, None] * np.ones_like(wavelet[:, :-1, :, :]))

    for i in range(niter):
        R = image - filtered
        R_coeff = Starlet(R)
        R_coeff.coefficients[support] = 0
        filtered += R_coeff.image
        filtered[filtered < 0] = 0
    self.image = filtered
    return filtered
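Here the single-argument np.where builds the support index tuple once: it marks the wavelet coefficients whose magnitude falls below the noise threshold, and those same indices are used to zero the coefficients on every iteration. A small sketch of that hard-thresholding pattern, with illustrative values (plain numpy used for brevity):

import numpy as np

w = np.array([[0.1, 2.0], [-0.05, -3.0]])
support = np.where(np.abs(w) < 0.5)  # index tuple of sub-threshold coefficients
w[support] = 0                       # hard-threshold them
# w is now [[0., 2.], [0., -3.]]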
Example #3
Source File: geometry.py From AeroSandbox with MIT License
def add_control_surface(self, deflection=0., hinge_point=0.75):
    # Returns a version of the airfoil with a control surface added at a given point.
    # Inputs:
    #   # deflection: the deflection angle, in degrees. Downwards-positive.
    #   # hinge_point: the location of the hinge, as a fraction of chord.

    # Make the rotation matrix for the given angle.
    sintheta = np.sin(np.radians(-deflection))
    costheta = np.cos(np.radians(-deflection))
    rotation_matrix = np.array(
        [[costheta, -sintheta],
         [sintheta, costheta]]
    )

    # Find the hinge point
    hinge_point = np.array(
        (hinge_point, self.get_camber_at_chord_fraction(hinge_point)))  # Make hinge_point a vector.

    # Split the airfoil into the sections before and after the hinge
    split_index = np.where(self.mcl_coordinates[:, 0] > hinge_point[0])[0][0]
    mcl_coordinates_before = self.mcl_coordinates[:split_index, :]
    mcl_coordinates_after = self.mcl_coordinates[split_index:, :]
    upper_minus_mcl_before = self.upper_minus_mcl[:split_index, :]
    upper_minus_mcl_after = self.upper_minus_mcl[split_index:, :]

    # Rotate the mean camber line (MCL) and "upper minus mcl"
    new_mcl_coordinates_after = np.transpose(
        rotation_matrix @ np.transpose(mcl_coordinates_after - hinge_point)) + hinge_point
    new_upper_minus_mcl_after = np.transpose(rotation_matrix @ np.transpose(upper_minus_mcl_after))

    # Do blending

    # Assemble airfoil
    new_mcl_coordinates = np.vstack((mcl_coordinates_before, new_mcl_coordinates_after))
    new_upper_minus_mcl = np.vstack((upper_minus_mcl_before, new_upper_minus_mcl_after))
    upper_coordinates = np.flipud(new_mcl_coordinates + new_upper_minus_mcl)
    lower_coordinates = new_mcl_coordinates - new_upper_minus_mcl
    coordinates = np.vstack((upper_coordinates, lower_coordinates[1:, :]))

    new_airfoil = Airfoil(name=self.name + " flapped", coordinates=coordinates, repanel=False)
    return new_airfoil  # TODO fix self-intersecting airfoils at high deflections
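np.where(condition)[0][0] is a common idiom for "first index at which the condition holds"; here it locates the first mean-camber-line point aft of the hinge so the airfoil can be split there. A minimal sketch with illustrative values:

import autograd.numpy as np

x_coords = np.array([0.0, 0.3, 0.6, 0.9])
split_index = np.where(x_coords > 0.5)[0][0]            # first index past the hinge -> 2
print(x_coords[:split_index], x_coords[split_index:])   # [0.  0.3] [0.6 0.9]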
Example #4
Source File: beta_geo_fitter.py From lifetimes with MIT License
def conditional_probability_alive(self, frequency, recency, T):
    """
    Compute conditional probability alive.

    Compute the probability that a customer with history
    (frequency, recency, T) is currently alive.

    From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf

    Parameters
    ----------
    frequency: array or scalar
        historical frequency of customer.
    recency: array or scalar
        historical recency of customer.
    T: array or scalar
        age of the customer.

    Returns
    -------
    array
        value representing a probability
    """
    r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

    log_div = (r + frequency) * np.log((alpha + T) / (alpha + recency)) + np.log(
        a / (b + np.maximum(frequency, 1) - 1)
    )

    return np.atleast_1d(np.where(frequency == 0, 1.0, expit(-log_div)))
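The final np.where handles an edge case in one vectorized step: a customer with zero repeat purchases is treated as alive with probability 1, and everyone else gets the logistic expression. A rough sketch of that guard pattern (scipy.special.expit stands in for the expit used above, and the values are illustrative):

import numpy as np
from scipy.special import expit

frequency = np.array([0, 3, 7])
log_div = np.array([0.5, -1.0, 2.0])
p_alive = np.where(frequency == 0, 1.0, expit(-log_div))
# position 0 is forced to 1.0 regardless of log_div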
Example #5
Source File: hmm_em.py From autograd with MIT License
def normalize(a):
    def replace_zeros(a):
        return np.where(a > 0., a, 1.)
    return a / replace_zeros(a.sum(-1, keepdims=True))
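Note that np.where evaluates both of its value arguments; the trick above avoids dividing by zero by first replacing zero row-sums with 1 and only then dividing, so all-zero rows pass through unchanged and no NaNs reach autograd's backward pass. A quick check under that reading:

import autograd.numpy as np

def normalize(a):
    def replace_zeros(a):
        return np.where(a > 0., a, 1.)
    return a / replace_zeros(a.sum(-1, keepdims=True))

a = np.array([[1., 3.], [0., 0.]])
print(normalize(a))  # [[0.25 0.75], [0. 0.]]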
Example #6
Source File: poisson.py From autograd with MIT License
def grad_poisson_logpmf(k, mu):
    return np.where(k % 1 == 0, k / mu - 1, 0)
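This is the derivative of the Poisson log-pmf with respect to mu: the log-pmf is k·log(mu) − mu − log(k!), so its mu-derivative is k/mu − 1, and np.where zeroes it out for non-integer k, where the pmf has no support. A quick sanity check under that reading:

import autograd.numpy as np

def grad_poisson_logpmf(k, mu):
    return np.where(k % 1 == 0, k / mu - 1, 0)

k = np.array([0., 1., 4., 2.5])
print(grad_poisson_logpmf(k, 2.0))  # [-1.  -0.5  1.   0. ]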
Example #7
Source File: chi2.py From autograd with MIT License
def grad_chi2_logpdf(x, df):
    return np.where(df % 1 == 0, (df - x - 2) / (2 * x), 0)
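Same pattern as the Poisson example, this time differentiating with respect to x: the chi-squared log-density is (df/2 − 1)·log(x) − x/2 plus a constant, so its x-derivative is (df − x − 2) / (2x), and np.where masks the result for non-integer df. A quick check under that reading:

import autograd.numpy as np

def grad_chi2_logpdf(x, df):
    return np.where(df % 1 == 0, (df - x - 2) / (2 * x), 0)

print(grad_chi2_logpdf(np.array([1., 2.]), 4))  # [0.5 0. ]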
Example #8
Source File: test_numpy.py From autograd with MIT License
def test_where():
    def fun(x, y):
        b = np.where(C, x, y)
        return b
    C = npr.randn(4, 5) > 0
    A = npr.randn(4, 5)
    B = npr.randn(4, 5)
    check_grads(fun)(A, B)
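What this test verifies is that the gradient of np.where flows only through the branch each output element was selected from; the untaken branch receives zero. A small standalone illustration (assuming only autograd):

import autograd.numpy as np
from autograd import grad

C = np.array([True, False, True])
g = grad(lambda x: np.sum(np.where(C, x, 0.0)))
print(g(np.array([1.0, 2.0, 3.0])))  # [1. 0. 1.] -- gradient only where C is True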
Example #9
Source File: linear_model.py From scikit-lego with MIT License
def fit(self, X, y):
    X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
    if self.effect not in self.allowed_effects:
        raise ValueError(f"effect {self.effect} must be in {self.allowed_effects}")

    def deadzone(errors):
        if self.effect == "linear":
            return np.where(errors > self.threshold, errors, np.zeros(errors.shape))
        if self.effect == "quadratic":
            return np.where(errors > self.threshold, errors ** 2, np.zeros(errors.shape))

    def training_loss(weights):
        diff = np.abs(np.dot(X, weights) - y)
        if self.relative:
            diff = diff / y
        return np.mean(deadzone(diff))

    n, k = X.shape

    # Build a function that returns gradients of training loss using autograd.
    training_gradient_fun = grad(training_loss)

    # Check the gradients numerically, just to be safe.
    weights = np.random.normal(0, 1, k)
    if self.check_grad:
        check_grads(training_loss, modes=["rev"])(weights)

    # Optimize weights using gradient descent.
    self.loss_log_ = np.zeros(self.n_iter)
    self.wts_log_ = np.zeros((self.n_iter, k))
    self.deriv_log_ = np.zeros((self.n_iter, k))
    for i in range(self.n_iter):
        weights -= training_gradient_fun(weights) * self.stepsize
        self.wts_log_[i, :] = weights.ravel()
        self.loss_log_[i] = training_loss(weights)
        self.deriv_log_[i, :] = training_gradient_fun(weights).ravel()
    self.coefs_ = weights
    return self
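The deadzone function is where np.where does the work in this estimator: errors at or below self.threshold contribute nothing to the loss, so the regressor ignores small residuals entirely. A standalone sketch of the two effects with illustrative values:

import autograd.numpy as np

threshold = 0.3
errors = np.array([0.1, 0.4, 1.0])
print(np.where(errors > threshold, errors, np.zeros(errors.shape)))       # linear:    [0.  0.4 1. ]
print(np.where(errors > threshold, errors ** 2, np.zeros(errors.shape)))  # quadratic: [0.   0.16 1.  ]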
Example #10
Source File: geometry.py From AeroSandbox with MIT License
def get_bounding_cube(self):
    """ Finds the axis-aligned cube that encloses the airplane with the smallest size.
        Useful for plotting and getting a sense for the scale of a problem.

        Args:
            self.wings (iterable): All the wings included for analysis, each containing
                their geometry in x, y, z notation using units of m

        Returns:
            tuple: Tuple of 4 floats x, y, z, and s, where x, y, and z are the coordinates
                of the cube center, and s is half of the side length.
    """
    # Get vertices to enclose
    vertices = None
    for wing in self.wings:
        for xsec in wing.xsecs:
            if vertices is None:
                vertices = xsec.xyz_le + wing.xyz_le
            else:
                vertices = np.vstack((
                    vertices,
                    xsec.xyz_le + wing.xyz_le
                ))
            vertices = np.vstack((
                vertices,
                xsec.xyz_te() + wing.xyz_le
            ))

            if wing.symmetric:
                vertices = np.vstack((
                    vertices,
                    reflect_over_XZ_plane(xsec.xyz_le + wing.xyz_le)
                ))
                vertices = np.vstack((
                    vertices,
                    reflect_over_XZ_plane(xsec.xyz_te() + wing.xyz_le)
                ))

    # Enclose them
    x_max = np.max(vertices[:, 0])
    y_max = np.max(vertices[:, 1])
    z_max = np.max(vertices[:, 2])
    x_min = np.min(vertices[:, 0])
    y_min = np.min(vertices[:, 1])
    z_min = np.min(vertices[:, 2])

    x = np.mean((x_max, x_min))
    y = np.mean((y_max, y_min))
    z = np.mean((z_max, z_min))
    s = 0.5 * np.max((
        x_max - x_min,
        y_max - y_min,
        z_max - z_min
    ))

    return x, y, z, s
Example #11
Source File: beta_geo_fitter.py From lifetimes with MIT License
def conditional_expected_number_of_purchases_up_to_time(self, t, frequency, recency, T):
    """
    Conditional expected number of purchases up to time.

    Calculate the expected number of repeat purchases up to time t for a
    randomly chosen individual from the population, given they have
    purchase history (frequency, recency, T).

    This function uses equation (10) from [2]_.

    Parameters
    ----------
    t: array_like
        times to calculate the expectation for.
    frequency: array_like
        historical frequency of customer.
    recency: array_like
        historical recency of customer.
    T: array_like
        age of the customer.

    Returns
    -------
    array_like

    References
    ----------
    .. [2] Fader, Peter S., Bruce G.S. Hardie, and Ka Lok Lee (2005a),
       "Counting Your Customers the Easy Way: An Alternative to the
       Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
    """
    x = frequency
    r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

    _a = r + x
    _b = b + x
    _c = a + b + x - 1
    _z = t / (alpha + T + t)
    ln_hyp_term = np.log(hyp2f1(_a, _b, _c, _z))

    # If the value is inf, use a different but equivalent
    # formula to compute the function evaluation.
    ln_hyp_term_alt = np.log(hyp2f1(_c - _a, _c - _b, _c, _z)) + (_c - _a - _b) * np.log(1 - _z)
    ln_hyp_term = np.where(np.isinf(ln_hyp_term), ln_hyp_term_alt, ln_hyp_term)

    first_term = (a + b + x - 1) / (a - 1)
    second_term = 1 - np.exp(ln_hyp_term + (r + x) * np.log((alpha + T) / (alpha + t + T)))

    numerator = first_term * second_term
    denominator = 1 + (x > 0) * (a / (b + x - 1)) * ((alpha + T) / (alpha + recency)) ** (r + x)

    return numerator / denominator
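The np.where here is a numerical-stability guard: where log(hyp2f1(...)) overflows to inf, an equivalent evaluation via Euler's transformation, 2F1(a, b; c; z) = (1 - z)^(c-a-b) * 2F1(c-a, c-b; c; z), is substituted element-wise. A quick check of that identity (scipy.special.hyp2f1 used for illustration; the parameter values are arbitrary):

import numpy as np
from scipy.special import hyp2f1

a, b, c, z = 0.5, 1.5, 2.5, 0.3
lhs = np.log(hyp2f1(a, b, c, z))
rhs = np.log(hyp2f1(c - a, c - b, c, z)) + (c - a - b) * np.log(1 - z)
print(np.isclose(lhs, rhs))  # True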
Example #12
Source File: beta_geo_fitter.py From lifetimes with MIT License
def probability_of_n_purchases_up_to_time(self, t, n):
    r"""
    Compute the probability of n purchases.

    .. math::  P( N(t) = n | \text{model} )

    where N(t) is the number of repeat purchases a customer makes in t
    units of time.

    Comes from equation (8) of [2]_.

    Parameters
    ----------
    t: float
        number of units of time
    n: int
        number of purchases

    Returns
    -------
    float:
        Probability of having n purchases up to t units of time

    References
    ----------
    .. [2] Fader, Peter S., Bruce G.S. Hardie, and Ka Lok Lee (2005a),
       "Counting Your Customers the Easy Way: An Alternative to the
       Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
    """
    r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

    first_term = (
        beta(a, b + n)
        / beta(a, b)
        * gamma(r + n)
        / gamma(r)
        / gamma(n + 1)
        * (alpha / (alpha + t)) ** r
        * (t / (alpha + t)) ** n
    )
    if n > 0:
        j = np.arange(0, n)
        finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) * (t / (alpha + t)) ** j).sum()
        second_term = beta(a + 1, b + n - 1) / beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum)
    else:
        second_term = 0

    return first_term + second_term