Python numpy.ones_like() Examples
The following are 30 code examples of numpy.ones_like(), collected from open-source projects; the source file, project, and license are noted above each example.
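numpy.ones_like(a) returns a new array of ones with the same shape and data type as a; an optional dtype argument overrides the element type. As a minimal sketch of that basic behavior before the project examples:

import numpy as np

a = np.arange(6, dtype=np.int64).reshape(2, 3)

# Same shape and dtype as `a`, filled with ones.
ones_int = np.ones_like(a)                  # array([[1, 1, 1], [1, 1, 1]])

# An explicit dtype overrides the element type of the result.
ones_float = np.ones_like(a, dtype=float)   # array([[1., 1., 1.], [1., 1., 1.]])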
Example #1
Source File: test_masking.py From yatsm with MIT License
def masking_data(request):
    # Two years, 8 day repeat
    x = np.arange(735851, 735851 + 365 * 2, 8)

    # Simulate some timeseries in green & swir1
    def seasonality(x, amp):
        return np.cos(2 * np.pi / 365.25 * x) * amp

    green = np.ones_like(x) * 1000 + seasonality(x, 750)
    swir1 = np.ones_like(x) * 1250 + seasonality(x, 500)
    Y = np.vstack((green, swir1))

    # Add in some noise
    idx_green_noise = 15
    idx_swir1_noise = 30
    Y[0, idx_green_noise] = 8000
    Y[1, idx_swir1_noise] = 10

    return x, Y, np.array([idx_green_noise, idx_swir1_noise])
Example #2
Source File: numerical.py From Kaggler with MIT License
def _transform_col(self, x, i):
    """Encode one numerical feature column to quantiles.

    Args:
        x (pandas.Series): numerical feature column to encode
        i (int): column index of the numerical feature

    Returns:
        Encoded feature (pandas.Series).
    """
    # Map values to the empirical CDF between .1% and 99.9%
    rv = np.ones_like(x) * -1

    filt = ~np.isnan(x)
    rv[filt] = np.floor((self.ecdfs[i](x[filt]) * 0.998 + .001) * self.n_label)

    return rv
Example #3
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr
Example #4
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score
Example #5
Source File: test_constraint.py From scarlet with MIT License
def test_symmetry(self):
    shape = (5, 5)
    X = np.arange(shape[0] * shape[1], dtype=float).reshape(*shape)

    # symmetry
    step = 0
    X_ = X.copy()
    constraint = scarlet.SymmetryConstraint()
    X_ = constraint(X_, step)
    new_X = np.ones_like(X) * 12
    assert_almost_equal(X_, new_X)

    # symmetry at half strength
    X_ = X.copy()
    constraint = scarlet.SymmetryConstraint(strength=0.5)
    X_ = constraint(X_, step)
    new_X = [
        [6.0, 6.5, 7.0, 7.5, 8.0],
        [8.5, 9.0, 9.5, 10.0, 10.5],
        [11.0, 11.5, 12.0, 12.5, 13.0],
        [13.5, 14.0, 14.5, 15.0, 15.5],
        [16.0, 16.5, 17.0, 17.5, 18.0],
    ]
    assert_almost_equal(X_, new_X)
Example #6
Source File: networks.py From connecting_the_dots with MIT License
def tforward(self, disp0, im, std=None):
    self.pattern = self.pattern.to(disp0.device)
    self.uv0 = self.uv0.to(disp0.device)

    uv0 = self.uv0.expand(disp0.shape[0], *self.uv0.shape[1:])
    uv1 = torch.empty_like(uv0)
    uv1[..., 0] = uv0[..., 0] - disp0.contiguous().view(disp0.shape[0], -1)
    uv1[..., 1] = uv0[..., 1]

    uv1[..., 0] = 2 * (uv1[..., 0] / (self.im_width - 1) - 0.5)
    uv1[..., 1] = 2 * (uv1[..., 1] / (self.im_height - 1) - 0.5)
    uv1 = uv1.view(-1, self.im_height, self.im_width, 2).clone()

    pattern = self.pattern.expand(disp0.shape[0], *self.pattern.shape[1:])
    pattern_proj = torch.nn.functional.grid_sample(pattern, uv1, padding_mode='border')

    mask = torch.ones_like(im)
    if std is not None:
        mask = mask * std

    diff = torchext.photometric_loss(pattern_proj.contiguous(), im.contiguous(), 9, self.loss_type, self.loss_eps)
    val = (mask * diff).sum() / mask.sum()
    return val, pattern_proj
Example #7
Source File: eom_kccsd_ghf.py From pyscf with Apache License 2.0
def mask_frozen_ip(eom, vector, kshift, const=LARGE_DENOM):
    '''Replaces all frozen orbital indices of `vector` with the value `const`.'''
    r1, r2 = eom.vector_to_amplitudes(vector, kshift=kshift)
    nkpts = eom.nkpts
    nocc, nmo = eom.nocc, eom.nmo
    kconserv = eom.kconserv

    # Get location of padded elements in occupied and virtual space
    nonzero_opadding, nonzero_vpadding = eom.nonzero_opadding, eom.nonzero_vpadding

    new_r1 = const * np.ones_like(r1)
    new_r2 = const * np.ones_like(r2)

    new_r1[nonzero_opadding[kshift]] = r1[nonzero_opadding[kshift]]
    for ki in range(nkpts):
        for kj in range(nkpts):
            kb = kconserv[ki, kshift, kj]
            idx = np.ix_([ki], [kj], nonzero_opadding[ki], nonzero_opadding[kj], nonzero_vpadding[kb])
            new_r2[idx] = r2[idx]

    return eom.amplitudes_to_vector(new_r1, new_r2, kshift, kconserv)
Example #8
Source File: eom_kccsd_ghf.py From pyscf with Apache License 2.0
def mask_frozen_ea(eom, vector, kshift, const=LARGE_DENOM):
    '''Replaces all frozen orbital indices of `vector` with the value `const`.'''
    r1, r2 = eom.vector_to_amplitudes(vector, kshift=kshift)
    kconserv = eom.kconserv
    nkpts = eom.nkpts
    nocc, nmo = eom.nocc, eom.nmo

    # Get location of padded elements in occupied and virtual space
    nonzero_opadding, nonzero_vpadding = eom.nonzero_opadding, eom.nonzero_vpadding

    new_r1 = const * np.ones_like(r1)
    new_r2 = const * np.ones_like(r2)

    new_r1[nonzero_vpadding[kshift]] = r1[nonzero_vpadding[kshift]]
    for kj in range(nkpts):
        for ka in range(nkpts):
            kb = kconserv[kshift, ka, kj]
            idx = np.ix_([kj], [ka], nonzero_opadding[kj], nonzero_vpadding[ka], nonzero_vpadding[kb])
            new_r2[idx] = r2[idx]

    return eom.amplitudes_to_vector(new_r1, new_r2, kshift, kconserv)
Example #9
Source File: operations.py From simpleflow with MIT License
def compute_gradient(self, grad=None):
    ''' Compute and return the gradient for matrix multiplication.

    :param grad: The gradient of other operation wrt the matmul output.
    :type grad: number or a ndarray, default value is 1.0.
    '''
    # Get input values.
    x, y = [node.output_value for node in self.input_nodes]

    # Default gradient wrt the matmul output.
    if grad is None:
        grad = np.ones_like(self.output_value)

    # Gradients wrt inputs.
    dfdx = np.dot(grad, np.transpose(y))
    dfdy = np.dot(np.transpose(x), grad)

    return [dfdx, dfdy]
Example #10
Source File: operations.py From simpleflow with MIT License
def compute_gradient(self, grad=None):
    ''' Compute the gradient for negative operation wrt input value.

    :param grad: The gradient of other operation wrt the negative output.
    :type grad: ndarray.
    '''
    input_value = self.input_nodes[0].output_value

    if grad is None:
        grad = np.ones_like(self.output_value)

    output_shape = np.array(np.shape(input_value))
    output_shape[self.axis] = 1.0
    tile_scaling = np.shape(input_value) // output_shape
    grad = np.reshape(grad, output_shape)

    return np.tile(grad, tile_scaling)
Example #11
Source File: pano_lsd_align.py From HorizonNet with MIT License
def separatePano(panoImg, fov, x, y, imgSize=320):
    '''cut a panorama image into several separate views'''
    assert x.shape == y.shape
    if not isinstance(fov, np.ndarray):
        fov = fov * np.ones_like(x)

    sepScene = [
        {
            'img': imgLookAt(panoImg.copy(), xi, yi, imgSize, fovi),
            'vx': xi,
            'vy': yi,
            'fov': fovi,
            'sz': imgSize,
        }
        for xi, yi, fovi in zip(x, y, fov)
    ]

    return sepScene
Example #12
Source File: bottom_up.py From Dispersion-based-Clustering with MIT License
def linkage_calculation(self, dist, labels, penalty):
    cluster_num = len(self.label_to_images.keys())
    start_index = np.zeros(cluster_num, dtype=np.int)
    end_index = np.zeros(cluster_num, dtype=np.int)
    counts = 0
    i = 0
    for key in sorted(self.label_to_images.keys()):
        start_index[i] = counts
        end_index[i] = counts + len(self.label_to_images[key])
        counts = end_index[i]
        i = i + 1

    dist = dist.numpy()
    linkages = np.zeros([cluster_num, cluster_num])
    for i in range(cluster_num):
        for j in range(i, cluster_num):
            linkage = dist[start_index[i]:end_index[i], start_index[j]:end_index[j]]
            linkages[i, j] = np.average(linkage)

    linkages = linkages.T + linkages - linkages * np.eye(cluster_num)
    intra = linkages.diagonal()
    penalized_linkages = linkages + penalty * ((intra * np.ones_like(linkages)).T + intra).T
    return linkages, penalized_linkages
Example #13
Source File: test_optimization_methods.py From simnibs with GNU General Public License v3.0
def test_2_targets_field_component(self, optimization_variables_avg):
    l, Q, A = optimization_variables_avg
    l2 = l[::-1]
    l = np.vstack([l, l2])
    m = 2e-3
    m1 = 4e-3
    x = optimization_methods.optimize_field_component(l, max_el_current=m,
                                                      max_total_current=m1)
    l_avg = np.average(l, axis=0)
    x_sp = optimize_comp(l_avg, np.ones_like(l2), max_el_current=m,
                         max_total_current=m1)
    assert np.linalg.norm(x, 1) <= 2 * m1 + 1e-4
    assert np.all(np.abs(x) <= m + 1e-6)
    assert np.isclose(l_avg.dot(x), l_avg.dot(x_sp), rtol=1e-4, atol=1e-4)
    assert np.isclose(np.sum(x), 0)
Example #14
Source File: visualization_utils.py From vehicle_counting_tensorflow with MIT License
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
    """Draws mask on an image.

    Args:
        image: uint8 numpy array with shape (img_height, img_height, 3)
        mask: a uint8 numpy array of shape (img_height, img_height) with
            values between either 0 or 1.
        color: color to draw the keypoints with. Default is red.
        alpha: transparency value between 0 and 1. (default: 0.4)

    Raises:
        ValueError: On incorrect data type for image or masks.
    """
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if mask.dtype != np.uint8:
        raise ValueError('`mask` not of type np.uint8')
    if np.any(np.logical_and(mask != 1, mask != 0)):
        raise ValueError('`mask` elements should be in [0, 1]')
    if image.shape[:2] != mask.shape:
        raise ValueError('The image has spatial dimensions %s but the mask has '
                         'dimensions %s' % (image.shape[:2], mask.shape))
    rgb = ImageColor.getrgb(color)
    pil_image = Image.fromarray(image)

    solid_color = np.expand_dims(
        np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
    pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
    pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
    pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
    np.copyto(image, np.array(pil_image.convert('RGB')))
Example #15
Source File: visualization_utils.py From vehicle_counting_tensorflow with MIT License
def draw_mask_on_image_array(image, mask, color='red', alpha=0.7):
    """Draws mask on an image.

    Args:
        image: uint8 numpy array with shape (img_height, img_height, 3)
        mask: a uint8 numpy array of shape (img_height, img_height) with
            values between either 0 or 1.
        color: color to draw the keypoints with. Default is red.
        alpha: transparency value between 0 and 1. (default: 0.7)

    Raises:
        ValueError: On incorrect data type for image or masks.
    """
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if mask.dtype != np.uint8:
        raise ValueError('`mask` not of type np.uint8')
    if np.any(np.logical_and(mask != 1, mask != 0)):
        raise ValueError('`mask` elements should be in [0, 1]')
    rgb = ImageColor.getrgb(color)
    pil_image = Image.fromarray(image)

    solid_color = np.expand_dims(
        np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
    pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
    pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
    pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
    np.copyto(image, np.array(pil_image.convert('RGB')))
Example #16
Source File: fancy.py From brainforge with GNU General Public License v3.0
def backpropagate(self, delta: np.ndarray) -> np.ndarray:
    output = delta * self.mask
    self.mask = np.ones_like(self.mask) * self.dropchance
    return output
Example #17
Source File: program_executor.py From NSCL-PyTorch-Release with MIT License
def relate(self, scene, x, f):
    relations = scene['relationships']
    t = x.argmax(-1)
    assert len(f) == 1
    f = f[0]
    y = np.ones_like(x)
    for i in range(len(y)):
        if i not in relations[f][t]:
            y[i] = 0
    return y
Example #18
Source File: test_datetime.py From recruit with Apache License 2.0
def test_datetime_like(self):
    a = np.array([3], dtype='m8[4D]')
    b = np.array(['2012-12-21'], dtype='M8[D]')

    assert_equal(np.ones_like(a).dtype, a.dtype)
    assert_equal(np.zeros_like(a).dtype, a.dtype)
    assert_equal(np.empty_like(a).dtype, a.dtype)

    assert_equal(np.ones_like(b).dtype, b.dtype)
    assert_equal(np.zeros_like(b).dtype, b.dtype)
    assert_equal(np.empty_like(b).dtype, b.dtype)
Example #19
Source File: program_executor.py From NSCL-PyTorch-Release with MIT License
def filter(self, scene, x, filters):
    objects = scene['objects']
    y = np.ones_like(x)
    for i, o in enumerate(objects):
        for f in filters:
            attr = gdef.concept2attribute[f]
            if (isinstance(o[attr], six.string_types) and o[attr] != f) or (isinstance(o[attr], (tuple, list)) and f not in o[attr]):
                y[i] = 0
                break
    return np.minimum(x, y)
Example #20
Source File: utils.py From sadl with MIT License
def compute_roc_auc(test_sa, adv_sa, split=1000):
    tr_test_sa = np.array(test_sa[:split])
    tr_adv_sa = np.array(adv_sa[:split])
    tr_values = np.concatenate(
        (tr_test_sa.reshape(-1, 1), tr_adv_sa.reshape(-1, 1)), axis=0
    )
    tr_labels = np.concatenate(
        (np.zeros_like(tr_test_sa), np.ones_like(tr_adv_sa)), axis=0
    )

    lr = LogisticRegressionCV(cv=5, n_jobs=-1).fit(tr_values, tr_labels)

    ts_test_sa = np.array(test_sa[split:])
    ts_adv_sa = np.array(adv_sa[split:])
    values = np.concatenate(
        (ts_test_sa.reshape(-1, 1), ts_adv_sa.reshape(-1, 1)), axis=0
    )
    labels = np.concatenate(
        (np.zeros_like(ts_test_sa), np.ones_like(ts_adv_sa)), axis=0
    )

    probs = lr.predict_proba(values)[:, 1]

    _, _, auc_score = compute_roc(
        probs_neg=probs[: (len(test_sa) - split)],
        probs_pos=probs[(len(test_sa) - split) :],
    )

    return auc_score
Example #21
Source File: test_numeric.py From recruit with Apache License 2.0
def test_ones_like(self):
    self.check_like_function(np.ones_like, 1)
Example #22
Source File: utils.py From sadl with MIT License
def compute_roc(probs_neg, probs_pos):
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    return fpr, tpr, auc_score
Example #23
Source File: timeresp_test.py From python-control with BSD 3-Clause "New" or "Revised" License
def test_lsim_double_integrator(self):
    # Note: scipy.signal.lsim fails if A is not invertible
    A = np.mat("0. 1.;0. 0.")
    B = np.mat("0.; 1.")
    C = np.mat("1. 0.")
    D = 0.
    sys = StateSpace(A, B, C, D)

    def check(u, x0, xtrue):
        _t, yout, xout = forced_response(sys, t, u, x0)
        np.testing.assert_array_almost_equal(xout, xtrue, decimal=6)
        ytrue = np.squeeze(np.asarray(C.dot(xtrue)))
        np.testing.assert_array_almost_equal(yout, ytrue, decimal=6)

    # test with zero input
    npts = 10
    t = np.linspace(0, 1, npts)
    u = np.zeros_like(t)
    x0 = np.array([2., 3.])
    xtrue = np.zeros((2, npts))
    xtrue[0, :] = x0[0] + t * x0[1]
    xtrue[1, :] = x0[1]
    check(u, x0, xtrue)

    # test with step input
    u = np.ones_like(t)
    xtrue = np.array([0.5 * t**2, t])
    x0 = np.array([0., 0.])
    check(u, x0, xtrue)

    # test with linear input
    u = t
    xtrue = np.array([1./6. * t**3, 0.5 * t**2])
    check(u, x0, xtrue)
Example #24
Source File: test_umath.py From recruit with Apache License 2.0
def test_integer_power_with_integer_zero_exponent(self):
    dtypes = np.typecodes['Integer']
    for dt in dtypes:
        arr = np.arange(-10, 10, dtype=dt)
        assert_equal(np.power(arr, 0), np.ones_like(arr))

    dtypes = np.typecodes['UnsignedInteger']
    for dt in dtypes:
        arr = np.arange(10, dtype=dt)
        assert_equal(np.power(arr, 0), np.ones_like(arr))
Example #25
Source File: bottom_up.py From Dispersion-based-Clustering with MIT License
def select_merge_data_v2(self, u_feas, labels, linkages):
    linkages += np.tril(100000 * np.ones_like(linkages))

    ind = np.unravel_index(np.argsort(linkages, axis=None), linkages.shape)
    idx1 = ind[0]
    idx2 = ind[1]
    return idx1, idx2
Example #26
Source File: utils.py From tangent with Apache License 2.0
def test_reverse_array(func, motion, optimized, preserve_result, *args):
    """Test gradients of functions with NumPy-compatible signatures."""

    def tangent_func():
        y = func(*deepcopy(args))
        if np.array(y).size > 1:
            init_grad = np.ones_like(y)
        else:
            init_grad = 1
        func.__globals__['np'] = np
        df = tangent.autodiff(
            func,
            mode='reverse',
            motion=motion,
            optimized=optimized,
            preserve_result=preserve_result,
            verbose=1)
        if motion == 'joint':
            return df(*deepcopy(args) + (init_grad,))
        return df(*deepcopy(args), init_grad=init_grad)

    def reference_func():
        func.__globals__['np'] = ag_np
        if preserve_result:
            val, gradval = ag_value_and_grad(func)(*deepcopy(args))
            return gradval, val
        else:
            return ag_grad(func)(*deepcopy(args))

    def backup_reference_func():
        func.__globals__['np'] = np
        df_num = numeric_grad(func)
        gradval = df_num(*deepcopy(args))
        if preserve_result:
            val = func(*deepcopy(args))
            return gradval, val
        else:
            return gradval

    assert_result_matches_reference(tangent_func, reference_func,
                                    backup_reference_func)
Example #27
Source File: utils.py From tangent with Apache License 2.0
def numeric_grad(func, eps=1e-6):
    """Generate a finite-differences gradient of function `f`.

        def f(x, *args):
            ...
            return scalar

        g = numeric_grad(f, eps=1e-4)
        finite_difference_grad_of_x = g(x, *args)

    Adapted from github.com/hips/autograd
    """

    def g(x, *args):
        fd_grad, unflatten_fd = flatten(tangent.init_grad(x))
        y = func(deepcopy(x), *args)
        seed = np.ones_like(y)
        for d in range(fd_grad.size):
            x_flat, unflatten_x = flatten(deepcopy(x))
            x_flat[d] += eps / 2
            a = np.array(func(unflatten_x(x_flat), *args))
            x_flat, unflatten_x = flatten(deepcopy(x))
            x_flat[d] -= eps / 2
            b = np.array(func(unflatten_x(x_flat), *args))
            fd_grad[d] = np.dot((a - b) / eps, seed)
        return unflatten_fd(fd_grad)

    return g
Example #28
Source File: test_umath.py From recruit with Apache License 2.0
def test_integer_power_of_1(self):
    dtypes = np.typecodes['AllInteger']
    for dt in dtypes:
        arr = np.arange(10, dtype=dt)
        assert_equal(np.power(1, arr), np.ones_like(arr))
Example #29
Source File: test_optimization_methods.py From simnibs with GNU General Public License v3.0
def test_both_constraints_infeasible(self, optimization_variables_avg):
    l, Q, A = optimization_variables_avg
    m = 2e-3
    m1 = 4e-3
    f = 4e-2
    x = optimization_methods.optimize_focality(l, Q, f, max_el_current=m,
                                               max_total_current=m1)
    x_sp = optimize_comp(l, np.ones_like(l), max_el_current=m,
                         max_total_current=m1)
    assert np.linalg.norm(x, 1) <= 2 * m1 + 1e-4
    assert np.all(np.abs(x) <= m + 1e-4)
    assert np.isclose(np.sum(x), 0)
    assert np.allclose(l.dot(x), l.dot(x_sp), rtol=1e-4, atol=1e-5)
Example #30
Source File: program_executor.py From NSCL-PyTorch-Release with MIT License
def relate_ae(self, scene, x, attr):
    objects = scene['objects']
    t = x.argmax(-1)
    y = np.ones_like(x)
    for i, o in enumerate(objects):
        if o[attr] != objects[t][attr] or i == t:
            y[i] = 0
    return y