Python numpy.isscalar() Examples
The following are 30 code examples of numpy.isscalar(), drawn from open-source projects. Each example notes its source file, originating project, and license.
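For reference, here is a quick sketch of what numpy.isscalar() accepts (behavior documented by NumPy; note in particular that 0-d arrays do not count as scalars):

>>> import numpy as np
>>> np.isscalar(3.1)                # plain Python numbers are scalars
True
>>> np.isscalar(np.float64(3.1))    # NumPy scalar types too
True
>>> np.isscalar('numpy')            # so are strings
True
>>> np.isscalar([3.1])              # lists are not
False
>>> np.isscalar(np.array(3.1))      # note: 0-d arrays are NOT scalars
False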
Example #1
Source File: residual_plots.py From gmpe-smtk with GNU Affero General Public License v3.0
def _tojson(*numpy_objs):
    '''Utility function which returns a list where each element of
    numpy_objs is converted to its python equivalent (float or list)'''
    ret = []
    # problem: browsers might not be happy with JSON 'NAN', so convert
    # NaNs to None. Unfortunately, the conversion must be done element-wise
    # in numpy (there seems to be no pandas NA filter):
    for obj in numpy_objs:
        isscalar = np.isscalar(obj)
        nan_indices = None if isscalar else \
            np.argwhere(np.isnan(obj)).flatten()
        # note: numpy.float64(N).tolist() returns a python float, so:
        obj = None if isscalar and np.isnan(obj) else obj.tolist()
        if nan_indices is not None:
            for idx in nan_indices:
                obj[idx] = None
        ret.append(obj)
    return ret  # tuple(_.tolist() for _ in numpy_objs)
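A quick sanity check of both branches (a hypothetical call, assuming `import numpy as np`):

>>> _tojson(np.float64('nan'), np.array([1.0, np.nan]))
[None, [1.0, None]]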
Example #2
Source File: test_distributions.py From aboleth with Apache License 2.0
def test_kl_gaussian_normal(random):
    """Test Gaussian/Normal KL."""
    dim = (5, 10)
    Dim = (5, 10, 10)

    mu0 = random.randn(*dim).astype(np.float32)
    L0 = random_chol(Dim)
    q = tfp.distributions.MultivariateNormalTriL(mu0, L0)

    mu1 = random.randn(*dim).astype(np.float32)
    std1 = 1.0
    L1 = [(std1 * np.eye(dim[1])).astype(np.float32) for _ in range(dim[0])]
    p = tf.distributions.Normal(mu1, std1)

    KL = kl_sum(q, p)
    KLr = KLdiv(mu0, L0, mu1, L1)

    tc = tf.test.TestCase()
    with tc.test_session():
        kl = KL.eval()
        assert np.isscalar(kl)
        assert np.allclose(kl, KLr)
Example #3
Source File: async_.py From chainerrl with MIT License
def extract_params_as_shared_arrays(link):
    assert isinstance(link, chainer.Link)
    shared_arrays = {}
    for param_name, param in link.namedparams():
        typecode = param.array.dtype.char
        shared_arrays[param_name] = mp.RawArray(typecode, param.array.ravel())

    for persistent_name, persistent in chainerrl.misc.namedpersistent(link):
        if isinstance(persistent, np.ndarray):
            typecode = persistent.dtype.char
            shared_arrays[persistent_name] = mp.RawArray(
                typecode, persistent.ravel())
        else:
            assert np.isscalar(persistent)
            # Wrap by a 1-dim array because multiprocessing.RawArray does not
            # accept a 0-dim array.
            persistent_as_array = np.asarray([persistent])
            typecode = persistent_as_array.dtype.char
            shared_arrays[persistent_name] = mp.RawArray(
                typecode, persistent_as_array)
    return shared_arrays
Example #4
Source File: utils.py From scanorama with MIT License
def handle_zeros_in_scale(scale, copy=True):
    '''Makes sure that whenever scale is zero, we handle it correctly.

    This happens in most scalers when we have constant features.

    Adapted from sklearn.preprocessing.data'''
    # if we are fitting on 1D arrays, scale might be a scalar
    if np.isscalar(scale):
        if scale == .0:
            scale = 1.
        return scale
    elif isinstance(scale, np.ndarray):
        if copy:
            # New array to avoid side-effects
            scale = scale.copy()
        scale[scale == 0.0] = 1.0
        return scale
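Both branches in action (a small illustrative session, assuming `import numpy as np`):

>>> handle_zeros_in_scale(0.0)       # scalar path
1.0
>>> handle_zeros_in_scale(np.array([0.0, 2.0]))  # array path
array([1., 2.])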
Example #5
Source File: acer.py From chainerrl with MIT License
def compute_policy_gradient_full_correction(
        action_distrib, action_distrib_mu, action_value, v,
        truncation_threshold):
    """Compute off-policy bias correction term wrt all actions."""
    assert truncation_threshold is not None
    assert np.isscalar(v)
    with chainer.no_backprop_mode():
        rho_all_inv = compute_full_importance(action_distrib_mu,
                                              action_distrib)
        correction_weight = (
            np.maximum(1 - truncation_threshold * rho_all_inv,
                       np.zeros_like(rho_all_inv)) *
            action_distrib.all_prob.array[0])
        correction_advantage = action_value.q_values.array[0] - v
    return -F.sum(correction_weight *
                  action_distrib.all_log_prob *
                  correction_advantage, axis=1)
Example #6
Source File: acer.py From chainerrl with MIT License
def compute_policy_gradient_sample_correction(
        action_distrib, action_distrib_mu, action_value, v,
        truncation_threshold):
    """Compute off-policy bias correction term wrt a sampled action."""
    assert np.isscalar(v)
    assert truncation_threshold is not None
    with chainer.no_backprop_mode():
        sample_action = action_distrib.sample().array
        rho_dash_inv = compute_importance(
            action_distrib_mu, action_distrib, sample_action)
        if (truncation_threshold > 0 and
                rho_dash_inv >= 1 / truncation_threshold):
            return chainer.Variable(np.asarray([0], dtype=np.float32))
        correction_weight = max(0, 1 - truncation_threshold * rho_dash_inv)
        assert correction_weight <= 1
        q = float(action_value.evaluate_actions(sample_action).array[0])
        correction_advantage = q - v
    return -(correction_weight *
             action_distrib.log_prob(sample_action) *
             correction_advantage)
Example #7
Source File: fisheye.py From DualFisheye with MIT License
def add_pixels(self, uv_px, img1d, weight=None):
    # Lookup row & column for each in-bounds coordinate.
    mask = self.get_mask(uv_px)
    xx = uv_px[0, mask]
    yy = uv_px[1, mask]
    # Update matrix according to assigned weight.
    if weight is None:
        img1d[mask] = self.img[yy, xx]
    elif np.isscalar(weight):
        img1d[mask] += self.img[yy, xx] * weight
    else:
        w1 = np.asmatrix(weight, dtype='float32')
        w3 = w1.transpose() * np.ones((1, 3))
        img1d[mask] += np.multiply(self.img[yy, xx], w3[mask])


# A panorama image made from several FisheyeImage sources.
# TODO: Add support for supersampled anti-aliasing filters.
Example #8
Source File: test_layers.py From aboleth with Apache License 2.0
def test_dense_outputs(dense, make_data):
    """Make sure the dense layers output expected dimensions."""
    x, _, _ = make_data
    S = 3

    x_, X_ = _make_placeholders(x, S)
    N = x.shape[0]

    Phi, KL = dense(output_dim=D)(X_)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()
        P = Phi.eval(feed_dict={x_: x})
        assert P.shape == (S, N, D)
        assert P.dtype == np.float32
        assert np.isscalar(KL.eval(feed_dict={x_: x}))
Example #9
Source File: test_layers.py From aboleth with Apache License 2.0
def test_dense_embeddings(make_categories, reps, layer):
    """Test the embedding layer."""
    x, K = make_categories
    x = np.repeat(x, reps, axis=-1)
    N = len(x)
    S = 3
    x_, X_ = _make_placeholders(x, S, tf.int32)
    output, reg = layer(output_dim=D, n_categories=K)(X_)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()
        r = reg.eval()
        assert np.isscalar(r)
        assert r >= 0
        Phi = output.eval(feed_dict={x_: x})
        assert Phi.shape == (S, N, D * reps)
Example #10
Source File: shared.py From respy with MIT License
def convert_dictionary_keys_to_dense_indices(dictionary):
    """Convert the keys to tuples containing integers.

    Example
    -------
    >>> dictionary = {(0.0, 1): 0, 2: 1}
    >>> convert_dictionary_keys_to_dense_indices(dictionary)
    {(0, 1): 0, (2,): 1}

    """
    new_dictionary = {}
    for key, val in dictionary.items():
        new_key = (int(key),) if np.isscalar(key) else tuple(int(i) for i in key)
        new_dictionary[new_key] = val

    return new_dictionary
Example #11
Source File: random_test.py From tenpy with GNU General Public License v3.0
def gen_random_legcharge_nq(chinfo, ind_len, n_qsector):
    """return a random (unsorted) LegCharge with a given number of charge sectors.

    `n_qsector` gives the (desired) number of sectors for each of the charges.
    """
    if np.isscalar(n_qsector):
        n_qsector = [n_qsector] * chinfo.qnumber
    n_qsector = np.asarray(n_qsector, dtype=np.intp)
    if n_qsector.shape != (chinfo.qnumber, ):
        raise ValueError
    slices = rand_partitions(0, ind_len, np.prod(n_qsector, dtype=int))
    qs = np.zeros((len(slices) - 1, len(n_qsector)), int)
    q_combos = [a for a in it.product(*[range(-(nq // 2), nq // 2 + 1) for nq in n_qsector])]
    qs = np.array(q_combos)[rand_distinct_int(0, len(q_combos) - 1, len(slices) - 1), :]
    qs = chinfo.make_valid(qs)
    return charges.LegCharge.from_qind(chinfo, slices, qs)
Example #12
Source File: binvox_rw.py From 3D-R2N2 with MIT License
def sparse_to_dense(voxel_data, dims, dtype=np.bool):
    if voxel_data.ndim != 2 or voxel_data.shape[0] != 3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims] * 3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(np.int)
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:, valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out

# def get_linear_index(x, y, z, dims):
#     """ Assuming xzy order. (y increasing fastest.
#     TODO ensure this is right when dims are not all same
#     """
#     return x*(dims[1]*dims[2]) + z*dims[1] + y
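A minimal illustration (assuming `import numpy as np`): two voxels on the diagonal of a 2x2x2 grid, with the scalar `dims` broadcast to all three axes:

>>> coords = np.array([[0, 1],
...                    [0, 1],
...                    [0, 1]])          # voxels (0, 0, 0) and (1, 1, 1)
>>> out = sparse_to_dense(coords, 2)    # scalar dims -> (2, 2, 2) grid
>>> out.shape
(2, 2, 2)
>>> out[0, 0, 0], out[1, 1, 1]
(True, True)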
Example #13
Source File: tensordot_npc.py From tenpy with GNU General Public License v3.0
def gen_random_legcharge_nq(chinfo, ind_len, n_qsector):
    """return a random (unsorted) LegCharge with a given number of charge sectors.

    `n_qsector` gives the (desired) number of sectors for each of the charges.
    """
    if np.isscalar(n_qsector):
        n_qsector = [n_qsector] * chinfo.qnumber
    n_qsector = np.asarray(n_qsector, dtype=np.intp)
    if n_qsector.shape != (chinfo.qnumber, ):
        raise ValueError
    slices = rand_partitions(0, ind_len, np.prod(n_qsector, dtype=int))
    qs = np.zeros((len(slices) - 1, len(n_qsector)), int)
    q_combos = [a for a in it.product(*[range(-(nq // 2), nq // 2 + 1) for nq in n_qsector])]
    qs = np.array(q_combos)[rand_distinct_int(0, len(q_combos) - 1, len(slices) - 1), :]
    qs = chinfo.make_valid(qs)
    return npc.LegCharge.from_qind(chinfo, slices, qs)
Example #14
Source File: discrete.py From vampyre with MIT License
def __init__(self, zval, pz, shape, var_axes=(0,),
             is_complex=False, name=None):

    # Convert scalars to arrays
    if np.isscalar(zval):
        zval = np.array([zval])
    if np.isscalar(pz):
        pz = np.array([pz])

    # Set parameters of base estimator
    dtype = zval.dtype
    BaseEst.__init__(self, shape=shape, var_axes=var_axes, dtype=dtype,
                     name=name, type_name='DiscreteEst', nvars=1,
                     cost_avail=True)

    # Set parameters
    self.zval = zval
    self.pz = pz
    self.shape = shape
    self.is_complex = is_complex
    self.fz = -np.log(pz)
Example #15
Source File: image_stats.py From skan with BSD 3-Clause "New" or "Revised" License
def image_summary(skeleton, *, spacing=1):
    """Compute some summary statistics for an image.

    Parameters
    ----------
    skeleton : array, shape (M, N)
        The input image.

    Other Parameters
    ----------------
    spacing : float or array of float, shape (`skeleton.ndim`,)
        The resolution along each axis of `skeleton`.

    Returns
    -------
    stats : pandas.DataFrame
        Selected statistics about the image.
    """
    stats = pd.DataFrame()
    stats['scale'] = [spacing]
    g, coords, degimg = csr.skeleton_to_csgraph(skeleton, spacing=spacing)
    degrees = np.diff(g.indptr)
    num_junctions = np.sum(degrees > 2)
    stats['number of junctions'] = num_junctions
    pixel_area = (spacing ** skeleton.ndim if np.isscalar(spacing)
                  else np.prod(spacing))
    stats['area'] = np.prod(skeleton.shape) * pixel_area
    stats['junctions per unit area'] = (stats['number of junctions'] /
                                        stats['area'])
    sizes = mesh_sizes(skeleton)
    stats['average mesh area'] = np.mean(sizes)
    stats['median mesh area'] = np.median(sizes)
    stats['mesh area standard deviation'] = np.std(sizes)

    structure = np.ones((3,) * skeleton.ndim)
    stats['number of disjoint skeletons'] = ndi.label(skeleton, structure)[1]

    return stats
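The np.isscalar(spacing) branch computes the area of a single pixel for either calling convention; a sketch of just that expression for a 2-D image:

>>> spacing, ndim = 2.0, 2
>>> spacing ** ndim if np.isscalar(spacing) else np.prod(spacing)  # isotropic
4.0
>>> spacing = np.array([1.0, 2.0])
>>> spacing ** ndim if np.isscalar(spacing) else np.prod(spacing)  # per-axis
2.0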
Example #16
Source File: np_conserved.py From tenpy with GNU General Public License v3.0
def iscale_prefactor(self, prefactor):
    """``self *= prefactor`` for scalar `prefactor`.

    Note that we allow the type of `self` to change if necessary.
    """
    if not np.isscalar(prefactor):
        raise ValueError("prefactor is not scalar: {0!r}".format(type(prefactor)))
    if prefactor == 0.:
        self._data = []
        self._qdata = np.empty((0, self.rank), np.intp)
        self._qdata_sorted = True
        return self
    return self.iunary_blockwise(np.multiply, prefactor)
Example #17
Source File: np_conserved.py From tenpy with GNU General Public License v3.0
def iadd_prefactor_other(self, prefactor, other):
    """``self += prefactor * other`` for scalar `prefactor` and :class:`Array` `other`.

    Note that we allow the type of `self` to change if necessary.
    Moreover, if `self` and `other` have the same labels in different order,
    other gets **transposed** before the action.
    """
    if not isinstance(other, Array) or not np.isscalar(prefactor):
        raise ValueError("wrong argument types: {0!r}, {1!r}".format(
            type(prefactor), type(other)))
    self.ibinary_blockwise(np.add, other.__mul__(prefactor))
    return self
Example #18
Source File: csr.py From skan with BSD 3-Clause "New" or "Revised" License
def __init__(self, skeleton_image, *, spacing=1, source_image=None,
             _buffer_size_offset=None, keep_images=True,
             unique_junctions=True):
    graph, coords, degrees = skeleton_to_csgraph(
        skeleton_image, spacing=spacing, unique_junctions=unique_junctions)
    if np.issubdtype(skeleton_image.dtype, np.float_):
        pixel_values = ndi.map_coordinates(skeleton_image, coords.T, order=3)
    else:
        pixel_values = None
    self.graph = graph
    self.nbgraph = csr_to_nbgraph(graph, pixel_values)
    self.coordinates = coords
    self.paths = _build_skeleton_path_graph(
        self.nbgraph, _buffer_size_offset=_buffer_size_offset)
    self.n_paths = self.paths.shape[0]
    self.distances = np.empty(self.n_paths, dtype=float)
    self._distances_initialized = False
    self.skeleton_image = None
    self.source_image = None
    self.degrees_image = degrees
    self.degrees = np.diff(self.graph.indptr)
    self.spacing = (np.asarray(spacing) if not np.isscalar(spacing)
                    else np.full(skeleton_image.ndim, spacing))
    if keep_images:
        self.skeleton_image = skeleton_image
        self.source_image = source_image
Example #19
Source File: test_footprint_precision.py From buzzard with Apache License 2.0
def test_intersection_and_equals_and_of_extent(fp, env):
    if env < fp._significant_min:
        pytest.skip()
    eps = np.abs(fp.coords).max() * 10 ** -buzz.env.significant
    cwr = itertools.combinations_with_replacement

    for ax, ay, bx, by in cwr([-eps * LESS_ERROR, 0, +eps * LESS_ERROR], 4):
        deltas = np.asarray([ax, ay, bx, by])
        assert fp.almost_equals(fp & sg.LineString([fp.tl + [ax, ay], fp.br + [bx, by]]))
        assert fp.almost_equals(fp.of_extent(fp.extent + deltas, fp.scale))
        if (np.asarray([ax, ay, bx, by]) != 0).any():
            assert fp != fp.of_extent(fp.extent + deltas / LESS_ERROR * MORE_ERROR, fp.scale)

    for slacka, slackb in itertools.product(
            [0,
             -fp.pxvec / np.linalg.norm(fp.pxvec) * eps * MORE_ERROR,
             -fp.pxlrvec / np.linalg.norm(fp.pxlrvec) * eps * MORE_ERROR,
             -fp.pxtbvec / np.linalg.norm(fp.pxtbvec) * eps * MORE_ERROR],
            [0,
             fp.pxvec / np.linalg.norm(fp.pxvec) * eps * MORE_ERROR,
             fp.pxlrvec / np.linalg.norm(fp.pxlrvec) * eps * MORE_ERROR,
             fp.pxtbvec / np.linalg.norm(fp.pxtbvec) * eps * MORE_ERROR],
    ):
        if np.isscalar(slacka) and np.isscalar(slackb):
            continue
        assert fp != fp.dilate(2) & sg.LineString([fp.tl + slacka, fp.br + slackb])
Example #20
Source File: test_envs.py From adversarial-policies with MIT License
def test_env(env_from_spec):
    """Based on Gym smoke test in gym.envs.tests.test_envs."""
    ob_space = env_from_spec.observation_space
    act_space = env_from_spec.action_space
    ob = env_from_spec.reset()
    assert ob_space.contains(ob), "Reset observation: {!r} not in space".format(ob)
    a = act_space.sample()
    ob, reward, done, _info = env_from_spec.step(a)
    assert ob_space.contains(ob), "Step observation: {!r} not in space".format(ob)
    assert isinstance(done, bool), "Expected {} to be a boolean".format(done)

    if hasattr(env_from_spec, "num_agents"):  # multi agent environment
        assert len(reward) == env_from_spec.num_agents
        assert isinstance(env_from_spec.observation_space, Tuple), "Observations should be Tuples"
        assert isinstance(env_from_spec.action_space, Tuple), "Actions should be Tuples"
        assert len(env_from_spec.observation_space.spaces) == env_from_spec.num_agents
        assert len(env_from_spec.action_space.spaces) == env_from_spec.num_agents
    else:  # pragma: no cover
        assert np.isscalar(reward), "{} is not a scalar for {}".format(reward, env_from_spec)

    for mode in env_from_spec.metadata.get("render.modes", []):
        env_from_spec.render(mode=mode)

    # Make sure we can render the environment after close.
    for mode in env_from_spec.metadata.get("render.modes", []):
        env_from_spec.render(mode=mode)


# Test VecMultiEnv classes
Example #21
Source File: em.py From typhon with MIT License
def zeeman_transitions(ju, jl, type):
    """Find possible mu and ml for valid ju and jl for a given transition
    polarization.

    Parameters:
        ju (scalar): Upper level J

        jl (scalar): Lower level J

        type (string): "Pi", "S+", or "S-" for relevant polarization type

    Returns:
        tuple: MU, ML arrays for given Js and polarization type
    """
    assert np.isscalar(ju) and np.isscalar(jl), "non-scalar J not supported"
    assert type.lower() in ["pi", "s+", "s-"], "unknown transition type"
    assert ju - jl in [-1, 0, 1], "delta-J should belong to {-1, 0, 1}"
    assert ju > 0 and jl >= 0, "only for positive ju and non-negative jl"

    if type.lower() == "pi":
        J = min(ju, jl)
        return np.arange(-J, J + 1), np.arange(-J, J + 1)
    elif type.lower() == "s+":
        if ju < jl:
            return np.arange(-ju, ju + 1), np.arange(-ju + 1, ju + 2)
        elif ju == jl:
            return np.arange(-ju, ju), np.arange(-ju + 1, ju + 1)
        else:
            return np.arange(-ju, jl), np.arange(-ju + 1, jl + 1)
    elif type.lower() == "s-":
        if ju < jl:
            return np.arange(-ju, ju + 1), np.arange(-jl, ju)
        elif ju == jl:
            return np.arange(-ju + 1, ju + 1), np.arange(-ju, ju)
        else:
            return np.arange(-ju + 2, ju + 1), np.arange(-ju + 1, ju)
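A couple of illustrative calls:

>>> zeeman_transitions(1, 0, "Pi")    # J = min(ju, jl) = 0
(array([0]), array([0]))
>>> zeeman_transitions(1, 1, "S+")
(array([-1,  0]), array([0, 1]))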
Example #22
Source File: atlas.py From ibllib with MIT License
def __init__(self, nxyz, xyz0=[0, 0, 0], dxyz=[1, 1, 1]):
    if np.isscalar(dxyz):
        dxyz = [dxyz for i in range(3)]
    self.x0, self.y0, self.z0 = list(xyz0)
    self.dx, self.dy, self.dz = list(dxyz)
    self.nx, self.ny, self.nz = list(nxyz)
Example #23
Source File: at.py From mars with Apache License 2.0
def __getitem__(self, indexes):
    if not isinstance(indexes, tuple):
        indexes = (indexes,)

    for index in indexes:
        if not np.isscalar(index):
            raise ValueError('Invalid call for scalar access (getting)!')

    return self._loc[indexes]
Example #24
Source File: getitem.py From mars with Apache License 2.0
def series_getitem(series, labels, combine_size=None):
    if isinstance(labels, list) or np.isscalar(labels):
        op = SeriesIndex(labels=labels, combine_size=combine_size)
        return op(series, name=series.name)
    elif isinstance(labels, _list_like_types) and astensor(labels).dtype == np.bool:
        return series.loc[labels]
    else:
        raise NotImplementedError('type %s is not supported for getitem' % type(labels))
Example #25
Source File: agent_utils.py From Counterfactual-StoryRW with MIT License
def __init__(self, shape=None, low=None, high=None, dtype=None):
    if low is None:
        low = -float('inf')
    if high is None:
        high = float('inf')
    if shape is None:
        low = np.asarray(low)
        high = np.asarray(high)
        if low.shape != high.shape:
            raise ValueError('`low` and `high` must have the same shape.')
        shape = low.shape
    else:
        shape = tuple(shape)
    if np.isscalar(low):
        low = low + np.zeros(shape, dtype=dtype)
    if np.isscalar(high):
        high = high + np.zeros(shape, dtype=dtype)
    if shape != low.shape or shape != high.shape:
        raise ValueError(
            'Shape inconsistent: shape={}, low.shape={}, high.shape={}'
            .format(shape, low.shape, high.shape))
    if dtype is None:
        dtype = low.dtype
    dtype = np.dtype(dtype)
    low = low.astype(dtype)
    high = high.astype(dtype)
    self._shape = shape
    self._low = low
    self._high = high
    self._dtype = dtype
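The two isscalar checks broadcast scalar bounds to the requested shape. A sketch of that behavior, assuming the enclosing class is a Box-style space (the name `Box` here is hypothetical):

>>> space = Box(shape=(2,), low=0.0, high=1.0)  # hypothetical class name
>>> space._low, space._high
(array([0., 0.]), array([1., 1.]))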
Example #26
Source File: utils.py From galario with GNU Lesser General Public License v3.0
def assert_allclose(x, y, rtol=1e-10, atol=1e-8):
    """Drop-in replacement for `numpy.testing.assert_allclose` that shows
    the nonmatching elements."""
    if np.isscalar(x) and np.isscalar(y):
        return np.testing.assert_allclose(x, y, rtol=rtol, atol=atol)

    if x.shape != y.shape:
        raise AssertionError("Shape mismatch: %s vs %s" % (str(x.shape), str(y.shape)))

    d = ~np.isclose(x, y, rtol, atol)
    if np.any(d):
        miss = np.where(d)[0]
        raise AssertionError("""Mismatch of %d elements (%g %%) at the level of rtol=%g, atol=%g
%s
%s
%s""" % (len(miss), len(miss) / x.size, rtol, atol, repr(miss), str(x[d]), str(y[d])))
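Usage mirrors the NumPy original (a sketch; the failing call reports the offending indices and values):

>>> assert_allclose(1.0, 1.0 + 1e-12)   # scalar path: defers to numpy.testing
>>> assert_allclose(np.array([1.0, 2.0]), np.array([1.0, 2.5]))
Traceback (most recent call last):
    ...
AssertionError: Mismatch of 1 elements ...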
Example #27
Source File: test_numeric.py From recruit with Apache License 2.0
def test_scalar_return(self):
    assert_(np.isscalar(np.isclose(1, 1)))
Example #28
Source File: test_numeric.py From recruit with Apache License 2.0
def tst_isclose_allclose(self, x, y):
    msg = "isclose.all() and allclose aren't same for %s and %s"
    msg2 = "isclose and allclose aren't same for %s and %s"
    if np.isscalar(x) and np.isscalar(y):
        assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
    else:
        assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y),
                           msg % (x, y))
Example #29
Source File: test_losses.py From aboleth with Apache License 2.0
def test_map_likelihood(make_graph):
    """Test for expected output dimensions from deepnet."""
    x, y, N, X_, Y_, N_, layers = make_graph
    nn, reg = layers(X=X_)
    log_like = tf.distributions.Normal(nn, scale=1.).log_prob(Y_)
    loss = ab.max_posterior(log_like, reg)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()
        L = loss.eval(feed_dict={X_: x, Y_: y})
        assert np.isscalar(L)
Example #30
Source File: test_numeric.py From recruit with Apache License 2.0
def test_isscalar(self):
    assert_(np.isscalar(3.1))
    assert_(np.isscalar(np.int16(12345)))
    assert_(np.isscalar(False))
    assert_(np.isscalar('numpy'))
    assert_(not np.isscalar([3.1]))
    assert_(not np.isscalar(None))

    # PEP 3141
    from fractions import Fraction
    assert_(np.isscalar(Fraction(5, 17)))
    from numbers import Number
    assert_(np.isscalar(Number()))
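One caveat worth remembering: np.isscalar() returns False for 0-d arrays, so when any zero-dimensional value should qualify, the NumPy documentation recommends testing np.ndim(x) == 0 instead:

>>> np.isscalar(np.array(3.1)), np.ndim(np.array(3.1)) == 0
(False, True)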