Python numpy.NAN Examples

The following are 30 code examples of numpy.NAN().
You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the numpy module.
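numpy.NAN is simply an alternate spelling of numpy.nan, the IEEE 754 floating-point not-a-number value (numpy.NaN is a third alias for the same object in NumPy 1.x; NumPy 2.0 removed the NAN and NaN spellings, so np.nan is the portable choice). Because NaN never compares equal to anything, including itself, code should test for it with numpy.isnan rather than ==. A minimal sketch illustrating the alias and that pitfall:

import numpy as np

# In NumPy 1.x all three names are bound to the same float('nan') object.
assert np.NAN is np.nan is np.NaN

a = np.array([1.0, np.NAN, 3.0])
print(a == np.NAN)   # [False False False] -- NaN never equals anything, not even itself
print(np.isnan(a))   # [False  True False] -- use isnan() to detect missing values
print(np.nansum(a))  # 4.0 -- NaN-aware reductions skip NaN entries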
Example #1
Source File: regions.py From diluvian with MIT License
def remask(self):
    """Reset the mask based on the seeded connected component.
    """
    body = self.to_body()
    if not body.is_seed_in_mask():
        return False

    new_mask_bin, bounds = body.get_seeded_component(CONFIG.postprocessing.closing_shape)

    new_mask_bin = new_mask_bin.astype(np.bool)
    mask_block = self.mask[list(map(slice, bounds[0], bounds[1]))].copy()

    # Clip any values not in the seeded connected component so that they
    # cannot not generate moves when rechecking.
    mask_block[~new_mask_bin] = np.clip(mask_block[~new_mask_bin], None, 0.9 * CONFIG.model.t_move)

    self.mask[:] = np.NAN
    self.mask[list(map(slice, bounds[0], bounds[1]))] = mask_block
    return True
Example #2
Source File: _multivariate.py From lambda-packs with MIT License
def mean(self, n, p):
    """
    Mean of the Multinomial distribution

    Parameters
    ----------
    %(_doc_default_callparams)s

    Returns
    -------
    mean : float
        The mean of the distribution
    """
    n, p, npcond = self._process_parameters(n, p)
    result = n[..., np.newaxis]*p
    return self._checkresult(result, npcond, np.NAN)
Example #3
Source File: _multivariate.py From GraphicDesignPatternByPython with MIT License
def mean(self, n, p):
    """
    Mean of the Multinomial distribution

    Parameters
    ----------
    %(_doc_default_callparams)s

    Returns
    -------
    mean : float
        The mean of the distribution
    """
    n, p, npcond = self._process_parameters(n, p)
    result = n[..., np.newaxis]*p
    return self._checkresult(result, npcond, np.NAN)
Example #4
Source File: test_constants.py From chainer with MIT License
def test_constants():
    assert chainerx.Inf is numpy.Inf
    assert chainerx.Infinity is numpy.Infinity
    assert chainerx.NAN is numpy.NAN
    assert chainerx.NINF is numpy.NINF
    assert chainerx.NZERO is numpy.NZERO
    assert chainerx.NaN is numpy.NaN
    assert chainerx.PINF is numpy.PINF
    assert chainerx.PZERO is numpy.PZERO
    assert chainerx.e is numpy.e
    assert chainerx.euler_gamma is numpy.euler_gamma
    assert chainerx.inf is numpy.inf
    assert chainerx.infty is numpy.infty
    assert chainerx.nan is numpy.nan
    assert chainerx.newaxis is numpy.newaxis
    assert chainerx.pi is numpy.pi
Example #5
Source File: unscented.py From bayestsa with Apache License 2.0
def __init__(self, x0, P0, Q, R, cor, f, h):
    self.Q = Q
    self.R = R
    self.cor = cor

    self.fa = lambda col: f(col[0], col[2])
    self.ha = lambda col: h(col[0], col[1])

    Pxx = P0
    Pxv = 0.

    self.xa = np.array(((x0,), (0.,), (0.,), (0.,)))
    self.Pa = np.array(((Pxx, Pxv,      0.,       0.),
                        (Pxv, self.R,   0.,       0.),
                        (0.,  0.,       self.Q,   self.cor),
                        (0.,  0.,       self.cor, self.R)))

    self.lastobservation = np.NAN
    self.predictedobservation = np.NAN
    self.innov = np.NAN
    self.innovcov = np.NAN
    self.gain = np.NAN

    self.loglikelihood = 0.0
Example #6
Source File: site_stability.py From CatLearn with GNU General Public License v3.0
def get_site_index(material, defect):
    """
    Given two trajectories with equal atom positions and one atom difference,
    determines the site index of the defect site.

    :param material:
    :param defect:
    :return: site index integer
    """
    matlist = material.get_positions()
    deflist = defect.get_positions()

    site_detected = []
    for pos in matlist:
        boollist = [np.allclose(pos, defpos, rtol=1e-03) for defpos in deflist]
        site_detected.append(any(boollist))

    site_idx = [idx for idx, _ in enumerate(site_detected) if not _]

    if len(site_idx) == 0:
        site_idx = [np.NAN]

    return site_idx[0]
Example #7
Source File: site_stability.py From CatLearn with GNU General Public License v3.0
def get_DFT_site_stability(self, site):
    """
    Computes site stability based on material, defect and reference dict.

    :param site: SiteFeaturizer site.
    :return: Site stability in eV.
    """
    e_mat = site['material'].total_energy
    atom = site['material'].atoms[site['site_index']].symbol
    e_atom = self.reference_dict[atom]
    e_def = site['defect'].total_energy

    try:
        e_site = e_mat - e_def - e_atom
    except:
        # print('Check; site may not be converged: \n')
        # print(site)
        # print('\n')
        e_site = np.NAN

    return e_site
Example #8
Source File: _data.py From spinmob with GNU General Public License v3.0
def _format_value_error(self, v, e, pm=" +/- "):
    """
    Returns a string v +/- e with the right number of sig figs.
    """
    # If we have weird stuff
    if not _s.fun.is_a_number(v) or not _s.fun.is_a_number(e) \
    or v in [_n.inf, _n.nan, _n.NAN] or e in [_n.inf, _n.nan, _n.NAN]:
        return str(v)+pm+str(e)

    # Normal values.
    try:
        sig_figs = -int(_n.floor(_n.log10(abs(e))))+1
        return str(_n.round(v, sig_figs)) + pm + str(_n.round(e, sig_figs))

    except:
        return str(v)+pm+str(e)
Example #9
Source File: diagnostics.py From pliers with BSD 3-Clause "New" or "Revised" License
def mahalanobis_distances(df, axis=0):
    '''
    Returns a pandas Series with Mahalanobis distances for each sample on the
    axis.

    Note: does not work well when # of observations < # of dimensions.
    Will either return NaN in answer or (in the extreme case) fail with a
    Singular Matrix LinAlgError.

    Args:
        df: pandas DataFrame with columns to run diagnostics on
        axis: 0 to find outlier rows, 1 to find outlier columns
    '''
    df = df.transpose() if axis == 1 else df
    means = df.mean()
    try:
        inv_cov = np.linalg.inv(df.cov())
    except LinAlgError:
        return pd.Series([np.NAN] * len(df.index), df.index, name='Mahalanobis')
    dists = []
    for i, sample in df.iterrows():
        dists.append(mahalanobis(sample, means, inv_cov))

    return pd.Series(dists, df.index, name='Mahalanobis')
Example #10
Source File: bouncyhouse.py From technical with GNU General Public License v3.0
def bounce(dataframe: DataFrame, level):
    """
    :param dataframe:
    :param level:
    :return:
        1 if it bounces up
        0 if no bounce
       -1 if it bounces below
    """
    from scipy.ndimage.interpolation import shift
    open = dataframe['open']
    close = dataframe['close']
    touch = shift(touches(dataframe, level), 1, cval=np.NAN)
    return np.vectorize(_bounce)(open, close, level, touch)
Example #11
Source File: estimation.py From velocyto.py with BSD 2-Clause "Simplified" License
def _fit1_slope(y: np.ndarray, x: np.ndarray) -> float:
    """Simple function that fit a linear regression model without intercept
    """
    if not np.any(x):
        m = np.NAN  # It is definetelly not at steady state!!!
    elif not np.any(y):
        m = 0
    else:
        result, rnorm = scipy.optimize.nnls(x[:, None], y)  # Fastest but costrains result >= 0
        m = result[0]
        # Second fastest: m, _ = scipy.optimize.leastsq(lambda m: x*m - y, x0=(0,))
        # Third fastest: m = scipy.optimize.minimize_scalar(lambda m: np.sum((x*m - y)**2 )).x
        # Before I was doinf fastest: scipy.optimize.minimize_scalar(lambda m: np.sum((y - m * x)**2), bounds=(0, 3), method="bounded").x
        # Optionally one could clip m if high value make no sense
        # m = np.clip(m, 0, 3)
    return m
Example #12
Source File: estimation.py From velocyto.py with BSD 2-Clause "Simplified" License
def _fit1_slope_weighted(y: np.ndarray, x: np.ndarray, w: np.ndarray, limit_gamma: bool=False,
                         bounds: Tuple[float, float]=(0, 20)) -> float:
    """Simple function that fit a weighted linear regression model without intercept
    """
    if not np.any(x):
        m = np.NAN  # It is definetelly not at steady state!!!
    elif not np.any(y):
        m = 0
    else:
        if limit_gamma:
            if np.median(y) > np.median(x):
                high_x = x > np.percentile(x, 90)
                up_gamma = np.percentile(y[high_x], 10) / np.median(x[high_x])
                up_gamma = np.maximum(1.5, up_gamma)
            else:
                up_gamma = 1.5  # Just a bit more than 1
            m = scipy.optimize.minimize_scalar(lambda m: np.sum(w * (x * m - y)**2),
                                               bounds=(1e-8, up_gamma), method="bounded").x
        else:
            m = scipy.optimize.minimize_scalar(lambda m: np.sum(w * (x * m - y)**2),
                                               bounds=bounds, method="bounded").x
    return m
Example #13
Source File: estimation.py From velocyto.py with BSD 2-Clause "Simplified" License
def _fit1_slope_offset(y: np.ndarray, x: np.ndarray, fixperc_q: bool=False) -> Tuple[float, float]:
    """Simple function that fit a linear regression model with intercept
    """
    if not np.any(x):
        m = (np.NAN, 0)  # It is definetelly not at steady state!!!
    elif not np.any(y):
        m = (0, 0)
    else:
        # result, rnorm = scipy.optimize.nnls(x[:, None], y)  # Fastest but costrains result >= 0
        # m = result[0]
        if fixperc_q:
            m1 = np.percentile(y[x <= np.percentile(x, 1)], 50)
            m0 = scipy.optimize.minimize_scalar(lambda m: np.sum((x * m - y + m1)**2),
                                                bounds=(0, 20), method="bounded").x
            m = (m0, m1)
        else:
            m, _ = scipy.optimize.leastsq(lambda m: -y + x * m[0] + m[1], x0=(0, 0))
        # Third fastest: m = scipy.optimize.minimize_scalar(lambda m: np.sum((x*m - y)**2 )).x
        # Before I was doinf fastest: scipy.optimize.minimize_scalar(lambda m: np.sum((y - m * x)**2), bounds=(0, 3), method="bounded").x
        # Optionally one could clip m if high value make no sense
        # m = np.clip(m, 0, 3)
    return m[0], m[1]
Example #14
Source File: _multivariate.py From Splunking-Crime with GNU Affero General Public License v3.0
def mean(self, n, p):
    """
    Mean of the Multinomial distribution

    Parameters
    ----------
    %(_doc_default_callparams)s

    Returns
    -------
    mean : float
        The mean of the distribution
    """
    n, p, npcond = self._process_parameters(n, p)
    result = n[..., np.newaxis]*p
    return self._checkresult(result, npcond, np.NAN)
Example #15
Source File: functions.py From quantipy with MIT License
def dichotomous_set_value_counts(data):
    '''
    Now supports weighted aggregation.
    '''
    cdata = data[data.columns[:-1]].replace(2, np.NAN).mul(data[data.columns[-1]], axis=0)
    df = pd.DataFrame(pd.concat([cdata.sum(), pd.Series({'All': cdata.T.count().count()})]))
    df.columns = ['@1']
    return df

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example #16
Source File: test_armax.py From SIPPY with GNU Lesser General Public License v3.0
def test_exceptions(self):
    assert_raises(ValueError, Armax, "invalid", 2, 3, 4, 5)
    assert_raises(ValueError, Armax, [1.5, 2], 2, 3, 4, 5)
    assert_raises(ValueError, Armax, (np.NAN, 1), 2, 3, 4, 5)
Example #17
Source File: my_types.py From pydem with Apache License 2.0
def inpaint(self):
    """ Replace masked-out elements in an array using an iterative image inpainting algorithm. """
    import inpaint
    filled = inpaint.replace_nans(np.ma.filled(self.raster_data, np.NAN).astype(np.float32), 3, 0.01, 2)
    self.raster_data = np.ma.masked_invalid(filled)
Example #18
Source File: candles.py From technical with GNU General Public License v3.0
def heikinashi(bars):
    bars = bars.copy()
    bars['ha_close'] = (bars['open'] + bars['high'] +
                        bars['low'] + bars['close']) / 4

    bars['ha_open'] = (bars['open'].shift(1) + bars['close'].shift(1)) / 2
    bars.loc[:1, 'ha_open'] = bars['open'].values[0]
    for x in range(2):
        bars.loc[1:, 'ha_open'] = (
            (bars['ha_open'].shift(1) + bars['ha_close'].shift(1)) / 2)[1:]

    bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1)
    bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1)

    result = pd.DataFrame(
        index=bars.index,
        data={
            'open': bars['ha_open'],
            'high': bars['ha_high'],
            'low': bars['ha_low'],
            'close': bars['ha_close']})

    # usefull little helpers
    result['flat_bottom'] = np.vectorize(_flat_bottom)(
        result['close'], result['low'], result['open'], result['high'])
    result['flat_top'] = np.vectorize(_flat_top)(
        result['close'], result['low'], result['open'], result['high'])
    result['small_body'] = np.vectorize(_small_body)(
        result['close'], result['low'], result['open'], result['high'])
    result['candle'] = np.vectorize(_candle_type)(result['open'], result['close'])
    result['reversal'] = np.vectorize(_reversal)(
        result['candle'], shift(result['candle'], 1, cval=np.NAN))
    result['lower_wick'] = np.vectorize(_wick_length)(
        result['close'], result['low'], result['open'], result['high'], False)
    result['upper_wick'] = np.vectorize(_wick_length)(
        result['close'], result['low'], result['open'], result['high'], True)

    return result
Example #19
Source File: _multivariate.py From lambda-packs with MIT License
def logpmf(self, x, n, p):
    """
    Log of the Multinomial probability mass function.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
        Each quantile must be a symmetric positive definite matrix.
    %(_doc_default_callparams)s

    Returns
    -------
    logpmf : ndarray or scalar
        Log of the probability mass function evaluated at `x`

    Notes
    -----
    %(_doc_callparams_note)s
    """
    n, p, npcond = self._process_parameters(n, p)
    x, xcond = self._process_quantiles(x, n, p)

    result = self._logpmf(x, n, p)

    # replace values for which x was out of the domain; broadcast
    # xcond to the right shape
    xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
    result = self._checkresult(result, xcond_, np.NINF)

    # replace values bad for n or p; broadcast npcond to the right shape
    npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
    return self._checkresult(result, npcond_, np.NAN)
Example #20
Source File: test_content.py From cupy with MIT License
def check_unary_nan(self, name, xp, dtype):
    a = xp.array(
        [-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, numpy.inf],
        dtype=dtype)
    return getattr(xp, name)(a)
Example #21
Source File: estimation.py From velocyto.py with BSD 2-Clause "Simplified" License
def _fit1_slope_weighted_offset(y: np.ndarray, x: np.ndarray, w: np.ndarray, fixperc_q: bool=False,
                                limit_gamma: bool=False) -> Tuple[float, float]:
    """Function that fits a weighted linear regression model with intercept with some adhoc
    """
    if not np.any(x):
        m = (np.NAN, 0)  # It is definetelly not at steady state!!!
    elif not np.any(y):
        m = (0, 0)
    else:
        if fixperc_q:
            m1 = np.percentile(y[x <= np.percentile(x, 1)], 50)
            m0 = scipy.optimize.minimize_scalar(lambda m: np.sum(w * (x * m - y + m1)**2),
                                                bounds=(0, 20), method="bounded").x
            m = (m0, m1)
        else:
            # m, _ = scipy.optimize.leastsq(lambda m: np.sqrt(w) * (-y + x * m[0] + m[1]), x0=(0, 0))  # This is probably faster but it can have negative slope
            # NOTE: The up_gamma is to deal with cases where consistently y > x. Those should have positive velocity everywhere
            if limit_gamma:
                if np.median(y) > np.median(x):
                    high_x = x > np.percentile(x, 90)
                    up_gamma = np.percentile(y[high_x], 10) / np.median(x[high_x])
                    up_gamma = np.maximum(1.5, up_gamma)
                else:
                    up_gamma = 1.5  # Just a bit more than 1
            else:
                up_gamma = 20

            up_q = 2 * np.sum(y * w) / np.sum(w)
            m = scipy.optimize.minimize(lambda m: np.sum(w * (-y + x * m[0] + m[1])**2),
                                        x0=(0.1, 1e-16), method="L-BFGS-B",
                                        bounds=[(1e-8, up_gamma), (0, up_q)]).x
            # If speedup is needed either the gradient or numexpr could be used
    return m[0], m[1]
Example #22
Source File: equitycountrymodels.py From systematictradingexamples with GNU General Public License v2.0
def sign(x):
    if x is None:
        return None
    if np.isnan(x):
        return np.NAN
    if x == 0:
        return 0.0
    if x < 0:
        return -1.0
    if x > 0:
        return 1.0
Example #23
Source File: test_multivariate.py From GraphicDesignPatternByPython with MIT License
def test_logpmf(self):
    vals1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
    assert_allclose(vals1, -1.483270127243324, rtol=1e-8)

    vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
    assert_allclose(vals2, np.NAN, rtol=1e-8)

    vals3 = multinomial.logpmf([3, 4], 0, [-2, 3])
    assert_allclose(vals3, np.NAN, rtol=1e-8)
Example #24
Source File: load.py From bifacial_radiance with BSD 3-Clause "New" or "Revised" License
def cleanResult(resultsDF, matchers=None):
    """
    Replace irradiance values with NaN's when the scan intersects ground,
    sky, or anything in `matchers`.

    Matchers are words in the dataframe like 'sky' or 'tube' in the front or
    back material description column that get substituted by NaN in Wm2Front
    and Wm2Back

    There are default matchers established in this routine but other matchers
    can be passed.

    Default matchers: 'sky', 'tube', 'pole', 'ground', '3267', '1540'.
    Matchers 3267 and 1540 is to get rid of inner-sides of the module.

    Parameters
    ----------
    resultsDF : :py:class:`~pandas.DataFrame`
        DataFrame of results from bifacial_radiance, for example read from
        :py:class:`~bifacial_radiance.load.read1Result`

    Returns
    --------
    resultsDF : :py:class:`~pandas.DataFrame`
        Updated resultsDF
    """
    import numpy as np

    if matchers is None:
        matchers = ['sky', 'pole', 'tube', 'bar', 'ground', '3267', '1540']
    NaNindex = [i for i, s in enumerate(resultsDF['mattype']) if any(xs in s for xs in matchers)]
    NaNindex2 = [i for i, s in enumerate(resultsDF['rearMat']) if any(xs in s for xs in matchers)]
    # NaNindex += [i for i, s in enumerate(frontDict['mattype']) if any(xs in s for xs in matchers)]

    for i in NaNindex:
        resultsDF['Wm2Front'].loc[i] = np.NAN
    for i in NaNindex2:
        resultsDF['Wm2Back'].loc[i] = np.NAN

    return resultsDF
Example #25
Source File: _multivariate.py From GraphicDesignPatternByPython with MIT License
def logpmf(self, x, n, p):
    """
    Log of the Multinomial probability mass function.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
        Each quantile must be a symmetric positive definite matrix.
    %(_doc_default_callparams)s

    Returns
    -------
    logpmf : ndarray or scalar
        Log of the probability mass function evaluated at `x`

    Notes
    -----
    %(_doc_callparams_note)s
    """
    n, p, npcond = self._process_parameters(n, p)
    x, xcond = self._process_quantiles(x, n, p)

    result = self._logpmf(x, n, p)

    # replace values for which x was out of the domain; broadcast
    # xcond to the right shape
    xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
    result = self._checkresult(result, xcond_, np.NINF)

    # replace values bad for n or p; broadcast npcond to the right shape
    npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
    return self._checkresult(result, npcond_, np.NAN)
Example #26
Source File: _multivariate.py From Splunking-Crime with GNU Affero General Public License v3.0
def logpmf(self, x, n, p):
    """
    Log of the Multinomial probability mass function.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
        Each quantile must be a symmetric positive definite matrix.
    %(_doc_default_callparams)s

    Returns
    -------
    logpmf : ndarray or scalar
        Log of the probability mass function evaluated at `x`

    Notes
    -----
    %(_doc_callparams_note)s
    """
    n, p, npcond = self._process_parameters(n, p)
    x, xcond = self._process_quantiles(x, n, p)

    result = self._logpmf(x, n, p)

    # replace values for which x was out of the domain; broadcast
    # xcond to the right shape
    xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
    result = self._checkresult(result, xcond_, np.NINF)

    # replace values bad for n or p; broadcast npcond to the right shape
    npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
    return self._checkresult(result, npcond_, np.NAN)
Example #27
Source File: generation.py From bayestsa with Apache License 2.0
def generate(self):
    self.__validate()
    self.__generatenoises()
    self.__generatejumps()
    processcount = len(self.__data._processnames)
    self.__data._processes = np.empty((self.__data._timecount, processcount))
    self.__data._processes[:] = np.NAN
    for time in range(self.__data._timecount):
        for pi, (pn, pf) in enumerate(zip(self.__data._processnames, self.__processfuncs)):
            self.__data._processes[time, pi] = pf(time, pn, self.__data)
    return self.__data.copy()
Example #28
Source File: collections.py From bayestsa with Apache License 2.0
def tonumpyarray(self, fill=None, symmetric=False):
    import numpy as np
    if fill is None:
        fill = np.NAN
    res = np.empty((self.__dim, self.__dim))
    idx = 0
    for i in range(self.__dim):
        for j in range(i+1):
            res[i, j] = self._data[idx]
            if symmetric:
                res[j, i] = res[i, j]
            idx += 1
        if not symmetric:
            res[i, i+1:self.__dim] = fill
    return res
Example #29
Source File: conditionalforecast.py From systematictradingexamples with GNU General Public License v2.0
def sign(x):
    if x is None:
        return None
    if np.isnan(x):
        return np.NAN
    if x == 0:
        return 0.0
    if x < 0:
        return -1.0
    if x > 0:
        return 1.0
Example #30
Source File: blending.py From uchroma with GNU Lesser General Public License v3.0
def _compose_alpha(img_in, img_layer, opacity: float=1.0):
    """
    Calculate alpha composition ratio between two images.
    """
    comp_alpha = np.minimum(img_in[:, :, 3], img_layer[:, :, 3]) * opacity
    new_alpha = img_in[:, :, 3] + (1.0 - img_in[:, :, 3]) * comp_alpha
    np.seterr(divide='ignore', invalid='ignore')
    ratio = comp_alpha / new_alpha
    ratio[ratio == np.NAN] = 0.0

    return ratio