Python scipy.stats.scoreatpercentile() Examples
The following are 30 code examples of scipy.stats.scoreatpercentile(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module scipy.stats, or try the search function.
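Before the project examples, a minimal orientation sketch of the function itself. This is illustrative only; note that SciPy's documentation steers new code toward numpy.percentile, which provides equivalent functionality.

    import numpy as np
    from scipy import stats

    a = np.arange(10)                                                     # [0, 1, ..., 9]
    print(stats.scoreatpercentile(a, 50))                                 # 4.5 (default 'fraction' interpolation)
    print(stats.scoreatpercentile(a, 50, interpolation_method='lower'))   # 4
    print(stats.scoreatpercentile(a, 50, interpolation_method='higher'))  # 5
    print(stats.scoreatpercentile(a, (25, 50, 75)))                       # array([2.25, 4.5 , 6.75])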
Example #1
Source File: tedana.py From me-ica with GNU Lesser General Public License v2.1 | 6 votes |
def makeadmask(cdat, min=True, getsum=False):

    nx, ny, nz, Ne, nt = cdat.shape

    mask = np.ones((nx, ny, nz), dtype=bool)

    if min:
        mask = cdat[:, :, :, :, :].prod(axis=-1).prod(-1) != 0
        return mask
    else:
        # Make a map of longest echo that a voxel can be sampled with,
        # with minimum value of map as X value of voxel that has median
        # value in the 1st echo. N.b. larger factor leads to bias to lower TEs
        emeans = cdat[:, :, :, :, :].mean(-1)
        medv = emeans[:, :, :, 0] == stats.scoreatpercentile(
            emeans[:, :, :, 0][emeans[:, :, :, 0] != 0], 33,
            interpolation_method='higher')
        lthrs = np.squeeze(np.array([emeans[:, :, :, ee][medv] / 3 for ee in range(Ne)]))
        if len(lthrs.shape) == 1:
            lthrs = np.atleast_2d(lthrs).T
        lthrs = lthrs[:, lthrs.sum(0).argmax()]
        mthr = np.ones([nx, ny, nz, Ne])  # original read a module-level 'ne'; Ne is the same echo count
        for ee in range(Ne):
            mthr[:, :, :, ee] *= lthrs[ee]
        mthr = np.abs(emeans[:, :, :, :]) > mthr
        masksum = np.array(mthr, dtype=int).sum(-1)  # np.int/np.bool aliases are removed in modern NumPy
        mask = masksum != 0
        if getsum:
            return mask, masksum
        else:
            return mask
Example #2
Source File: gradient_boosting.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def __call__(self, y, pred, sample_weight=None):
    pred = pred.ravel()
    diff = y - pred
    gamma = self.gamma
    if gamma is None:
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                         self.alpha * 100)

    gamma_mask = np.abs(diff) <= gamma
    if sample_weight is None:
        sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
        lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
        loss = (sq_loss + lin_loss) / y.shape[0]
    else:
        sq_loss = np.sum(0.5 * sample_weight[gamma_mask] *
                         diff[gamma_mask] ** 2.0)
        lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                          (np.abs(diff[~gamma_mask]) - gamma / 2.0))
        loss = (sq_loss + lin_loss) / sample_weight.sum()
    return loss
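The alpha-quantile of the absolute residuals decides where the Huber loss switches from quadratic to linear. A minimal standalone sketch of that thresholding step (the function name and signature here are illustrative, not scikit-learn's API):

    import numpy as np
    from scipy import stats

    def huber_loss(y, pred, alpha=0.9):
        # gamma: the alpha-quantile of |residuals| marks the quadratic/linear switch
        diff = y - pred
        gamma = stats.scoreatpercentile(np.abs(diff), alpha * 100)
        mask = np.abs(diff) <= gamma
        sq = 0.5 * diff[mask] ** 2
        lin = gamma * (np.abs(diff[~mask]) - gamma / 2.0)
        return (sq.sum() + lin.sum()) / y.shape[0]

    y = np.random.RandomState(0).normal(size=100)
    print(huber_loss(y, np.zeros_like(y)))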
Example #3
Source File: outliers.py From visualqc with Apache License 2.0 | 6 votes |
def run_isolation_forest(features, id_list, fraction_of_outliers=.3):
    """Performs anomaly detection based on Isolation Forest."""

    rng = np.random.RandomState(1984)

    num_samples = features.shape[0]
    iso_f = IsolationForest(max_samples=num_samples,
                            contamination=fraction_of_outliers,
                            random_state=rng)
    iso_f.fit(features)
    pred_scores = iso_f.decision_function(features)

    threshold = stats.scoreatpercentile(pred_scores, 100 * fraction_of_outliers)
    outlying_ids = id_list[pred_scores < threshold]

    return outlying_ids
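A hedged usage sketch of the function above on invented data, assuming the function and its module imports (numpy as np, scipy.stats as stats, sklearn's IsolationForest) are in scope. The blob of inliers plus a small planted cluster of outliers is purely for illustration:

    import numpy as np

    features = np.vstack([np.random.RandomState(0).normal(0, 1, (95, 2)),
                          np.random.RandomState(1).normal(8, 1, (5, 2))])   # 5 planted outliers
    id_list = np.array(['subj%02d' % i for i in range(100)])
    # with fraction_of_outliers=0.05, roughly the five planted points should be returned
    print(run_isolation_forest(features, id_list, fraction_of_outliers=0.05))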
Example #4
Source File: univariate_selection.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def _get_support_mask(self):
    check_is_fitted(self, 'scores_')

    # Cater for NaNs
    if self.percentile == 100:
        return np.ones(len(self.scores_), dtype=bool)
    elif self.percentile == 0:
        return np.zeros(len(self.scores_), dtype=bool)

    scores = _clean_nans(self.scores_)
    threshold = stats.scoreatpercentile(scores, 100 - self.percentile)
    mask = scores > threshold
    ties = np.where(scores == threshold)[0]
    if len(ties):
        max_feats = int(len(scores) * self.percentile / 100)
        kept_ties = ties[:max_feats - mask.sum()]
        mask[kept_ties] = True
    return mask
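The core of the mask logic is a single percentile cut on the score vector; ties at the threshold are then promoted until the kept count reaches the target. A toy illustration of the cut itself:

    import numpy as np
    from scipy import stats

    scores = np.array([0.1, 0.4, 0.4, 0.9, 2.5])
    percentile = 40                                        # keep the top 40% of features
    threshold = stats.scoreatpercentile(scores, 100 - percentile)
    print(threshold, scores > threshold)                   # 0.6, [False False False True True]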
Example #5
Source File: test_stats.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_axis(self):
    scoreatperc = stats.scoreatpercentile
    x = arange(12).reshape(3, 4)

    assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])

    r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
    assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)

    r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
    assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)

    x = array([[1, 1, 1],
               [1, 1, 1],
               [4, 4, 3],
               [1, 1, 1],
               [1, 1, 1]])
    score = stats.scoreatpercentile(x, 50)
    assert_equal(score.shape, ())
    assert_equal(score, 1.0)

    score = stats.scoreatpercentile(x, 50, axis=0)
    assert_equal(score.shape, (3,))
    assert_equal(score, [1, 1, 1])
Example #6
Source File: test_stats.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_sequence_per(self):
    x = arange(8) * 0.5
    expected = np.array([0, 3.5, 1.75])
    res = stats.scoreatpercentile(x, [0, 100, 50])
    assert_allclose(res, expected)
    assert_(isinstance(res, np.ndarray))

    # Test with ndarray.  Regression test for gh-2861
    assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
                    expected)

    # Also test combination of 2-D array, axis not None and array-like per
    res2 = stats.scoreatpercentile(np.arange(12).reshape((3, 4)),
                                   np.array([0, 1, 100, 100]), axis=1)
    expected2 = array([[0, 4, 8],
                       [0.03, 4.03, 8.03],
                       [3, 7, 11],
                       [3, 7, 11]])
    assert_allclose(res2, expected2)
Example #7
Source File: test_stats.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_lower_higher(self):
    scoreatperc = stats.scoreatpercentile

    # interpolation_method 'lower'/'higher'
    assert_equal(scoreatperc(list(range(10)), 50,
                             interpolation_method='lower'), 4)
    assert_equal(scoreatperc(list(range(10)), 50,
                             interpolation_method='higher'), 5)
    assert_equal(scoreatperc(list(range(10)), 50, (2, 7),
                             interpolation_method='lower'), 4)
    assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
                             interpolation_method='higher'), 5)
    assert_equal(scoreatperc(list(range(100)), 50, (1, 8),
                             interpolation_method='lower'), 4)
    assert_equal(scoreatperc(list(range(100)), 50, (1, 8),
                             interpolation_method='higher'), 5)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
                             interpolation_method='lower'), 10)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
                             interpolation_method='higher'), 100)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
                             interpolation_method='lower'), 1)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
                             interpolation_method='higher'), 10)
Example #8
Source File: test_stats.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_fraction(self):
    scoreatperc = stats.scoreatpercentile

    # Test defaults
    assert_equal(scoreatperc(list(range(10)), 50), 4.5)
    assert_equal(scoreatperc(list(range(10)), 50, (2, 7)), 4.5)
    assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100)), 55)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10)), 5.5)

    # explicitly specify interpolation_method 'fraction' (the default)
    assert_equal(scoreatperc(list(range(10)), 50,
                             interpolation_method='fraction'), 4.5)
    assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
                             interpolation_method='fraction'), 4.5)
    assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
                             interpolation_method='fraction'), 4.5)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
                             interpolation_method='fraction'), 55)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
                             interpolation_method='fraction'), 5.5)
Example #9
Source File: bandwidths.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def _select_sigma(X):
    """
    Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.

    References
    ----------
    Silverman (1986) p.47
    """
    # normalize = norm.ppf(.75) - norm.ppf(.25)
    normalize = 1.349
    # IQR = np.subtract.reduce(percentile(X, [75, 25],
    #                          axis=axis), axis=axis) / normalize
    IQR = (sap(X, 75) - sap(X, 25)) / normalize
    return np.minimum(np.std(X, axis=0, ddof=1), IQR)


## Univariate Rule of Thumb Bandwidths ##
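The constant 1.349 is norm.ppf(.75) - norm.ppf(.25), so dividing the IQR by it gives a robust estimate of sigma for Gaussian data. A quick check of that equivalence (here sap is scipy.stats.scoreatpercentile, matching the module's import alias):

    import numpy as np
    from scipy.stats import norm, scoreatpercentile as sap

    print(norm.ppf(.75) - norm.ppf(.25))        # ~1.34898

    X = np.random.RandomState(42).normal(0, 2.0, size=10000)
    iqr_sigma = (sap(X, 75) - sap(X, 25)) / 1.349
    print(iqr_sigma, np.std(X, ddof=1))         # both ~2.0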
Example #10
Source File: test_stats.py From Computable with MIT License | 6 votes |
def test_lower_higher(self):
    scoreatperc = stats.scoreatpercentile

    # interpolation_method 'lower'/'higher'
    assert_equal(scoreatperc(list(range(10)), 50,
                             interpolation_method='lower'), 4)
    assert_equal(scoreatperc(list(range(10)), 50,
                             interpolation_method='higher'), 5)
    assert_equal(scoreatperc(list(range(10)), 50, (2, 7),
                             interpolation_method='lower'), 4)
    assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
                             interpolation_method='higher'), 5)
    assert_equal(scoreatperc(list(range(100)), 50, (1, 8),
                             interpolation_method='lower'), 4)
    assert_equal(scoreatperc(list(range(100)), 50, (1, 8),
                             interpolation_method='higher'), 5)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
                             interpolation_method='lower'), 10)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
                             interpolation_method='higher'), 100)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
                             interpolation_method='lower'), 1)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
                             interpolation_method='higher'), 10)
Example #11
Source File: test_stats.py From Computable with MIT License | 6 votes |
def test_fraction(self):
    scoreatperc = stats.scoreatpercentile

    # Test defaults
    assert_equal(scoreatperc(list(range(10)), 50), 4.5)
    assert_equal(scoreatperc(list(range(10)), 50, (2, 7)), 4.5)
    assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100)), 55)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10)), 5.5)

    # explicitly specify interpolation_method 'fraction' (the default)
    assert_equal(scoreatperc(list(range(10)), 50,
                             interpolation_method='fraction'), 4.5)
    assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
                             interpolation_method='fraction'), 4.5)
    assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
                             interpolation_method='fraction'), 4.5)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
                             interpolation_method='fraction'), 55)
    assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
                             interpolation_method='fraction'), 5.5)
Example #12
Source File: ABuTLExecute.py From abu with GNU General Public License v3.0 | 6 votes |
def find_percent_point(percents, y):
    """
    Visualize the regions of a technical line split by the given ratios.
    For each ratio in the input iterable, the split value is computed in
    two ways: stats.scoreatpercentile, and (y.max() - y.min()) * pt + y.min().
    Returns a dict keyed by the ratio values.
    eg:
        input: percents = (0.1, 0.9)
        output: {0.1: (15.732749999999999, 15.5075), 0.9: (31.995000000000005, 34.387500000000003)}

    :param percents: iterable sequence, eg: (0.1, 0.9), [0.3, 0.4, 0.8]
    :param y: sequence on which the split values are computed
    :return: dict keyed by the ratio values
    """
    percent_point_dict = {pt: (stats.scoreatpercentile(y, np.round(pt * 100, 1)),
                               (y.max() - y.min()) * pt + y.min())
                          for pt in percents}
    return percent_point_dict


# noinspection PyTypeChecker
Example #13
Source File: ABuTLExecute.py From abu with GNU General Public License v3.0 | 6 votes |
def find_golden_point_ex(x, y, show=False):
    """Statistical golden-section computation, plus a simple optional visualization."""

    sp382 = stats.scoreatpercentile(y, 38.2)
    sp618 = stats.scoreatpercentile(y, 61.8)
    sp50 = stats.scoreatpercentile(y, 50.0)

    if show:
        with plt_show():
            # visualization
            plt.plot(x, y)
            plt.axhline(sp50, color='c')
            plt.axhline(sp618, color='r')
            plt.axhline(sp382, color='g')
            _ = plt.setp(plt.gca().get_xticklabels(), rotation=30)
            plt.legend(['TLine', 'sp50', 'sp618', 'sp382'],
                       bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    return sp382, sp50, sp618
Example #14
Source File: c5.py From abu with GNU General Public License v3.0 | 6 votes |
def sample_571_1():
    """
    5.7.1 Two ways to define golden-section lines
    :return:
    """
    # maximum of the close-price sequence
    cs_max = tsla_df.close.max()
    # minimum of the close-price sequence
    cs_min = tsla_df.close.min()

    sp382 = (cs_max - cs_min) * 0.382 + cs_min
    sp618 = (cs_max - cs_min) * 0.618 + cs_min
    print('visual 382: ' + str(round(sp382, 2)))
    print('visual 618: ' + str(round(sp618, 2)))

    sp382_stats = stats.scoreatpercentile(tsla_df.close, 38.2)
    sp618_stats = stats.scoreatpercentile(tsla_df.close, 61.8)
    print('statistical 382: ' + str(round(sp382_stats, 2)))
    print('statistical 618: ' + str(round(sp618_stats, 2)))


# noinspection PyTypeChecker
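The two definitions only coincide when prices are spread uniformly over their range; on real or skewed data the "visual" and "statistical" lines diverge. A self-contained sketch with invented prices (no tsla_df needed):

    import numpy as np
    from scipy import stats

    close = 20 + np.cumsum(np.random.RandomState(7).normal(0, 1, 250))  # fake close prices
    visual_618 = (close.max() - close.min()) * 0.618 + close.min()
    stats_618 = stats.scoreatpercentile(close, 61.8)
    print(round(visual_618, 2), round(stats_618, 2))   # generally not equal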
Example #15
Source File: utils.py From ocs-ci with MIT License | 6 votes |
def get_trim_mean(values, percentage=20):
    """
    Get the trimmed mean of a list of values.
    Explanation: This function finds the arithmetic mean of given values,
    ignoring values outside the given limits.

    Args:
        values (list): The list of values
        percentage (int): The percentage to be trimmed

    Returns:
        float: Trimmed mean. In case trimmed mean calculation fails,
            the regular mean average is returned

    """
    lower_limit = scoreatpercentile(values, percentage)
    upper_limit = scoreatpercentile(values, 100 - percentage)
    try:
        return tmean(values, limits=(lower_limit, upper_limit))
    except ValueError:
        log.warning(
            f"Failed to calculate the trimmed mean of {values}. The "
            f"regular mean average will be calculated instead"
        )
        return sum(values) / len(values)
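A usage sketch of the helper above, assuming it and its imports (scoreatpercentile, tmean, log) are in scope; with the 20% default, roughly the bottom and top fifth of the observations are ignored before averaging:

    values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1000]   # one wild outlier
    print(get_trim_mean(values))        # ~5.5, close to the median; the outlier is ignored
    print(sum(values) / len(values))    # 104.5, the plain mean dragged by the outlier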
Example #16
Source File: tedana.py From me-ica with GNU Lesser General Public License v2.1 | 6 votes |
def scoreatpercentile(a, per, limit=(), interpolation_method='lower'):
    """
    This function is grabbed from scipy
    """
    values = np.sort(a, axis=0)
    if limit:
        values = values[(limit[0] <= values) & (values <= limit[1])]

    idx = per / 100. * (values.shape[0] - 1)
    if idx % 1 == 0:
        score = values[int(idx)]
    else:
        if interpolation_method == 'fraction':
            score = _interpolate(values[int(idx)], values[int(idx) + 1],
                                 idx % 1)
        elif interpolation_method == 'lower':
            score = values[int(np.floor(idx))]
        elif interpolation_method == 'higher':
            score = values[int(np.ceil(idx))]
        else:
            raise ValueError("interpolation_method can only be 'fraction', "
                             "'lower' or 'higher'")
    return score
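Note the differences from SciPy's version: the default here is 'lower' rather than 'fraction', and the 'fraction' branch relies on a module-level _interpolate helper not shown on this page. A quick comparison that avoids the 'fraction' branch:

    import numpy as np
    from scipy import stats

    a = np.arange(10)
    print(scoreatpercentile(a, 50))                                       # 4, local default is 'lower'
    print(stats.scoreatpercentile(a, 50, interpolation_method='lower'))   # 4, same result
    print(stats.scoreatpercentile(a, 50))                                 # 4.5, scipy default is 'fraction'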
Example #17
Source File: t2smap.py From me-ica with GNU Lesser General Public License v2.1 | 6 votes |
def scoreatpercentile(a, per, limit=(), interpolation_method='lower'):
    """
    This function is grabbed from scipy
    """
    values = np.sort(a, axis=0)
    if limit:
        values = values[(limit[0] <= values) & (values <= limit[1])]

    idx = per / 100. * (values.shape[0] - 1)
    if idx % 1 == 0:
        score = values[int(idx)]  # int cast added: float indices raise on modern NumPy
    else:
        if interpolation_method == 'fraction':
            score = _interpolate(values[int(idx)], values[int(idx) + 1],
                                 idx % 1)
        elif interpolation_method == 'lower':
            score = values[int(np.floor(idx))]  # int cast added, as above
        elif interpolation_method == 'higher':
            score = values[int(np.ceil(idx))]   # int cast added, as above
        else:
            raise ValueError("interpolation_method can only be 'fraction', "
                             "'lower' or 'higher'")
    return score
Example #18
Source File: bandwidths.py From vnpy_crypto with MIT License | 6 votes |
def _select_sigma(X):
    """
    Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.

    References
    ----------
    Silverman (1986) p.47
    """
    # normalize = norm.ppf(.75) - norm.ppf(.25)
    normalize = 1.349
    # IQR = np.subtract.reduce(percentile(X, [75, 25],
    #                          axis=axis), axis=axis) / normalize
    IQR = (sap(X, 75) - sap(X, 25)) / normalize
    return np.minimum(np.std(X, axis=0, ddof=1), IQR)


## Univariate Rule of Thumb Bandwidths ##
Example #19
Source File: gradient_boosting.py From twitter-stock-recommendation with MIT License | 6 votes |
def __call__(self, y, pred, sample_weight=None):
    pred = pred.ravel()
    diff = y - pred
    gamma = self.gamma
    if gamma is None:
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                         self.alpha * 100)

    gamma_mask = np.abs(diff) <= gamma
    if sample_weight is None:
        sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
        lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
        loss = (sq_loss + lin_loss) / y.shape[0]
    else:
        sq_loss = np.sum(0.5 * sample_weight[gamma_mask] *
                         diff[gamma_mask] ** 2.0)
        lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                          (np.abs(diff[~gamma_mask]) - gamma / 2.0))
        loss = (sq_loss + lin_loss) / sample_weight.sum()
    return loss
Example #20
Source File: univariate_selection.py From twitter-stock-recommendation with MIT License | 6 votes |
def _get_support_mask(self):
    check_is_fitted(self, 'scores_')

    # Cater for NaNs
    if self.percentile == 100:
        return np.ones(len(self.scores_), dtype=bool)
    elif self.percentile == 0:
        return np.zeros(len(self.scores_), dtype=bool)

    scores = _clean_nans(self.scores_)
    threshold = stats.scoreatpercentile(scores, 100 - self.percentile)
    mask = scores > threshold
    ties = np.where(scores == threshold)[0]
    if len(ties):
        max_feats = int(len(scores) * self.percentile / 100)
        kept_ties = ties[:max_feats - mask.sum()]
        mask[kept_ties] = True
    return mask
Example #21
Source File: gradient_boosting.py From twitter-stock-recommendation with MIT License | 5 votes |
def fit(self, X, y, sample_weight=None):
    if sample_weight is None:
        self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
    else:
        self.quantile = _weighted_percentile(y, sample_weight,
                                             self.alpha * 100.0)
Example #22
Source File: gradient_boosting.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def fit(self, X, y, sample_weight=None):
    if sample_weight is None:
        self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
    else:
        self.quantile = _weighted_percentile(y, sample_weight,
                                             self.alpha * 100.0)
Example #23
Source File: WOE_IV.py From exploripy with MIT License | 5 votes |
def discrete(self, x):
    '''
    Discretize the input 1-D numpy array using 5 equal percentiles
    :param x: 1-D numpy array
    :return: discretized 1-D numpy array
    '''
    res = np.array([0] * x.shape[-1], dtype=int)
    for i in range(5):
        point1 = stats.scoreatpercentile(x, i * 20)
        point2 = stats.scoreatpercentile(x, (i + 1) * 20)
        x1 = x[np.where((x >= point1) & (x <= point2))]
        mask = np.in1d(x, x1)
        res[mask] = (i + 1)
    return res
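A usage sketch of the quintile binning above; each value lands in bin 1-5 according to its percentile range (a value sitting exactly on a boundary ends up in the later bin, since later iterations overwrite earlier ones). Note that self is unused in the method body, so a placeholder works for a quick standalone call:

    import numpy as np

    x = np.random.RandomState(3).normal(size=1000)
    res = discrete(None, x)          # 'self' is unused, so any placeholder works here
    print(np.bincount(res))          # roughly 200 values in each of bins 1..5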
Example #24
Source File: gradient_boosting.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
    pred = pred.ravel()
    diff = y - pred
    if sample_weight is None:
        gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
    else:
        gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                     self.alpha * 100)

    gamma_mask = np.abs(diff) <= gamma
    residual = np.zeros((y.shape[0],), dtype=np.float64)
    residual[gamma_mask] = diff[gamma_mask]
    residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
    self.gamma = gamma
    return residual
Example #25
Source File: descriptivestats.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def _percentiles(self, x):
    p = [stats.scoreatpercentile(x, per)
         for per in (1, 5, 10, 25, 50, 75, 90, 95, 99)]
    return p
Example #26
Source File: gradient_boosting.py From twitter-stock-recommendation with MIT License | 5 votes |
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
    pred = pred.ravel()
    diff = y - pred
    if sample_weight is None:
        gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
    else:
        gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                     self.alpha * 100)

    gamma_mask = np.abs(diff) <= gamma
    residual = np.zeros((y.shape[0],), dtype=np.float64)
    residual[gamma_mask] = diff[gamma_mask]
    residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
    self.gamma = gamma
    return residual
Example #27
Source File: estimators.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def fitquantilesgmm(distfn, x, start=None, pquant=None, frozen=None):
    if pquant is None:
        pquant = np.array([0.01, 0.05, 0.1, 0.4, 0.6, 0.9, 0.95, 0.99])
    if start is None:
        if hasattr(distfn, '_fitstart'):
            start = distfn._fitstart(x)
        else:
            start = [1] * distfn.numargs + [0., 1.]
    # TODO: vectorize this:
    xqs = [stats.scoreatpercentile(x, p) for p in pquant * 100]
    mom2s = None
    parest = optimize.fmin(
        lambda params: np.sum(
            momentcondquant(distfn, params, mom2s, (pquant, xqs),
                            shape=None) ** 2),
        start)
    return parest
Example #28
Source File: gmm.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def __init__(self, endog, exog, instrument, **kwds):
    # TODO: something wrong with super
    super(DistQuantilesGMM, self).__init__(endog, exog, instrument)
    # self.func = func
    self.epsilon_iter = 1e-5

    self.distfn = kwds['distfn']
    # done by super doesn't work yet
    # TypeError: super does not take keyword arguments
    self.endog = endog

    # make this optional for fit
    if 'pquant' not in kwds:
        self.pquant = pquant = np.array([0.01, 0.05, 0.1, 0.4,
                                         0.6, 0.9, 0.95, 0.99])
    else:
        self.pquant = pquant = kwds['pquant']

    # TODO: vectorize this: use edf
    self.xquant = np.array([stats.scoreatpercentile(endog, p)
                            for p in pquant * 100])
    self.nmoms = len(self.pquant)

    # TODO: copied from GMM, make super work
    self.endog = endog
    self.exog = exog
    self.instrument = instrument
    self.results = GMMResults(model=self)
    # self.__dict__.update(kwds)
    self.epsilon_iter = 1e-6
Example #29
Source File: quantile_regression.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def prsquared(self):
    q = self.q
    endog = self.model.endog
    e = self.resid
    e = np.where(e < 0, (1 - q) * e, q * e)
    e = np.abs(e)
    ered = endog - stats.scoreatpercentile(endog, q * 100)
    ered = np.where(ered < 0, (1 - q) * ered, q * ered)
    ered = np.abs(ered)
    return 1 - np.sum(e) / np.sum(ered)


# @cache_readonly
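This property is the Koenker-Machado pseudo R-squared: one minus the ratio of the check-function loss of the fitted model to that of an intercept-only model at the same quantile. A hand-rolled check for q = 0.5, where an intercept-only "fit" at the median gives exactly zero:

    import numpy as np
    from scipy import stats

    def check_loss(e, q):
        # asymmetric quantile ("check") loss
        return np.abs(np.where(e < 0, (1 - q) * e, q * e)).sum()

    q = 0.5
    endog = np.random.RandomState(0).exponential(size=200)
    fitted = np.full_like(endog, stats.scoreatpercentile(endog, q * 100))
    e = endog - fitted
    ered = endog - stats.scoreatpercentile(endog, q * 100)
    print(1 - check_loss(e, q) / check_loss(ered, q))   # 0.0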
Example #30
Source File: remap.py From sarpy with MIT License | 5 votes |
def nrl(x, a=1, c=220):
    """
    Lin-log style remap.

    Parameters
    ----------
    x : numpy.ndarray
        data to remap
    a : float
        scale factor of 99th percentile for input "knee"
    c : float
        output "knee" in lin-log curve

    Returns
    -------
    numpy.ndarray
    """
    x = numpy.abs(x)
    xmin = numpy.min(x)
    p99 = prctile(x[numpy.isfinite(x)], 99)
    b = (255 - c) / numpy.log10((numpy.max(x) - xmin) / ((a * p99) - xmin))

    out = numpy.zeros_like(x, numpy.uint8)
    linear_region = (x <= a * p99)
    out[linear_region] = (x[linear_region] - xmin) * c / ((a * p99) - xmin)
    out[numpy.logical_not(linear_region)] = c + (b * numpy.log10(
        (x[numpy.logical_not(linear_region)] - xmin) / ((a * p99) - xmin)))
    return out
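A usage sketch of nrl on synthetic magnitude data. Here prctile is assumed to be a percentile helper such as scipy.stats.scoreatpercentile (a stand-in for whatever the module actually imports under that name):

    import numpy
    from scipy.stats import scoreatpercentile as prctile  # assumed stand-in for the module's import

    x = numpy.random.RandomState(0).rayleigh(scale=100, size=(64, 64))  # fake SAR-like magnitudes
    img8 = nrl(x)
    print(img8.dtype, img8.min(), img8.max())   # uint8, values spanning roughly 0..255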