Python numpy.expm1() Examples
The following are 30 code examples of numpy.expm1(), drawn from open-source projects. You can go to the original project or source file by following the link above each example, or check out the other available functions and classes of the numpy module.
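As a quick refresher before the examples (a minimal sketch, not taken from any project below): numpy.expm1(x) computes exp(x) - 1 in a way that stays accurate when x is close to zero, where the naive expression loses most of its significant digits to cancellation.

import numpy as np

x = 1e-12
print(np.exp(x) - 1.0)   # naive form: cancellation leaves only a few correct digits
print(np.expm1(x))       # accurate: equals x + x**2/2 + ... to machine precision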
Example #1
Source File: S1AreaFractionTopProbability.py From pax with BSD 3-Clause "New" or "Revised" License | 6 votes |
def bdtrc(k, n, p):
    if (k < 0):
        return (1.0)
    if (k == n):
        return (0.0)
    dn = n - k
    if (k == 0):
        if (p < .01):
            dk = -np.expm1(dn * np.log1p(-p))
        else:
            dk = 1.0 - np.exp(dn * np.log(1.0 - p))
    else:
        dk = k + 1
        dk = betainc(dk, dn, p)
    return dk
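bdtrc is the complemented binomial CDF, i.e. the probability of more than k successes in n trials. Assuming the bdtrc defined above is in scope (with scipy.special.betainc imported as in pax), one might sanity-check it against scipy.stats.binom; this check is illustrative and not part of pax:

import numpy as np
from scipy.stats import binom

# bdtrc(k, n, p) returns P(X > k) for X ~ Binomial(n, p), i.e. the survival function at k
k, n, p = 3, 10, 0.2
assert np.isclose(bdtrc(k, n, p), binom.sf(k, n, p))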
Example #2
Source File: test_end_to_end.py From mercari-solution with MIT License | 6 votes |
def _test(vectorizer, model, n_rows):
    tr = load_train('tests/train_10k.tsv')
    tr, va = train_test_split(tr)
    te = pd.read_csv('tests/test_10k_corrupted.tsv', sep="\t")
    if n_rows is not None:
        if n_rows == 'random':
            n_rows = np.random.randint(1, te.shape[0])
        te = te.sample(n=n_rows)
    mat_tr = vectorizer.fit_transform(tr, tr.price)
    mat_te = vectorizer.transform(te.copy())
    mat_va = vectorizer.transform(va)
    model.fit(mat_tr, np.log1p(tr.price))
    assert rmsle(np.expm1(model.predict(mat_va)), va.price) < 0.85
    te_preds = np.expm1(model.predict(mat_te))
    assert te_preds.shape[0] == te.shape[0]
    assert np.all(np.isfinite(te_preds))
    assert te_preds.min() >= -1, "min price is {}".format(te_preds.min())
    assert te_preds.max() <= 3000, "max price is {}".format(te_preds.max())
Example #3
Source File: frank.py From copulae with MIT License | 6 votes |
def ipsi(self, u, log=False):
    r = np.asarray(u) * self.params
    res = np.copy(r)
    res[np.isnan(r)] = np.nan

    em = np.expm1(-self.params)

    # for small inputs, u <= 0.01
    small_mask = np.abs(r) <= 0.01 * abs(self.params)
    res[small_mask] = -np.log(np.expm1(-r[small_mask]) / em)

    big_mask = np.abs(r) > 0.01 * abs(self.params)
    e = np.exp(-self.params)
    mid_mask = (e > 0) & (np.abs(self.params - r) < 0.5)  # theta * (1 - u) < 0.5
    m1 = big_mask & mid_mask
    m2 = big_mask & ~mid_mask

    r[m1] = -np.log1p(e * np.expm1(self.params - r[m1]) / em)
    r[m2] = -np.log1p((np.exp(-r[m2]) - e) / em)

    return np.log(r) if log else r
Example #4
Source File: frank.py From copulae with MIT License | 6 votes |
def random(self, n: int, seed: int = None):
    u = random_uniform(n, self.dim, seed)
    if abs(self.params) < 1e-7:
        return u

    if self.dim == 2:
        v = u[:, 1]
        a = -abs(self.params)
        v = -1 / a * np.log1p(-v * np.expm1(-a) / (np.exp(-a * u[:, 0]) * (v - 1) - v))
        u[:, 1] = 1 - v if self.params > 0 else v
        return u

    # alpha too large
    if log1mexp(self.params) == 0:
        return np.ones((n, self.dim))

    fr = random_log_series_ln1p(-self.params, n)[:, None]
    return self.psi(-np.log(u) / fr)
Example #5
Source File: stats.py From arviz with Apache License 2.0 | 6 votes |
def _gpinv(probs, kappa, sigma):
    """Inverse Generalized Pareto distribution function."""
    # pylint: disable=unsupported-assignment-operation, invalid-unary-operand-type
    x = np.full_like(probs, np.nan)
    if sigma <= 0:
        return x
    ok = (probs > 0) & (probs < 1)
    if np.all(ok):
        if np.abs(kappa) < np.finfo(float).eps:
            x = -np.log1p(-probs)
        else:
            x = np.expm1(-kappa * np.log1p(-probs)) / kappa
        x *= sigma
    else:
        if np.abs(kappa) < np.finfo(float).eps:
            x[ok] = -np.log1p(-probs[ok])
        else:
            x[ok] = np.expm1(-kappa * np.log1p(-probs[ok])) / kappa
        x *= sigma
        x[probs == 0] = 0
        if kappa >= 0:
            x[probs == 1] = np.inf
        else:
            x[probs == 1] = -sigma / kappa
    return x
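The expm1/log1p pairing here evaluates ((1 - p) ** (-kappa) - 1) / kappa without catastrophic cancellation as kappa approaches zero. A minimal sketch of that identity in plain NumPy (illustrative only, not arviz code):

import numpy as np

p, kappa = 0.3, 1e-9
naive = ((1 - p) ** (-kappa) - 1) / kappa           # subtracting 1 from a value near 1 loses digits
stable = np.expm1(-kappa * np.log1p(-p)) / kappa    # the form used in _gpinv above
limit = -np.log1p(-p)                               # the exact kappa -> 0 limit
print(naive, stable, limit)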
Example #6
Source File: test_matfuncs.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_burkardt_3(self):
    # This example is due to Laub.
    # This matrix is ill-suited for the Taylor series approach.
    # As powers of A are computed, the entries blow up too quickly.
    exp1 = np.exp(1)
    exp39 = np.exp(39)
    A = np.array([
        [0, 1],
        [-39, -40],
        ], dtype=float)
    desired = np.array([
        [39/(38*exp1) - 1/(38*exp39), -np.expm1(-38) / (38*exp1)],
        [39*np.expm1(-38) / (38*exp1), -1/(38*exp1) + 39/(38*exp39)],
        ], dtype=float)
    actual = expm(A)
    assert_allclose(actual, desired)
Example #7
Source File: exponential_weibull.py From chaospy with MIT License | 5 votes |
def _cdf(self, x, a, c):
    exm1c = -numpy.expm1(-x**c)
    return (exm1c)**a
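The -numpy.expm1(-x**c) idiom is a numerically safer way to write 1 - exp(-x**c), and the same pattern appears in several of the chaospy distributions below. A small illustrative comparison in plain NumPy (not chaospy code):

import numpy as np

x, a, c = 1e-8, 2.0, 1.0
stable = (-np.expm1(-x ** c)) ** a    # the form used in _cdf above
naive = (1.0 - np.exp(-x ** c)) ** a  # loses roughly half the significant digits when x**c is tiny
print(stable, naive)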
Example #8
Source File: test_limits.py From numdifftools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_residue_1_div_1_minus_exp_x(self):
    def fun(z):
        return -z / (np.expm1(2 * z))
    lim, err = Limit(fun, full_output=True)(0)
    assert_allclose(lim, -0.5)
    assert err.error_estimate < 1e-14
Example #9
Source File: exponential_power.py From chaospy with MIT License | 5 votes |
def _cdf(self, x, b):
    xb = x**b
    return -numpy.expm1(-numpy.expm1(xb))
Example #10
Source File: generalized_exponential.py From chaospy with MIT License | 5 votes |
def _pdf(self, x, a, b, c):
    return (a+b*(-numpy.expm1(-c*x)))*numpy.exp((-a-b)*x+b*(-numpy.expm1(-c*x))/c)
Example #11
Source File: generalized_exponential.py From chaospy with MIT License | 5 votes |
def _cdf(self, x, a, b, c):
    output = -numpy.expm1((-a-b)*x + b*(-numpy.expm1(-c*x))/c)
    output = numpy.where(x > 0, output, 0)
    return output
Example #12
Source File: main_helpers.py From mercari-solution with MIT License | 5 votes |
def merge_predictions(X_tr, y_tr, X_te=None, est=None, verbose=True):
    if est is None:
        est = Lasso(alpha=0.0001, precompute=True, max_iter=1000,
                    positive=True, random_state=9999, selection='random')
    est.fit(np.log1p(X_tr), np.log1p(y_tr))
    if hasattr(est, 'intercept_') and verbose:
        logger.info('merge_predictions = \n{:+.4f}\n{}'.format(
            est.intercept_,
            '\n'.join('{:+.4f} * {}'.format(coef, i)
                      for i, coef in zip(range(X_tr.shape[0]), est.coef_))))
    return (np.expm1(est.predict(np.log1p(X_tr))),
            np.expm1(est.predict(np.log1p(X_te))) if X_te is not None else None)
Example #13
Source File: test_limits.py From numdifftools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_residue_1_div_1_minus_exp_x(self):
    def fun(z):
        return -1.0 / (np.expm1(2 * z))
    res_h, err = Residue(fun, full_output=True)(0)
    assert_allclose(res_h, -0.5)
    assert err.error_estimate < 1e-14
Example #14
Source File: multicomplex.py From numdifftools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def expm1(self):
    expz1 = np.expm1(self.z1)
    return Bicomplex(expz1 * np.cos(self.z2), expz1 * np.sin(self.z2))
Example #15
Source File: test_fornberg.py From numdifftools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def fun4(z):
    return z * (0.5 + 1. / np.expm1(z))
Example #16
Source File: generalized_extreme.py From chaospy with MIT License | 5 votes |
def _ppf(self, q, c):
    x = -numpy.log(-numpy.log(q))
    return numpy.where(c == 0, x, -numpy.expm1(-c*x)/c)
Example #17
Source File: test_limits.py From numdifftools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_difficult_limit(self):
    def fun(x):
        return (x * np.exp(x) - np.expm1(x)) / x ** 2

    for path in ['radial', ]:
        lim, err = Limit(fun, path=path, full_output=True)(0)
        assert_allclose(lim, 0.5)
        assert err.error_estimate < 1e-8
Example #18
Source File: mercari_golf.py From mercari-solution with MIT License | 5 votes |
def main():
    vectorizer = make_union(
        on_field('name', Tfidf(max_features=100000, token_pattern='\w+')),
        on_field('text', Tfidf(max_features=100000, token_pattern='\w+', ngram_range=(1, 2))),
        on_field(['shipping', 'item_condition_id'],
                 FunctionTransformer(to_records, validate=False), DictVectorizer()),
        n_jobs=4)
    y_scaler = StandardScaler()
    with timer('process train'):
        train = pd.read_table('../input/train.tsv')
        train = train[train['price'] > 0].reset_index(drop=True)
        cv = KFold(n_splits=20, shuffle=True, random_state=42)
        train_ids, valid_ids = next(cv.split(train))
        train, valid = train.iloc[train_ids], train.iloc[valid_ids]
        y_train = y_scaler.fit_transform(np.log1p(train['price'].values.reshape(-1, 1)))
        X_train = vectorizer.fit_transform(preprocess(train)).astype(np.float32)
        print(f'X_train: {X_train.shape} of {X_train.dtype}')
        del train
    with timer('process valid'):
        X_valid = vectorizer.transform(preprocess(valid)).astype(np.float32)
    with ThreadPool(processes=4) as pool:
        Xb_train, Xb_valid = [x.astype(np.bool).astype(np.float32) for x in [X_train, X_valid]]
        xs = [[Xb_train, Xb_valid], [X_train, X_valid]] * 2
        y_pred = np.mean(pool.map(partial(fit_predict, y_train=y_train), xs), axis=0)
    y_pred = np.expm1(y_scaler.inverse_transform(y_pred.reshape(-1, 1))[:, 0])
    print('Valid RMSLE: {:.4f}'.format(np.sqrt(mean_squared_log_error(valid['price'], y_pred))))
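The target pipeline in main() is log1p followed by StandardScaler for training, and the exact inverse, inverse_transform followed by expm1, for predictions. A minimal round-trip sketch of that pairing (illustrative only, not the competition code):

import numpy as np
from sklearn.preprocessing import StandardScaler

prices = np.array([3.0, 10.0, 59.0, 120.0]).reshape(-1, 1)
scaler = StandardScaler()
y = scaler.fit_transform(np.log1p(prices))          # training target, as in main()
recovered = np.expm1(scaler.inverse_transform(y))   # inverse pipeline applied to predictions
assert np.allclose(recovered, prices)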
Example #19
Source File: tf_sparse.py From mercari-solution with MIT License | 5 votes |
def get_rmsle(y_true, y_pred):
    return np.sqrt(mean_squared_log_error(np.expm1(y_true), np.expm1(y_pred)))
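Because expm1 exactly inverts log1p, passing expm1-transformed values to mean_squared_log_error recovers a plain squared error in log1p space; in other words, get_rmsle works on targets that are already stored as log1p(price). A sketch of that equivalence (illustrative only, not mercari-solution code):

import numpy as np
from sklearn.metrics import mean_squared_log_error

y_true_log = np.log1p(np.array([10.0, 25.0, 99.0]))   # targets kept in log1p space
y_pred_log = y_true_log + 0.1                         # predictions in the same space

rmsle = np.sqrt(mean_squared_log_error(np.expm1(y_true_log), np.expm1(y_pred_log)))
rmse_in_log_space = np.sqrt(np.mean((y_true_log - y_pred_log) ** 2))
assert np.isclose(rmsle, rmse_in_log_space)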
Example #20
Source File: estimate_img_params.py From panaroo with MIT License | 5 votes |
def log1mexp(a):
    if a < np.log(0.5):
        return np.log1p(-np.exp(a))
    return np.log(-np.expm1(a))
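Switching between log1p(-exp(a)) and log(-expm1(a)) at a = log(1/2) is the standard trick for evaluating log(1 - exp(a)) accurately over the whole range a < 0. A quick numerical illustration in plain NumPy (not panaroo code):

import numpy as np

a = -1e-10                      # 1 - exp(a) is ~1e-10, so a naive subtraction cancels badly
print(np.log(1 - np.exp(a)))    # naive form: only a few correct digits
print(np.log(-np.expm1(a)))     # branch used above for a >= log(0.5): accurate here
print(np.log1p(-np.exp(a)))     # branch used above for a < log(0.5): would lose precision here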
Example #21
Source File: main_helpers.py From mercari-solution with MIT License | 5 votes |
def predict_models(X, fitted_models, vectorizer=None, parallel='thread'):
    if vectorizer:
        # TODO: parallelize this
        with Timer('Transforming data'):
            X = vectorizer.transform(X)
    predict_one_ = partial(predict_one, X=X)
    preds = map_parallel(predict_one_, fitted_models, parallel)
    return np.expm1(np.vstack(preds).T)
Example #22
Source File: risk.py From InplusTrader_Linux with MIT License | 5 votes |
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
    assert(len(daily_returns) == len(benchmark_daily_returns))

    self._portfolio = daily_returns
    self._benchmark = benchmark_daily_returns
    self._risk_free_rate = risk_free_rate
    self._annual_factor = _annual_factor(period)
    self._daily_risk_free_rate = self._risk_free_rate / self._annual_factor

    self._alpha = None
    self._beta = None
    self._sharpe = None
    self._return = np.expm1(np.log1p(self._portfolio).sum())
    self._annual_return = (1 + self._return) ** (365 / days) - 1
    self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
    self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
    self._max_drawdown = None
    self._volatility = None
    self._annual_volatility = None
    self._benchmark_volatility = None
    self._benchmark_annual_volatility = None
    self._information_ratio = None
    self._sortino = None
    self._tracking_error = None
    self._annual_tracking_error = None
    self._downside_risk = None
    self._annual_downside_risk = None
    self._calmar = None
    self._avg_excess_return = None
Example #23
Source File: risk.py From InplusTrader_Linux with MIT License | 5 votes |
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
    assert(len(daily_returns) == len(benchmark_daily_returns))

    self._portfolio = daily_returns
    self._benchmark = benchmark_daily_returns
    self._risk_free_rate = risk_free_rate
    self._annual_factor = _annual_factor(period)

    self._alpha = None
    self._beta = None
    self._sharpe = None
    self._return = np.expm1(np.log1p(self._portfolio).sum())
    self._annual_return = (1 + self._return) ** (365 / days) - 1
    # self._annual_return = (1 + self._return) ** (self._annual_factor / len(self._portfolio)) - 1
    self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
    self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
    # self._benchmark_annual_return = (1 + self._benchmark_return) ** (self._annual_factor / len(self._portfolio)) - 1
    self._max_drawdown = None
    self._volatility = None
    self._annual_volatility = None
    self._benchmark_volatility = None
    self._benchmark_annual_volatility = None
    self._information_ratio = None
    self._sortino = None
    self._tracking_error = None
    self._annual_tracking_error = None
    self._downside_risk = None
    self._annual_downside_risk = None
    self._calmar = None
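In both risk.py constructors, np.expm1(np.log1p(returns).sum()) compounds the daily returns: it equals prod(1 + r) - 1 but sums logs instead of multiplying many factors near 1. A minimal check of that equivalence (illustrative only, not InplusTrader code):

import numpy as np

daily_returns = np.array([0.001, -0.002, 0.0005, 0.003])
compounded = np.expm1(np.log1p(daily_returns).sum())   # form used in risk.py above
direct = np.prod(1.0 + daily_returns) - 1.0            # straightforward compounding
assert np.isclose(compounded, direct)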
Example #24
Source File: copularnd.py From copula-py with GNU General Public License v3.0 | 5 votes |
def _frank(M, N, alpha):
    if(N < 2):
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif(N == 2):
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if abs(alpha) > math.log(sys.float_info.max):
            u2 = (u1 < 0).astype(int) + np.sign(alpha)*u1  # u1 or 1-u1
        elif abs(alpha) > math.sqrt(np.spacing(1)):
            u2 = -1*np.log((np.exp(-alpha*u1)*(1-p)/p + np.exp(-alpha))/(1 + np.exp(-alpha*u1)*(1-p)/p))/alpha
        else:
            u2 = p

        U = np.column_stack((u1, u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure, as well as the
        # paper: "High Dimensional Archimedean Copula Generation Algorithm"
        if(alpha <= 0):
            raise ValueError('For N>=3, alpha >0 in Frank Copula')

        U = np.empty((M, N))
        # v_vec = np.empty(M)
        for ii in range(0, M):
            p = -1.0*np.expm1(-1*alpha)
            if(p == 1):  # boundary case protection
                p = 1 - np.spacing(1)
            v = logser.rvs(p, size=1)
            # v_vec[ii] = v
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1*np.log(x_i)/v
            U[ii, :] = -1.0*np.log1p(np.exp(-t)*np.expm1(-1.0*alpha))/alpha
        # sio.savemat('logser_v.mat', {'v': v_vec})
    return U
Example #25
Source File: copulacdf.py From copula-py with GNU General Public License v3.0 | 5 votes |
def _frank(u, alpha):
    # C(u1,u2) = -(1/alpha)*log(1 + (exp(-alpha*u1)-1)*(exp(-alpha*u2)-1)/(exp(-alpha)-1))
    if(alpha == 0):
        y = np.prod(u, 1)
    else:
        tmp1 = np.exp(-alpha*np.sum(u, 1)) - np.sum(np.exp(-alpha*u), 1)
        y = -np.log((np.exp(-alpha) + tmp1) / np.expm1(-alpha)) / alpha
    return y
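Assuming the _frank defined in this example is in scope, a quick illustrative check that the rearranged expm1 expression agrees with the textbook formula quoted in the comment (this check is not part of copula-py):

import numpy as np

alpha = 2.5
u = np.array([[0.3, 0.7], [0.1, 0.9]])
rearranged = _frank(u, alpha)
textbook = -np.log(1 + (np.exp(-alpha * u[:, 0]) - 1) * (np.exp(-alpha * u[:, 1]) - 1)
                   / (np.exp(-alpha) - 1)) / alpha
assert np.allclose(rearranged, textbook)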
Example #26
Source File: ops.py From MyGrad with MIT License | 5 votes |
def __call__(self, a):
    self.variables = (a,)
    return np.expm1(a.data)
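This forward pass returns expm1(a); the useful fact for any backward pass is that d/dx expm1(x) = exp(x) = expm1(x) + 1. A plain-NumPy finite-difference check of that derivative (an illustrative sketch, not MyGrad's actual backward implementation):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
h = 1e-6
numeric_grad = (np.expm1(x + h) - np.expm1(x - h)) / (2 * h)   # central difference
analytic_grad = np.expm1(x) + 1.0                              # equals np.exp(x)
assert np.allclose(numeric_grad, analytic_grad, atol=1e-5)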
Example #27
Source File: batch_generator.py From deep-quant with MIT License | 5 votes |
def get_raw_inputs(self, batch, idx, vec):
    len1 = len(self._fin_colidxs)
    len2 = len(self._aux_colidxs)
    n = batch.normalizers[idx]
    y = vec[0:len1]
    if self._log_squasher is True:
        y = np.multiply(np.sign(y), np.expm1(np.fabs(y)))
    y = n * y
    if len2 > 0 and len(vec) > len1:
        assert(len(vec) == len1+len2)
        y = np.append(y, vec[len1:len1+len2])
    return y
Example #28
Source File: frank.py From copulae with MIT License | 5 votes |
def dtau(self, x=None):  # pragma: no cover
    if x is None:
        x = self.params
    return (x / np.expm1(x) + 1 - debye1(x) / x) * (2 / x) ** 2
Example #29
Source File: frank.py From copulae with MIT License | 5 votes |
def drho(self, x=None):  # pragma: no cover
    if x is None:
        x = self.params
    return 12 * (x / np.expm1(x) - 3 * debye1(x) + 2 * debye1(x)) / x ** 2
Example #30
Source File: log.py From copulae with MIT License | 5 votes |
def random_log_series_ln1p(alpha: float, size: Numeric = 1):
    """"""
    if alpha < -3:
        # random variate following the logarithmic (series) distribution Log(alpha)
        p = -alpha / np.log(1 - np.expm1(alpha))
        u = np.random.uniform(size=size)
        up = u > p
        x = 1
        while np.any(up):
            u[up] -= p
            x += 1
            p *= alpha * (x - 1) / x
            up = u > p
        return u
    else:
        # random variate following the logarithmic (series) distribution Log(1 - e^h)
        e = -np.expm1(alpha)
        u1 = np.random.uniform(size=size)
        u2 = np.random.uniform(size=size)

        h = u2 * alpha
        q = -np.expm1(h)
        log_q = np.where(h > -0.69314718055994530942,
                         np.log(-np.expm1(h)), np.log1p(-np.exp(h)))

        mask1 = u1 > e
        mask2 = u1 < q * q
        mask3 = mask2 & (log_q == 0)
        mask4 = mask2 & (log_q != 0)
        mask5 = ~mask2 & (u1 > q)
        mask6 = ~mask2 & (u1 <= q)

        u1[mask6] = 2
        with np.errstate(invalid='ignore'):
            u1[mask4] = 1 + np.floor(np.log(u1 / log_q))[mask4]
        u1[mask3] = np.inf
        u1[mask1 | mask5] = 1

        return u1