Python scipy.optimize.fmin_powell() Examples
The following are 19 code examples of scipy.optimize.fmin_powell(). Each example is annotated with its source file, originating project, license, and vote count. You may also want to check out all available functions and classes of the module scipy.optimize.
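Before the project examples, here is a minimal, self-contained sketch of the call (my own illustration, not drawn from the projects below): it minimizes a simple quadratic and, with full_output=1, unpacks the return tuple of minimizer, function value, final direction set, iteration count, function-call count, and warning flag.

# Minimal sketch of scipy.optimize.fmin_powell (not from the projects below).
import numpy as np
from scipy.optimize import fmin_powell

def quadratic(x):
    # Simple convex objective with minimum at (1, -2)
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

xopt, fopt, direc, n_iter, funcalls, warnflag = fmin_powell(
    quadratic, x0=[0.0, 0.0], full_output=1, disp=0)
print(xopt)      # approximately [1., -2.]
print(warnflag)  # 0 means the optimizer converged normally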
Example #1
Source File: PyRate.py From PyRate with GNU Affero General Public License v3.0 | 6 votes |
def get_rate_HP(n, target_k, hp_gamma_shape):
    def estK(alpha, N):
        return sum([alpha / (alpha + i - 1) for i in range(1, int(N + 1))])

    def opt_gamma_rate(a):
        a = abs(a[0])
        ea = estK(a, n)
        return exp(abs(ea - target_k))

    # from scipy.optimize import fmin_powell as Fopt1
    opt = Fopt1(opt_gamma_rate, [np.array(0.001)], full_output=1, disp=0)
    expected_cp = abs(opt[0])
    hp_gamma_rate = expected_cp / hp_gamma_shape
    return hp_gamma_rate

####### END FUNCTIONS for DIRICHLET PROCESS PRIOR #######
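This snippet relies on names that PyRate.py defines elsewhere (the commented-out import hints that Fopt1 is an alias for fmin_powell). A hedged guess at the minimal preamble needed to run it in isolation, with a hypothetical call:

# Assumed preamble for running the snippet standalone; PyRate.py provides
# these names elsewhere (the commented-out import above hints at Fopt1).
import numpy as np
from numpy import exp
from scipy.optimize import fmin_powell as Fopt1

# Example call (hypothetical values): pick the gamma rate hyperparameter so
# that roughly 5 rate categories are expected a priori among 100 samples.
print(get_rate_HP(100, 5, 2.0))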
Example #2
Source File: PyRate.py From PyRate with GNU Affero General Public License v3.0 | 6 votes |
def get_rate_HP(n, target_k, hp_gamma_shape):
    def estK(alpha, N):
        return sum([alpha / (alpha + i - 1) for i in range(1, int(N + 1))])

    def opt_gamma_rate(a):
        a = abs(a[0])
        ea = estK(a, n)
        return exp(abs(ea - target_k))

    # from scipy.optimize import fmin_powell as Fopt1
    opt = Fopt1(opt_gamma_rate, [np.array(0.001)], full_output=1, disp=0)
    expected_cp = abs(opt[0])
    hp_gamma_rate = expected_cp / hp_gamma_shape
    return hp_gamma_rate

####### END FUNCTIONS for DIRICHLET PROCESS PRIOR #######
Example #3
Source File: model_utils.py From recsys2019 with Apache License 2.0 | 6 votes |
def validate_models(self, n_users, n_debug=None):
    df_train, df_val = self.load_train_val(n_users, n_debug=n_debug)
    preds_mat = np.vstack(
        [model.fit_and_predict(df_train, df_val, validate=True) for model in self.models]
    ).T

    def opt_coefs(coefs):
        preds = preds_mat.dot(coefs)
        df_val["preds"] = preds
        mrr = mrr_fast(df_val, "preds")
        print(mrr, coefs)
        return -mrr

    best_coefs = fmin(opt_coefs, [model.weight for model in self.models])
    best_coefs = fmin_powell(opt_coefs, best_coefs)
    preds = preds_mat.dot(best_coefs)
    df_val["click_proba"] = preds
    print("MRR {:4f}".format(mrr_fast(df_val, "click_proba")))
    print("Best coefs: ", best_coefs)
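The example above learns ensemble blend weights by first running the simplex-based fmin and then refining the result with fmin_powell. A simplified, self-contained sketch of that two-stage pattern, using synthetic predictions and a plain mean-squared-error objective in place of recsys2019's mrr_fast:

# Simplified sketch of the two-stage fmin -> fmin_powell blending above,
# using synthetic predictions and mean squared error instead of MRR.
import numpy as np
from scipy.optimize import fmin, fmin_powell

rng = np.random.default_rng(0)
target = rng.normal(size=500)
# Two imperfect "models": noisy copies of the target.
preds_mat = np.vstack([target + rng.normal(scale=0.5, size=500),
                       target + rng.normal(scale=1.0, size=500)]).T

def opt_coefs(coefs):
    blended = preds_mat.dot(coefs)
    return np.mean((blended - target) ** 2)

start = [0.5, 0.5]
coarse = fmin(opt_coefs, start, disp=0)        # simplex (Nelder-Mead) first pass
best = fmin_powell(opt_coefs, coarse, disp=0)  # Powell refinement
print(best, opt_coefs(best))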
Example #4
Source File: bayesreg.py From nispat with GNU General Public License v3.0 | 6 votes |
def estimate(self, hyp0, X, y, optimizer='cg'):
    """ Function to estimate the model """

    if optimizer.lower() == 'cg':  # conjugate gradients
        out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, (X, y),
                               disp=True, gtol=self.tol,
                               maxiter=self.n_iter, full_output=1)
    elif optimizer.lower() == 'powell':  # Powell's method
        out = optimize.fmin_powell(self.loglik, hyp0, (X, y),
                                   full_output=1)
    else:
        raise ValueError("unknown optimizer")

    self.hyp = out[0]
    self.nlZ = out[1]
    self.optimizer = optimizer

    return self.hyp
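Note how the data reach the objective here: fmin_powell's third positional argument is the args tuple, so (X, y) is forwarded to self.loglik on every evaluation. A minimal sketch of that calling convention, with a made-up toy negative log-likelihood rather than the one from bayesreg.py:

# Sketch of passing extra data to the objective via the args tuple,
# mirroring optimize.fmin_powell(self.loglik, hyp0, (X, y), full_output=1).
import numpy as np
from scipy.optimize import fmin_powell

def neg_loglik(hyp, X, y):
    # Toy Gaussian negative log-likelihood with log-variance hyp[0]
    sigma2 = np.exp(hyp[0])
    resid = y - X.mean(axis=1)
    return 0.5 * np.sum(resid ** 2 / sigma2 + np.log(sigma2))

X = np.random.randn(50, 3)
y = X.mean(axis=1) + 0.3 * np.random.randn(50)

out = fmin_powell(neg_loglik, np.zeros(1), (X, y), full_output=1, disp=0)
hyp_opt, nlZ = out[0], out[1]   # same unpacking as self.hyp, self.nlZ above
print(hyp_opt, nlZ)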
Example #5
Source File: gp.py From nispat with GNU General Public License v3.0 | 5 votes |
def estimate(self, hyp0, covfunc, X, y, optimizer='cg'):
    """ Function to estimate the model """

    if len(X.shape) == 1:
        X = X[:, np.newaxis]

    self.hyp0 = hyp0

    if optimizer.lower() == 'cg':  # conjugate gradients
        out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik,
                               (covfunc, X, y), disp=True, gtol=self.tol,
                               maxiter=self.n_iter, full_output=1)
    elif optimizer.lower() == 'powell':  # Powell's method
        out = optimize.fmin_powell(self.loglik, hyp0, (covfunc, X, y),
                                   full_output=1)
    else:
        raise ValueError("unknown optimizer")

    # Always return a 1d array. The optimizer sometimes changes dimensions.
    if len(out[0].shape) > 1:
        self.hyp = out[0].flatten()
    else:
        self.hyp = out[0]

    self.nlZ = out[1]
    self.optimizer = optimizer

    return self.hyp
Example #6
Source File: descriptive.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def test_joint_skew_kurt(self, skew0, kurt0, return_weights=False):
    """
    Returns -2 x log-likelihood and the p-value for the joint
    hypothesis test for skewness and kurtosis.

    Parameters
    ----------
    skew0 : float
        Skewness value to be tested
    kurt0 : float
        Kurtosis value to be tested
    return_weights : bool
        If True, function also returns the weights that
        maximize the likelihood ratio. Default is False.

    Returns
    -------
    test_results : tuple
        The log-likelihood ratio and p-value of the joint hypothesis test.
    """
    self.skew0 = skew0
    self.kurt0 = kurt0
    start_nuisance = np.array([self.endog.mean(), self.endog.var()])
    llr = optimize.fmin_powell(self._opt_skew_kurt, start_nuisance,
                               full_output=1, disp=0)[1]
    p_val = chi2.sf(llr, 2)
    if return_weights:
        return llr, p_val, self.new_weights.T
    return llr, p_val
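A recurring idiom in these descriptive.py examples is calling fmin_powell with full_output=1 and disp=0 and keeping only element [1] of the result, which is fopt, the minimized objective value (here the log-likelihood ratio). A toy illustration of that indexing, detached from the empirical-likelihood machinery:

# Toy illustration of the fmin_powell(..., full_output=1, disp=0)[1] idiom:
# index 0 is the minimizer, index 1 is the minimized function value (fopt).
import numpy as np
from scipy.optimize import fmin_powell

f = lambda v: (v[0] - 3.0) ** 2 + (v[1] + 1.0) ** 2 + 7.0
fopt = fmin_powell(f, np.zeros(2), full_output=1, disp=0)[1]
print(fopt)   # approximately 7.0, the minimum value of f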
Example #7
Source File: descriptive.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def test_kurt(self, kurt0, return_weights=False):
    """
    Returns -2 x log-likelihood and the p-value for the hypothesized
    kurtosis.

    Parameters
    ----------
    kurt0 : float
        Kurtosis value to be tested
    return_weights : bool
        If True, function also returns the weights that
        maximize the likelihood ratio. Default is False.

    Returns
    -------
    test_results : tuple
        The log-likelihood ratio and p-value of kurt0
    """
    self.kurt0 = kurt0
    start_nuisance = np.array([self.endog.mean(), self.endog.var()])
    llr = optimize.fmin_powell(self._opt_kurt, start_nuisance,
                               full_output=1, disp=0)[1]
    p_val = chi2.sf(llr, 1)
    if return_weights:
        return llr, p_val, self.new_weights.T
    return llr, p_val
Example #8
Source File: descriptive.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def test_skew(self, skew0, return_weights=False):
    """
    Returns -2 x log-likelihood and p-value for the hypothesized
    skewness.

    Parameters
    ----------
    skew0 : float
        Skewness value to be tested
    return_weights : bool
        If True, function also returns the weights that
        maximize the likelihood ratio. Default is False.

    Returns
    -------
    test_results : tuple
        The log-likelihood ratio and p_value of skew0
    """
    self.skew0 = skew0
    start_nuisance = np.array([self.endog.mean(), self.endog.var()])
    llr = optimize.fmin_powell(self._opt_skew, start_nuisance,
                               full_output=1, disp=0)[1]
    p_val = chi2.sf(llr, 1)
    if return_weights:
        return llr, p_val, self.new_weights.T
    return llr, p_val
Example #9
Source File: optimizer.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
                maxiter=100, callback=None, retall=False,
                full_output=True, hess=None):
    xtol = kwargs.setdefault('xtol', 0.0001)
    ftol = kwargs.setdefault('ftol', 0.0001)
    maxfun = kwargs.setdefault('maxfun', None)
    start_direc = kwargs.setdefault('start_direc', None)
    retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
                                   ftol=ftol, maxiter=maxiter,
                                   maxfun=maxfun, full_output=full_output,
                                   disp=disp, retall=retall,
                                   callback=callback, direc=start_direc)
    if full_output:
        if not retall:
            xopt, fopt, direc, niter, fcalls, warnflag = retvals
        else:
            xopt, fopt, direc, niter, fcalls, warnflag, allvecs = \
                retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
                   'fcalls': fcalls, 'warnflag': warnflag,
                   'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        xopt = retvals
        retvals = None

    return xopt, retvals
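Given the _fit_powell wrapper above, a hedged usage sketch follows; the helper is a private module function, so calling it directly like this, with a toy objective and hypothetical arguments, is purely illustrative.

# Hypothetical direct call to the _fit_powell helper defined above.
import numpy as np
from scipy import optimize   # _fit_powell uses optimize.fmin_powell internally

objective = lambda params: np.sum((params - np.array([1.0, 2.0])) ** 2)
xopt, retvals = _fit_powell(objective, score=None,
                            start_params=np.zeros(2),
                            fargs=(), kwargs={}, disp=0)
print(xopt)                    # approximately [1., 2.]
print(retvals['converged'])    # True if warnflag was 0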
Example #10
Source File: PatternFitter.py From pax with BSD 3-Clause "New" or "Revised" License | 5 votes |
def minimize_gof_powell(self, start_coordinates, areas_observed,
                        pmt_selection=None, square_syst_errors=None,
                        statistic='chi2gamma'):
    direc = None
    if self.dimensions == 2:
        # Hack to match old chi2gamma results
        s = lambda d: 1 if d < 0 else -1  # flake8: noqa
        direc = np.array([[s(start_coordinates[0]), 0],
                          [0, s(start_coordinates[1])]])

    def safe_compute_gof(*args, **kwargs):
        try:
            return self.compute_gof(*args, **kwargs)
        except CoordinateOutOfRangeException:
            return float('inf')

    # Minimize chi_square_gamma function; fmin_powell is the call to the SciPy minimizer.
    # It takes the function to minimize, a starting position and several options.
    # It returns the optimal values for the position (xopt) and function value (fopt).
    # A warnflag tells if the maximum number of iterations was exceeded:
    #   warnflag 0, OK
    #   warnflag 1, maximum function evaluations exceeded
    #   warnflag 2, maximum iterations exceeded
    rv = fmin_powell(safe_compute_gof,
                     start_coordinates,
                     direc=direc,
                     args=(areas_observed, pmt_selection, square_syst_errors, statistic),
                     xtol=0.0001, ftol=0.0001,
                     maxiter=10, maxfun=None,
                     full_output=1, disp=0, retall=0)
    xopt, fopt, direc, iter, funcalls, warnflag = rv
    # On failure the minimizer seems to give np.array([float('inf')])
    if isinstance(fopt, np.ndarray):
        fopt = float('nan')
    return xopt, fopt
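The comments above describe the direc keyword (the initial set of search directions) and the warnflag return value. A standalone sketch of supplying a custom direction matrix and checking the flag, independent of pax's goodness-of-fit machinery:

# Standalone sketch: custom initial search directions (direc) and warnflag.
import numpy as np
from scipy.optimize import fmin_powell

def gof(coords):
    # Stand-in for a goodness-of-fit statistic: a shifted paraboloid
    return (coords[0] - 0.5) ** 2 + (coords[1] + 0.25) ** 2

direc = np.array([[-1.0, 0.0],
                  [0.0, 1.0]])   # start searching along -x, then +y

xopt, fopt, direc_out, n_iter, funcalls, warnflag = fmin_powell(
    gof, [0.0, 0.0], direc=direc, xtol=0.0001, ftol=0.0001,
    maxiter=10, full_output=1, disp=0, retall=0)
print(xopt, fopt)
print(warnflag)   # 0 = converged, 1 = too many function calls, 2 = too many iterations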
Example #11
Source File: recipe-578869.py From code with MIT License | 5 votes |
def test_minimise(self):
    print '#################################'
    print '# Test Equilibrium Loss Wager'
    print '#################################'
    wager_multiplier = fmin_powell(Root2, x0=1., maxiter=20)
    print "highest survivability following loss, multiply wager by %2.4f %% " % (wager_multiplier * 100)
Example #12
Source File: optimizer.py From vnpy_crypto with MIT License | 5 votes |
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
                maxiter=100, callback=None, retall=False,
                full_output=True, hess=None):
    xtol = kwargs.setdefault('xtol', 0.0001)
    ftol = kwargs.setdefault('ftol', 0.0001)
    maxfun = kwargs.setdefault('maxfun', None)
    start_direc = kwargs.setdefault('start_direc', None)
    retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
                                   ftol=ftol, maxiter=maxiter,
                                   maxfun=maxfun, full_output=full_output,
                                   disp=disp, retall=retall,
                                   callback=callback, direc=start_direc)
    if full_output:
        if not retall:
            xopt, fopt, direc, niter, fcalls, warnflag = retvals
        else:
            xopt, fopt, direc, niter, fcalls, warnflag, allvecs = \
                retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
                   'fcalls': fcalls, 'warnflag': warnflag,
                   'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        xopt = retvals
        retvals = None

    return xopt, retvals
Example #13
Source File: test_optimize.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_powell(self):
    # Powell (direction set) optimization routine
    if self.use_wrapper:
        opts = {'maxiter': self.maxiter, 'disp': self.disp,
                'return_all': False}
        res = optimize.minimize(self.func, self.startparams, args=(),
                                method='Powell', options=opts)
        params, fopt, direc, numiter, func_calls, warnflag = (
            res['x'], res['fun'], res['direc'], res['nit'],
            res['nfev'], res['status'])
    else:
        retval = optimize.fmin_powell(self.func, self.startparams,
                                      args=(), maxiter=self.maxiter,
                                      full_output=True, disp=self.disp,
                                      retall=False)
        (params, fopt, direc, numiter, func_calls, warnflag) = retval

    assert_allclose(self.func(params), self.func(self.solution),
                    atol=1e-6)

    # Ensure that function call counts are 'known good'; these are from
    # Scipy 0.7.0. Don't allow them to increase.
    #
    # However, some leeway must be added: the exact evaluation
    # count is sensitive to numerical error, and floating-point
    # computations are not bit-for-bit reproducible across
    # machines, and when using e.g. MKL, data alignment
    # etc. affect the rounding error.
    #
    assert_(self.funccalls <= 116 + 20, self.funccalls)
    assert_(self.gradcalls == 0, self.gradcalls)

    # Ensure that the function behaves the same; this is from Scipy 0.7.0
    assert_allclose(self.trace[34:39],
                    [[0.72949016, -0.44156936, 0.47100962],
                     [0.72949016, -0.44156936, 0.48052496],
                     [1.45898031, -0.88313872, 0.95153458],
                     [0.72949016, -0.44156936, 0.47576729],
                     [1.72949016, -0.44156936, 0.47576729]],
                    atol=1e-14, rtol=1e-7)
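This SciPy test exercises the same routine through both entry points: the minimize() wrapper with method='Powell' and the legacy fmin_powell function. A condensed sketch of that equivalence, minimizing the classic Rosenbrock function both ways and comparing the answers:

# Condensed sketch of the equivalence the test above checks:
# optimize.minimize(..., method='Powell') versus the legacy fmin_powell API.
import numpy as np
from scipy import optimize

x0 = np.array([-1.2, 1.0])

res = optimize.minimize(optimize.rosen, x0, method='Powell',
                        options={'maxiter': 1000, 'disp': False})
xopt = optimize.fmin_powell(optimize.rosen, x0, maxiter=1000, disp=False)

print(res.x, xopt)   # both should be close to [1., 1.]
print(np.allclose(res.x, xopt, atol=1e-4))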
Example #14
Source File: test_optimize.py From Computable with MIT License | 5 votes |
def test_powell(self, use_wrapper=False):
    """ Powell (direction set) optimization routine """
    if use_wrapper:
        opts = {'maxiter': self.maxiter, 'disp': False,
                'return_all': False}
        res = optimize.minimize(self.func, self.startparams, args=(),
                                method='Powell', options=opts)
        params, fopt, direc, numiter, func_calls, warnflag = \
            res['x'], res['fun'], res['direc'], res['nit'], \
            res['nfev'], res['status']
    else:
        retval = optimize.fmin_powell(self.func, self.startparams,
                                      args=(), maxiter=self.maxiter,
                                      full_output=True, disp=False,
                                      retall=False)
        (params, fopt, direc, numiter, func_calls, warnflag) = retval

    assert_allclose(self.func(params), self.func(self.solution),
                    atol=1e-6)

    # Ensure that function call counts are 'known good'; these are from
    # Scipy 0.7.0. Don't allow them to increase.
    #
    # However, some leeway must be added: the exact evaluation
    # count is sensitive to numerical error, and floating-point
    # computations are not bit-for-bit reproducible across
    # machines, and when using e.g. MKL, data alignment
    # etc. affect the rounding error.
    #
    assert_(self.funccalls <= 116 + 20, self.funccalls)
    assert_(self.gradcalls == 0, self.gradcalls)

    # Ensure that the function behaves the same; this is from Scipy 0.7.0
    assert_allclose(self.trace[34:39],
                    [[0.72949016, -0.44156936, 0.47100962],
                     [0.72949016, -0.44156936, 0.48052496],
                     [1.45898031, -0.88313872, 0.95153458],
                     [0.72949016, -0.44156936, 0.47576729],
                     [1.72949016, -0.44156936, 0.47576729]],
                    atol=1e-14, rtol=1e-7)
Example #15
Source File: descriptive.py From vnpy_crypto with MIT License | 5 votes |
def test_joint_skew_kurt(self, skew0, kurt0, return_weights=False):
    """
    Returns -2 x log-likelihood and the p-value for the joint
    hypothesis test for skewness and kurtosis.

    Parameters
    ----------
    skew0 : float
        Skewness value to be tested
    kurt0 : float
        Kurtosis value to be tested
    return_weights : bool
        If True, function also returns the weights that
        maximize the likelihood ratio. Default is False.

    Returns
    -------
    test_results : tuple
        The log-likelihood ratio and p-value of the joint hypothesis test.
    """
    self.skew0 = skew0
    self.kurt0 = kurt0
    start_nuisance = np.array([self.endog.mean(), self.endog.var()])
    llr = optimize.fmin_powell(self._opt_skew_kurt, start_nuisance,
                               full_output=1, disp=0)[1]
    p_val = chi2.sf(llr, 2)
    if return_weights:
        return llr, p_val, self.new_weights.T
    return llr, p_val
Example #16
Source File: descriptive.py From vnpy_crypto with MIT License | 5 votes |
def test_kurt(self, kurt0, return_weights=False):
    """
    Returns -2 x log-likelihood and the p-value for the hypothesized
    kurtosis.

    Parameters
    ----------
    kurt0 : float
        Kurtosis value to be tested
    return_weights : bool
        If True, function also returns the weights that
        maximize the likelihood ratio. Default is False.

    Returns
    -------
    test_results : tuple
        The log-likelihood ratio and p-value of kurt0
    """
    self.kurt0 = kurt0
    start_nuisance = np.array([self.endog.mean(), self.endog.var()])
    llr = optimize.fmin_powell(self._opt_kurt, start_nuisance,
                               full_output=1, disp=0)[1]
    p_val = chi2.sf(llr, 1)
    if return_weights:
        return llr, p_val, self.new_weights.T
    return llr, p_val
Example #17
Source File: descriptive.py From vnpy_crypto with MIT License | 5 votes |
def test_skew(self, skew0, return_weights=False):
    """
    Returns -2 x log-likelihood and p-value for the hypothesized
    skewness.

    Parameters
    ----------
    skew0 : float
        Skewness value to be tested
    return_weights : bool
        If True, function also returns the weights that
        maximize the likelihood ratio. Default is False.

    Returns
    -------
    test_results : tuple
        The log-likelihood ratio and p_value of skew0
    """
    self.skew0 = skew0
    start_nuisance = np.array([self.endog.mean(), self.endog.var()])
    llr = optimize.fmin_powell(self._opt_skew, start_nuisance,
                               full_output=1, disp=0)[1]
    p_val = chi2.sf(llr, 1)
    if return_weights:
        return llr, p_val, self.new_weights.T
    return llr, p_val
Example #18
Source File: elanova.py From Splunking-Crime with GNU Affero General Public License v3.0 | 4 votes |
def compute_ANOVA(self, mu=None, mu_start=0, return_weights=0):
    """
    Returns -2 log likelihood, the p-value and the maximum likelihood
    estimate for a common mean.

    Parameters
    ----------
    mu : float
        If a mu is specified, ANOVA is conducted with mu as the
        common mean. Otherwise, the common mean is the maximum
        empirical likelihood estimate of the common mean.
        Default is None.
    mu_start : float
        Starting value for the common mean if a specific mu is not
        specified. Default = 0.
    return_weights : bool
        If True, returns the weights on observations that maximize
        the likelihood. Default is False.

    Returns
    -------
    res : tuple
        The log-likelihood, p-value and estimate for the common mean.
    """
    if mu is not None:
        llr = self._opt_common_mu(mu)
        pval = 1 - chi2.cdf(llr, self.num_groups - 1)
        if return_weights:
            return llr, pval, mu, self.new_weights
        else:
            return llr, pval, mu
    else:
        res = optimize.fmin_powell(self._opt_common_mu, mu_start,
                                   full_output=1, disp=False)
        llr = res[1]
        mu_common = float(res[0])
        pval = 1 - chi2.cdf(llr, self.num_groups - 1)
        if return_weights:
            return llr, pval, mu_common, self.new_weights
        else:
            return llr, pval, mu_common
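One detail worth noting: even for this single-parameter search, fmin_powell hands the minimizer back as a NumPy array rather than a plain float, which is why the code wraps res[0] in float(). A quick standalone illustration of that behaviour (my observation, not part of the source above):

# Quick check that a one-parameter fmin_powell run still returns the
# minimizer as a NumPy array, motivating float(res[0]) in compute_ANOVA above.
import numpy as np
from scipy.optimize import fmin_powell

res = fmin_powell(lambda mu: (mu[0] - 4.2) ** 2, [0.0],
                  full_output=1, disp=False)
print(type(res[0]))    # <class 'numpy.ndarray'>, not a Python float
print(float(res[0]))   # approximately 4.2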
Example #19
Source File: elanova.py From vnpy_crypto with MIT License | 4 votes |
def compute_ANOVA(self, mu=None, mu_start=0, return_weights=0):
    """
    Returns -2 log likelihood, the p-value and the maximum likelihood
    estimate for a common mean.

    Parameters
    ----------
    mu : float
        If a mu is specified, ANOVA is conducted with mu as the
        common mean. Otherwise, the common mean is the maximum
        empirical likelihood estimate of the common mean.
        Default is None.
    mu_start : float
        Starting value for the common mean if a specific mu is not
        specified. Default = 0.
    return_weights : bool
        If True, returns the weights on observations that maximize
        the likelihood. Default is False.

    Returns
    -------
    res : tuple
        The log-likelihood, p-value and estimate for the common mean.
    """
    if mu is not None:
        llr = self._opt_common_mu(mu)
        pval = 1 - chi2.cdf(llr, self.num_groups - 1)
        if return_weights:
            return llr, pval, mu, self.new_weights
        else:
            return llr, pval, mu
    else:
        res = optimize.fmin_powell(self._opt_common_mu, mu_start,
                                   full_output=1, disp=False)
        llr = res[1]
        mu_common = float(res[0])
        pval = 1 - chi2.cdf(llr, self.num_groups - 1)
        if return_weights:
            return llr, pval, mu_common, self.new_weights
        else:
            return llr, pval, mu_common