Python scipy.optimize.differential_evolution() Examples
The following are 20 code examples of scipy.optimize.differential_evolution(). You can go to the original project or source file by following the attribution above each example. You may also want to check out all available functions/classes of the module scipy.optimize.
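Before the project examples below, here is a minimal self-contained sketch of the basic call pattern: differential_evolution takes an objective function and a sequence of (min, max) bounds, one pair per parameter, and returns an OptimizeResult whose x attribute holds the best parameter vector found.

from scipy.optimize import differential_evolution, rosen

# Minimize the 2-D Rosenbrock function; the global minimum is at (1, 1).
bounds = [(-5, 5), (-5, 5)]
result = differential_evolution(rosen, bounds, seed=42)
print(result.x, result.fun)  # approximately [1. 1.] and 0.0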
Example #1
Source File: test__differential_evolution.py From GraphicDesignPatternByPython with MIT License
def test_bounds_checking(self):
    # test that the bounds checking works
    func = rosen
    bounds = [(-3, None)]
    assert_raises(ValueError, differential_evolution, func, bounds)
    bounds = [(-3)]
    assert_raises(ValueError, differential_evolution, func, bounds)
    bounds = [(-3, 3), (3, 4, 5)]
    assert_raises(ValueError, differential_evolution, func, bounds)
Example #2
Source File: baseline_simple.py From graph-generation with MIT License
def Graph_generator_baseline_train_optimizationbased(graphs, generator='BA', metric='degree'):
    graph_nodes = [graphs[i].number_of_nodes() for i in range(len(graphs))]
    parameter = {}
    for i in range(len(graph_nodes)):
        print('graph ', i)
        nodes = graph_nodes[i]
        if generator == 'BA':
            n = nodes
            m = optimizer_brute(1, 10, 1, nodes, graphs[i], generator, metric)
            parameter_temp = [n, m, 1]
        elif generator == 'Gnp':
            n = nodes
            p = optimizer_brute(1e-6, 1, 0.01, nodes, graphs[i], generator, metric)
            ## if use evolution
            # result = opt.differential_evolution(Loss, bounds=[(0, 1)],
            #         args=(nodes, graphs[i], generator, metric), maxiter=1000)
            # p = result.x
            parameter_temp = [n, p, 1]
        # update parameter list
        if nodes not in parameter.keys():
            parameter[nodes] = parameter_temp
        else:
            count = parameter[nodes][2]
            parameter[nodes] = [(parameter[nodes][i] * count + parameter_temp[i]) / (count + 1)
                                for i in range(len(parameter[nodes]))]
            parameter[nodes][2] = count + 1
    print(parameter)
    return parameter
Example #3
Source File: nodes.py From quantum-honeycomp with GNU General Public License v3.0
def degenerate_points(h, n=0):
    """Return the points in the Brillouin zone that have a node
    in the bandstructure"""
    from scipy.optimize import differential_evolution
    bounds = [(0., 1.) for i in range(h.dimensionality)]
    hk_gen = h.get_hk_gen()  # generator
    def get_point(x0):
        def f(k):  # conduction band eigenvalues
            hk = hk_gen(k)  # Hamiltonian
            es = lg.eigvalsh(hk)  # get eigenvalues
            return abs(es[n] - es[n + 1])  # gap
        res = differential_evolution(f, bounds=bounds)  # minimize
        return res.x
    x0 = np.random.random(h.dimensionality)  # initial vector
    return get_point(x0)  # get the k-point
Example #4
Source File: parameterestimation.py From tellurium with Apache License 2.0
def run(self, func=None):
    """Allows the user to set the data from a file.

    This data is to be compared with the simulated data in the process
    of parameter estimation.

    Args:
        func: An optional variable with default value (None), which by
            default runs scipy's differential evolution. Users can provide
            a reference to their own optimizer function as an argument.

    Returns:
        The value of the parameter(s) estimated by the function provided.

    .. sectionauthor:: Shaik Asifullah <s.asifullah7@gmail.com>

    """
    self._parameter_names = self.bounds.keys()
    self._parameter_bounds = self.bounds.values()
    self._model_roadrunner = te.loada(self.model.model)
    x_data = self.data[:, 0]
    y_data = self.data[:, 1:]
    arguments = (x_data, y_data)
    if func is None:
        # default optimizer: scipy's differential evolution
        result = differential_evolution(self._SSE, self._parameter_bounds,
                                        args=arguments)
    else:
        # user-supplied optimizer
        result = func(self._SSE, self._parameter_bounds, args=arguments)
    return result.x
Example #5
Source File: ewm_opt.py From xam with MIT License
def calc_optimized_ewm(series, shift=1, metric=metrics.mean_squared_error,
                       adjust=False, eps=10e-5, **kwargs):

    def f(alpha):
        shifted_ewm = _calc_shifted_ewm(
            series=series,
            shift=shift,
            alpha=min(max(alpha, 0), 1),
            adjust=adjust
        )
        corr = metric(series[shift:], shifted_ewm[shift:])
        return corr

    res = optimize.differential_evolution(func=f, bounds=[(0 + eps, 1 - eps)], **kwargs)

    return _calc_shifted_ewm(series=series, shift=shift, alpha=res['x'][0], adjust=adjust)
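A hypothetical usage sketch for the function above (the input data is illustrative, and the call assumes the module's private _calc_shifted_ewm helper is available alongside it):

import pandas as pd

# Illustrative series; the optimizer searches (eps, 1 - eps) for the
# smoothing factor alpha that minimizes the chosen error metric.
sales = pd.Series([12.0, 15.0, 14.0, 18.0, 21.0, 19.0, 24.0])
smoothed = calc_optimized_ewm(sales, shift=1)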
Example #6
Source File: test__differential_evolution.py From GraphicDesignPatternByPython with MIT License
def test_callback_terminates(self):
    # test that if the callback returns true, then the minimization halts
    bounds = [(0, 2), (0, 2)]

    def callback(param, convergence=0.):
        return True

    result = differential_evolution(rosen, bounds, callback=callback)

    assert_string_equal(result.message,
                        'callback function requested stop early '
                        'by returning True')
Example #7
Source File: test__differential_evolution.py From GraphicDesignPatternByPython with MIT License
def test_args_tuple_is_passed(self):
    # test that the args tuple is passed to the cost function properly.
    bounds = [(-10, 10)]
    args = (1., 2., 3.)

    def quadratic(x, *args):
        if type(args) != tuple:
            raise ValueError('args should be a tuple')
        return args[0] + args[1] * x + args[2] * x**2.

    result = differential_evolution(quadratic, bounds, args=args, polish=True)
    assert_almost_equal(result.fun, 2 / 3.)
Example #8
Source File: test__differential_evolution.py From GraphicDesignPatternByPython with MIT License
def test_init_with_invalid_strategy(self):
    # test that passing an invalid strategy raises ValueError
    func = rosen
    bounds = [(-3, 3)]
    assert_raises(ValueError,
                  differential_evolution,
                  func,
                  bounds,
                  strategy='abc')
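For contrast with the invalid 'abc' above, here is a minimal sketch passing one of the strategy names scipy actually accepts (e.g. 'best1bin', the default, or 'rand1bin'):

from scipy.optimize import differential_evolution, rosen

# A recognized strategy name is accepted without raising ValueError.
result = differential_evolution(rosen, [(-3, 3), (-3, 3)], strategy='rand1bin')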
Example #9
Source File: test__differential_evolution.py From GraphicDesignPatternByPython with MIT License
def test_quadratic_from_diff_ev(self):
    # test the quadratic function from differential_evolution function
    differential_evolution(self.quadratic, [(-100, 100)], tol=0.02)
Example #10
Source File: test__differential_evolution.py From GraphicDesignPatternByPython with MIT License
def test_gh_4511_regression(self):
    # This modification of the differential evolution docstring example
    # uses a custom popsize that had triggered an off-by-one error.
    # Because we do not care about solving the optimization problem in
    # this test, we use maxiter=1 to reduce the testing time.
    bounds = [(-5, 5), (-5, 5)]
    result = differential_evolution(rosen, bounds, popsize=1815, maxiter=1)
Example #11
Source File: functions.py From OpenOA with BSD 3-Clause "New" or "Revised" License
def logistic_5_parametric(windspeed_column, power_column):
    """The present implementation follows the filtering method reported in:

        M. Yesilbudaku, "Partitional clustering-based outlier detection for
        power curve optimization of wind turbines," 2016 IEEE International
        Conference on Renewable Energy Research and Applications (ICRERA),
        Birmingham, 2016, pp. 1080-1084.

    and the power curve method developed and reviewed in:

        M. Lydia, A.I. Selvakumar, S.S. Kumar, G.E.P. Kumar, "Advanced
        algorithms for wind turbine power curve modeling," IEEE Trans.
        Sustainable Energy, 4 (2013), pp. 827-835.

        M. Lydia, S.S. Kumar, I. Selvakumar, G.E. Prem Kumar, "A comprehensive
        review on wind turbine power curve modeling techniques," Renew. Sust.
        Energy Rev., 30 (2014), pp. 452-460.

    In this case, the function fits the five-parameter logistic function to
    observed data via a least-squares optimization (i.e. minimizing the sum
    of the squares of the residuals between the points as evaluated by the
    parameterized function and the points of observed data).

    Args:
        windspeed_column (:obj:`pandas.Series`): feature column
        power_column (:obj:`pandas.Series`): response column

    Returns:
        :obj:`function`: Python function of type (Array[float] -> Array[float])
        implementing the power curve.

    """
    return fit_parametric_power_curve(windspeed_column, power_column,
                                      curve=logistic5param,
                                      optimization_algorithm=differential_evolution,
                                      cost_function=least_squares,
                                      bounds=((1200, 1800),
                                              (-10, -1e-3),
                                              (1e-3, 30),
                                              (1e-3, 1),
                                              (1e-3, 10)))
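For orientation, a generic five-parameter logistic (L5P) curve of the kind such power-curve fits use can be sketched as below. This form is an assumption for illustration; the actual parameterization of OpenOA's logistic5param may differ.

def logistic5param_sketch(x, a, b, c, d, g):
    # Assumed generic L5P form -- OpenOA's logistic5param may be
    # parameterized differently; shown only to illustrate the five
    # degrees of freedom being bounded in the example above.
    return d + (a - d) / (1.0 + (x / c) ** b) ** g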
Example #12
Source File: baseline_simple.py From GraphRNN with MIT License
def Graph_generator_baseline_train_optimizationbased(graphs, generator='BA', metric='degree'):
    graph_nodes = [graphs[i].number_of_nodes() for i in range(len(graphs))]
    parameter = {}
    for i in range(len(graph_nodes)):
        print('graph ', i)
        nodes = graph_nodes[i]
        if generator == 'BA':
            n = nodes
            m = optimizer_brute(1, 10, 1, nodes, graphs[i], generator, metric)
            parameter_temp = [n, m, 1]
        elif generator == 'Gnp':
            n = nodes
            p = optimizer_brute(1e-6, 1, 0.01, nodes, graphs[i], generator, metric)
            ## if use evolution
            # result = opt.differential_evolution(Loss, bounds=[(0, 1)],
            #         args=(nodes, graphs[i], generator, metric), maxiter=1000)
            # p = result.x
            parameter_temp = [n, p, 1]
        # update parameter list
        if nodes not in parameter.keys():
            parameter[nodes] = parameter_temp
        else:
            count = parameter[nodes][2]
            parameter[nodes] = [(parameter[nodes][i] * count + parameter_temp[i]) / (count + 1)
                                for i in range(len(parameter[nodes]))]
            parameter[nodes][2] = count + 1
    print(parameter)
    return parameter
Example #13
Source File: fitting.py From airfoil-opt-gan with MIT License
def parsec_airfoil(airfoil):
    n_points = airfoil.shape[0]
    func = lambda x: np.linalg.norm(sythesize(x, n_points) - airfoil)
    bounds = [(0.001, 0.1),  # rle
              (1e-4, 0.5),   # x_pre
              (-0.1, 0.0),   # y_pre
              (-0.5, 0.5),   # d2ydx2_pre
              (-10, 10),     # th_pre
              (1e-4, 0.5),   # x_suc
              (0.0, 0.1),    # y_suc
              (-0.5, 0.5),   # d2ydx2_suc
              (-10, 10)      # th_suc
              ]
    bounds = np.array(bounds)
    n_restarts = 10
    opt_x = None
    opt_f = np.inf
    x0s = np.random.uniform(bounds[:, 0], bounds[:, 1],
                            size=(n_restarts, bounds.shape[0]))
    for x0 in x0s:
        x, f, _ = fmin_l_bfgs_b(func, x0, approx_grad=1, bounds=bounds, disp=1)
        if f < opt_f:
            opt_x = x
            opt_f = f
    # res = differential_evolution(func, bounds=bounds, disp=1)
    # opt_x = res.x
    # opt_f = res.fun
    print(opt_x)
    print(opt_f)
    return opt_x
Example #14
Source File: gap.py From quantum-honeycomp with GNU General Public License v3.0
def indirect_gap(h):
    """Calculates the gap for a 2d Hamiltonian by doing a kmesh sampling.
    It will return the positive energy with smaller value"""
    from scipy.optimize import minimize
    hk_gen = h.get_hk_gen()  # generator
    def gete(k):  # return the energies
        hk = hk_gen(k)  # Hamiltonian
        if h.is_sparse:
            es = algebra.smalleig(hk, numw=10)  # sparse
        else:
            es = algebra.eigvalsh(hk)  # get eigenvalues
        return es  # get the energies
    # We will assume that the chemical potential is at zero
    def func(k):  # conduction band eigenvalues
        es = gete(k)  # get eigenvalues
        try:
            es = es[es > 0.]  # conduction band
            return np.min(es)  # minimum energy
        except:
            return 0.0
    def funv(k):  # valence band eigenvalues
        es = gete(k)  # get eigenvalues
        try:
            es = -es[es < 0.]  # valence band
            return np.min(es)  # maximum energy
        except:
            return 0.0
    def funcv(k):  # gap between conduction and valence bands
        es = gete(k)  # get eigenvalues
        ec = np.min(es[es > 0.0])  # conduction band
        ev = np.min(-es[es < 0.0])  # valence band
        return ec + ev  # energy difference
    def opte(f):
        """Optimize the eigenvalues"""
        from scipy.optimize import differential_evolution
        from scipy.optimize import minimize
        bounds = [(0., 1.) for i in range(h.dimensionality)]
        x0 = np.random.random(h.dimensionality)  # initial vector
        res = differential_evolution(f, bounds=bounds)
        # res = minimize(f, res.x, method="Powell")
        return f(res.x)
    ev = opte(funv)  # optimize valence band
    # return ev
    ec = opte(func)  # optimize conduction band
    return ec + ev  # return result
    # return np.min(gaps)
Example #15
Source File: diff_evo.py From kernel_tuner with Apache License 2.0
def tune(runner, kernel_options, device_options, tuning_options):
    """ Find the best performing kernel configuration in the parameter space

    :param runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options

    :returns: A list of dictionaries for executed kernel configurations and
        their execution times. And a dictionary that contains information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()

    """
    results = []

    method = tuning_options.strategy_options.get("method", "best1bin")

    tuning_options["scaling"] = False

    # build a bounds array as needed for the optimizer
    bounds = get_bounds(tuning_options.tune_params)

    args = (kernel_options, tuning_options, runner, results)

    # call the differential evolution optimizer
    opt_result = differential_evolution(_cost_func, bounds, args, maxiter=1,
                                        polish=False, strategy=method,
                                        disp=tuning_options.verbose)

    if tuning_options.verbose:
        print(opt_result.message)

    return results, runner.dev.get_environment()
Example #16
Source File: libfit.py From sharpy with BSD 3-Clause "New" or "Revised" License
def poly_fit(kv, Yv, dyv, ddyv, method='leastsq', Bup=None):
    """
    Find the best II order fitting polynomial from the frequency response Yv
    over the frequency range kv for both continuous (ds=None) and discrete
    (ds>0) LTI systems.

    Input:
    - kv: frequency points
    - Yv: frequency response
    - dyv, ddyv: frequency responses of I and II order derivatives
    - method='leastsq','dev': algorithm for minimisation
    - Bup (only 'dev' method): bounds for bv coefficients as per
      scipy.optimize.differential_evolution. This is a length 3 array.

    Important:
    - this function attributes equal weight to each data-point!
    """

    if method == 'leastsq':
        # pointwise residual
        def funRes(bv, kv, Yv, dyv, ddyv):
            B0, B1, B2 = bv
            rv = fpoly(kv, B0, B1, B2, dyv, ddyv) - Yv
            return np.concatenate((rv.real, rv.imag))

        # solve
        bvopt, cost = scopt.leastsq(funRes, x0=[0., 0., 0.],
                                    args=(kv, Yv, dyv, ddyv))

    elif method == 'dev':
        # use genetic algorithm with objective a sum of H2 and Hinf norms of
        # residual
        def funRes(bv, kv, Yv, dyv, ddyv):
            B0, B1, B2 = bv
            rv = fpoly(kv, B0, B1, B2, dyv, ddyv) - Yv
            Nk = len(kv)
            rvsq = rv * rv.conj()
            # H2norm = np.sqrt(np.trapz(rvsq / (Nk - 1.)))
            # return H2norm + np.linalg.norm(rv, np.inf)
            return np.sum(rvsq)

        # prepare bounds: expand a scalar Bup to three symmetric bounds,
        # otherwise a length-3 Bup is required
        if np.isscalar(Bup):
            Bounds = 3 * ((-Bup, Bup),)
        else:
            assert len(Bup) == 3, 'Bup must be a length 3 list/array'
            Bounds = ((-Bup[0], Bup[0]),
                      (-Bup[1], Bup[1]),
                      (-Bup[2], Bup[2]),)

        res = scopt.differential_evolution(
            func=funRes, args=(kv, Yv, dyv, ddyv), strategy='best1bin',
            bounds=Bounds)
        bvopt = res.x
        cost = funRes(bvopt, kv, Yv, dyv, ddyv)

    return bvopt, cost
Example #17
Source File: rbf.py From pwtools with BSD 3-Clause "New" or "Revised" License
def fit_opt(points, values, method='de', what='pr', cv_kwds=dict(ns=5, nr=1),
            opt_kwds=dict(), rbf_kwds=dict()):
    """Optimize :math:`p` or :math:`(p,r)` using a cross validation error
    metric, or the direct fit error if `cv_kwds` is None.

    Parameters
    ----------
    points, values : see :class:`Rbf`
    method : str
        | 'de'  : :func:`scipy.optimize.differential_evolution`
        | 'fmin': :func:`scipy.optimize.fmin`
    what : str
        'p' or 'pr'
    cv_kwds, rbf_kwds : see :class:`FitError`
    opt_kwds : dict
        kwds for the optimizer (see `method`)
    """
    assert what in ['p', 'pr'], ("unknown `what` value: {}".format(what))
    assert method in ['de', 'fmin'], ("unknown `method` value: {}".format(method))
    fit_err = FitError(points, values, cv_kwds=cv_kwds, rbf_kwds=rbf_kwds)
    if cv_kwds is None:
        func = lambda params: fit_err.err_direct(params)
    else:
        func = lambda params: np.median(fit_err.err_cv(params))
    p0 = estimate_p(points)
    disp = opt_kwds.pop('disp', False)
    if method == 'fmin':
        if what == 'p':
            x0 = opt_kwds.pop('x0', [p0])
        elif what == 'pr':
            x0 = opt_kwds.pop('x0', [p0, 1e-8])
        xopt = optimize.fmin(func, x0, disp=disp, **opt_kwds)
    elif method == 'de':
        if what == 'p':
            bounds = opt_kwds.pop('bounds', [(0, 5 * p0)])
            assert len(bounds) == 1, "len(bounds) != 1"
        elif what == 'pr':
            bounds = opt_kwds.pop('bounds', [(0, 5 * p0), (1e-12, 1e-1)])
            assert len(bounds) == 2, "len(bounds) != 2"
        ret = optimize.differential_evolution(func, bounds=bounds, disp=disp,
                                              **opt_kwds)
        xopt = ret.x
    if what == 'pr':
        rbfi = Rbf(points, values, p=xopt[0], r=xopt[1])
    else:
        rbfi = Rbf(points, values, p=xopt[0])
    return rbfi
Example #18
Source File: abcmodel.py From RRMPG with MIT License
def fit(self, qobs, prec, initial_state=0):
    """Fit the model to a timeseries of discharge.

    This function uses scipy's global optimizer (differential evolution)
    to find a good set of parameters for the model, so that the observed
    discharge is simulated as well as possible.

    Args:
        qobs: Array of observed streamflow discharge.
        prec: Array of precipitation data.
        initial_state: (optional) Initial value for the storage.

    Returns:
        res: A scipy OptimizeResult class object.

    Raises:
        ValueError: If one of the inputs contains invalid values.
        TypeError: If one of the inputs has an incorrect datatype.

    """
    # Validation check of the inputs
    qobs = validate_array_input(qobs, np.float64, 'qobs')
    prec = validate_array_input(prec, np.float64, 'precipitation')

    # Check if there are negative precipitation values
    if check_for_negatives(prec):
        raise ValueError("The precipitation array contains negative values.")

    # Validation check of the initial state
    if not isinstance(initial_state, numbers.Number) or initial_state < 0:
        msg = ["The variable 'initial_state' must be a numerical scalar ",
               "greater than 0."]
        raise TypeError("".join(msg))

    # Cast initial state as float
    initial_state = float(initial_state)

    # pack input arguments for scipy optimizer
    args = (prec, initial_state, qobs, self._dtype)
    bnds = tuple([self._default_bounds[p] for p in self._param_list])

    # call the actual optimizer function
    res = optimize.differential_evolution(_loss, bounds=bnds, args=args)

    return res
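A hypothetical usage sketch for the fit method above (the arrays are illustrative, and `model` stands in for an instance of the RRMPG model class this method belongs to):

import numpy as np

# Illustrative inputs: one year of daily data.
prec = np.random.rand(365) * 10.0  # precipitation, all non-negative
qobs = np.random.rand(365)         # observed discharge
res = model.fit(qobs, prec, initial_state=0)
print(res.x)  # parameter set found by differential evolution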
Example #19
Source File: mix.py From dmipy with MIT License
def stochastic_objective_function(self, optimized_parameter_vector,
                                  data, acquisition_scheme, x0_params):
    """Objective function for stochastic non-linear parameter estimation
    using differential_evolution.
    """
    x0_bool_array = ~np.isnan(x0_params)

    if self.Nmodels == 1:
        # add fixed parameters if given.
        if np.all(np.isnan(x0_params)):
            parameter_vector = optimized_parameter_vector
        else:
            parameter_vector = np.empty(len(x0_bool_array))
            parameter_vector[~x0_bool_array] = optimized_parameter_vector
            parameter_vector[x0_bool_array] = x0_params[x0_bool_array]
        parameter_vector = (
            parameter_vector * self.model.scales_for_optimization)
        parameters = self.model.parameter_vector_to_parameters(
            parameter_vector)
        E_hat = self.model(acquisition_scheme, **parameters)
    elif self.Nmodels > 1:
        if np.all(np.isnan(x0_params)):
            parameter_vector = np.r_[optimized_parameter_vector,
                                     np.ones(self.Nmodels)]
        else:
            parameter_vector = np.ones(len(x0_bool_array))
            x0_bool_n0_vf = x0_bool_array[:-self.Nmodels]
            parameter_vector_no_vf = np.empty(
                len(x0_bool_n0_vf), dtype=float)
            parameter_vector_no_vf[~x0_bool_n0_vf] = (
                optimized_parameter_vector)
            parameter_vector_no_vf[x0_bool_n0_vf] = x0_params[
                :-self.Nmodels][x0_bool_n0_vf]
            parameter_vector[:-self.Nmodels] = parameter_vector_no_vf
        parameter_vector = (
            parameter_vector * self.model.scales_for_optimization)

        parameters = self.model.parameter_vector_to_parameters(
            parameter_vector)
        phi_x = self.model(acquisition_scheme,
                           quantity="stochastic cost function", **parameters)
        if np.all(~np.isnan(x0_params[-self.Nmodels:])):
            # if initial guess is given for volume fractions
            vf = x0_params[-self.Nmodels:]
        else:
            A = np.dot(phi_x.T, phi_x)
            try:
                phi_inv = np.dot(np.linalg.inv(A), phi_x.T)
                vf = np.dot(phi_inv, data)
            except np.linalg.linalg.LinAlgError:
                # happens when models have the same signal attenuations.
                vf = np.ones(self.Nmodels) / float(self.Nmodels)
        E_hat = np.dot(phi_x, vf)
    objective = np.dot(data - E_hat, data - E_hat).squeeze()
    return objective * 1e5
Example #20
Source File: functional.py From vnpy_crypto with MIT License
def _min_max_band(args):
    """Min and max values at `idx`.

    Global optimization to find the extrema per component.

    Parameters
    ----------
    args : list
        It is a list of an idx and other arguments as a tuple:
            idx : int
                Index value of the components to compute
        The tuple contains:
            band : list of float
                PDF values `[min_pdf, max_pdf]` to be within.
            pca : statsmodels Principal Component Analysis instance
                The PCA object to use.
            bounds : sequence
                ``(min, max)`` pair for each component
            ks_gaussian : KDEMultivariate instance

    Returns
    -------
    band : tuple of float
        ``(max, min)`` curve values at `idx`
    """
    idx, (band, pca, bounds, ks_gaussian) = args
    if have_de_optim:
        max_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, -1, band, pca, ks_gaussian),
                                      maxiter=7).x
        min_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, 1, band, pca, ks_gaussian),
                                      maxiter=7).x
    else:
        max_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, -1, band, pca, ks_gaussian))
        min_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, 1, band, pca, ks_gaussian))
    band = (_inverse_transform(pca, max_)[0][idx],
            _inverse_transform(pca, min_)[0][idx])
    return band