Python scipy.optimize.fminbound() Examples

The following are 20 code examples of scipy.optimize.fminbound(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.optimize, or try the search function.
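Every snippet below assumes fminbound has been imported from scipy.optimize. As a quick orientation, here is a minimal, self-contained sketch of the call signature the examples rely on (the parabola objective is made up for illustration):

from scipy.optimize import fminbound

# Minimize a simple parabola over the closed interval [0, 10].
xmin = fminbound(lambda x: (x - 3.5) ** 2, 0, 10)
print(xmin)  # approximately 3.5

# With full_output=True, fminbound also returns the objective value at the
# minimizer, a convergence flag (0 means converged), and the call count.
xopt, fval, ierr, numfunc = fminbound(lambda x: (x - 3.5) ** 2, 0, 10,
                                      xtol=1e-5, full_output=True)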
Example #1
Source File: qsturng_.py    From vnpy_crypto with MIT License
def _psturng(q, r, v):
    """scalar version of psturng"""
    if q < 0.:
        raise ValueError('q should be >= 0')

    opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)

    if v == 1:
        if q < _qsturng(.9, r, 1):
            return .1
        elif q > _qsturng(.999, r, 1):
            return .001
        return 1. - fminbound(opt_func, .9, .999, args=(r,v))
    else:
        if q < _qsturng(.1, r, v):
            return .9
        elif q > _qsturng(.999, r, v):
            return .001
        return 1. - fminbound(opt_func, .1, .999, args=(r,v)) 
Example #2
Source File: qsturng.py    From pingouin with GNU General Public License v3.0
def _psturng(q, r, v):
    """scalar version of psturng"""
    if q < 0.:
        raise ValueError('q should be >= 0')

    opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)

    if v == 1:
        if q < _qsturng(.9, r, 1):
            return .1
        elif q > _qsturng(.999, r, 1):
            return .001
        return 1. - fminbound(opt_func, .9, .999, args=(r,v))
    else:
        if q < _qsturng(.1, r, v):
            return .9
        elif q > _qsturng(.999, r, v):
            return .001
        return 1. - fminbound(opt_func, .1, .999, args=(r,v)) 
Example #3
Source File: qsturng_.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _psturng(q, r, v):
    """scalar version of psturng"""
    if q < 0.:
        raise ValueError('q should be >= 0')

    opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)

    if v == 1:
        if q < _qsturng(.9, r, 1):
            return .1
        elif q > _qsturng(.999, r, 1):
            return .001
        return 1. - fminbound(opt_func, .9, .999, args=(r,v))
    else:
        if q < _qsturng(.1, r, v):
            return .9
        elif q > _qsturng(.999, r, v):
            return .001
        return 1. - fminbound(opt_func, .1, .999, args=(r,v)) 
Example #4
Source File: operators.py    From mfea-ii with MIT License
def learn_rmp(subpops, D):
  K          = len(subpops)
  rmp_matrix = np.eye(K)
  models = learn_models(subpops)

  for k in range(K - 1):
    for j in range(k + 1, K):
      probmatrix = [np.ones([models[k].num_sample, 2]), 
                    np.ones([models[j].num_sample, 2])]
      probmatrix[0][:, 0] = models[k].density(subpops[k])
      probmatrix[0][:, 1] = models[j].density(subpops[k])
      probmatrix[1][:, 0] = models[k].density(subpops[j])
      probmatrix[1][:, 1] = models[j].density(subpops[j])

      rmp = fminbound(lambda rmp: log_likelihood(rmp, probmatrix, K), 0, 1)
      rmp += np.random.randn() * 0.01
      rmp = np.clip(rmp, 0, 1)
      rmp_matrix[k, j] = rmp
      rmp_matrix[j, k] = rmp

  return rmp_matrix

# OPTIMIZATION RESULT HELPERS 
Example #5
Source File: optimization.py    From idr with GNU General Public License v2.0
def CA_step(z1, z2, theta, index, min_val, max_val):
    """Take a single coordinate ascent step.
    
    """
    inner_theta = theta.copy()
    def f(alpha):
        inner_theta[index] = theta[index] + alpha
        return -calc_gaussian_mix_log_lhd(inner_theta, z1, z2)

    assert theta[index] >= min_val
    min_step_size = min_val - theta[index]
    assert theta[index] <= max_val
    max_step_size = max_val - theta[index]

    alpha = fminbound(f, min_step_size, max_step_size)
    prev_lhd = -f(0)
    new_lhd = -f(alpha)
    if new_lhd > prev_lhd:
        theta[index] += alpha
    else:
        new_lhd = prev_lhd
    return theta, new_lhd 
Example #6
Source File: classifier.py    From skboost with MIT License
def _find_estimator_weight(self, y, dv_pre, y_pred):
        """Make line search to determine estimator weights."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            def optimization_function(alpha):
                p_ij = self._estimate_instance_probabilities(dv_pre + alpha * y_pred)
                p_i = self._estimate_bag_probabilites(p_ij)
                return self._negative_log_likelihood(p_i)

            # TODO: Add option to choose optimization method.

            alpha, fval, err, n_func = fminbound(optimization_function, 0.0, 5.0, full_output=True, disp=1)
            if self.learning_rate < 1.0:
                alpha *= self.learning_rate
        return alpha, fval 
Example #7
Source File: descriptive.py    From vnpy_crypto with MIT License
def test_var(self, sig2_0, return_weights=False):
        """
        Returns -2 x log-likelihood ratio and the p-value for the
        hypothesized variance

        Parameters
        ----------
        sig2_0 : float
            Hypothesized variance to be tested

        return_weights : bool
            If True, returns the weights that maximize the
            likelihood of observing sig2_0. Default is False

        Returns
        -------
        test_results : tuple
            The log-likelihood ratio and the p-value of sig2_0

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(1000)*100
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> hyp_test = el_analysis.test_var(9500)
        """
        self.sig2_0 = sig2_0
        mu_max = max(self.endog)
        mu_min = min(self.endog)
        llr = optimize.fminbound(self._opt_var, mu_min, mu_max,
                                 full_output=1)[1]
        p_val = chi2.sf(llr, 1)
        if return_weights:
            return llr, p_val, self.new_weights.T
        else:
            return llr, p_val
Example #8
Source File: c6.py    From abu with GNU General Public License v3.0
def sample_623():
    """
    6.2.3 Trend skeleton plot
    :return:
    """
    import scipy.optimize as sco
    from scipy.interpolate import interp1d

    # Keep using the TSLA closing price series (x, y) from earlier sections
    # interp1d builds a linear interpolation function
    linear_interp = interp1d(x, y)
    # Plot the interpolated series
    plt.plot(linear_interp(x))

    # fminbound finds the minimum within a given range: search linear_interp
    # over the global range 1-504
    global_min_pos = sco.fminbound(linear_interp, 1, 504)
    # Plot the global minimum point; 'r<' means a red triangle marker
    plt.plot(global_min_pos, linear_interp(global_min_pos), 'r<')

    # Plot one point per step, then connect neighboring points with straight
    # lines to form the price skeleton plot
    last_position = None
    # Step size 50: find one local minimum every 50 units
    for find_min_pos in np.arange(50, len(x), 50):
        # fmin_bfgs finds a local minimum near the given starting value
        local_min_pos = sco.fmin_bfgs(linear_interp, find_min_pos, disp=0)
        # Assemble the minimum point's position info (x, y)
        draw_position = (local_min_pos, linear_interp(local_min_pos))
        # On the first 50-unit step last_position is None; afterwards it is set
        if last_position is not None:
            # Connect each pair of neighboring local minima with a line
            plt.plot([last_position[0][0], draw_position[0][0]],
                     [last_position[1][0], draw_position[1][0]], 'o-')
        # Save this step's minimum point as last_position
        last_position = draw_position
    plt.show()
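The snippet above assumes x, y (the index and the TSLA closing prices), np, and plt from earlier in the chapter. A self-contained variant with synthetic prices, for readers who want to run it directly (the random walk merely stands in for real data):

import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as sco
from scipy.interpolate import interp1d

x = np.arange(504)
y = np.cumsum(np.random.randn(504)) + 100.0  # synthetic closing prices
linear_interp = interp1d(x, y)
plt.plot(x, linear_interp(x))
# Keep the search bounds inside the interpolation range [0, 503].
global_min_pos = sco.fminbound(linear_interp, 1, 503)
plt.plot(global_min_pos, linear_interp(global_min_pos), 'r<')
plt.show()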
Example #9
Source File: example_scipy.py    From featherweight_web_api with BSD 3-Clause "New" or "Revised" License
def function(b, c):
    return optimize.fminbound(f, b, c)

# If called with arguments:
# http://127.0.0.1:5000/function?b=2&c=10
# we get a correct output:
# {"success": true, "error_msg": null, "result": 3.83746830432337} 
Example #10
Source File: descriptive.py    From Splunking-Crime with GNU Affero General Public License v3.0
def test_var(self, sig2_0, return_weights=False):
        """
        Returns -2 x log-likelihood ratio and the p-value for the
        hypothesized variance

        Parameters
        ----------
        sig2_0 : float
            Hypothesized variance to be tested

        return_weights : bool
            If True, returns the weights that maximize the
            likelihood of observing sig2_0. Default is False

        Returns
        -------
        test_results : tuple
            The log-likelihood ratio and the p-value of sig2_0

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(1000)*100
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> hyp_test = el_analysis.test_var(9500)
        """
        self.sig2_0 = sig2_0
        mu_max = max(self.endog)
        mu_min = min(self.endog)
        llr = optimize.fminbound(self._opt_var, mu_min, mu_max,
                                 full_output=1)[1]
        p_val = chi2.sf(llr, 1)
        if return_weights:
            return llr, p_val, self.new_weights.T
        else:
            return llr, p_val
Example #11
Source File: optimization.py    From idr with GNU General Public License v2.0
def gradient_ascent(r1, r2, theta, gradient_magnitude, 
                    fix_mu=False, fix_sigma=False):
    for j in range(len(theta)):
        if fix_mu and j == 0: continue
        if fix_sigma and j == 1: continue
        
        prev_loss = calc_loss(r1, r2, theta)

        mu, sigma, rho, p = theta
        z1 = compute_pseudo_values(r1, mu, sigma, p)
        z2 = compute_pseudo_values(r2, mu, sigma, p)
        real_grad = calc_pseudo_log_lhd_gradient(theta, z1, z2, False, False)
        
        gradient = numpy.zeros(len(theta))
        gradient[j] = gradient_magnitude
        if real_grad[j] < 0: gradient[j] = -gradient[j]
                
        min_step = 0
        max_step = find_max_step_size(
            theta[j], gradient[j], (False if j in (0,1) else True))

        if max_step < 1e-12: continue

        alpha = fminbound(
            lambda x: calc_loss( r1, r2, theta + x*gradient ),
            min_step, max_step)
                
        loss = calc_loss( r1, r2, theta + alpha*gradient )
        if loss < prev_loss:
            theta += alpha*gradient

    return theta 
Example #12
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_fminbound_scalar(self):
        try:
            optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
            self.fail("exception not raised")
        except ValueError as e:
            assert_('must be scalar' in str(e))

        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6) 
Example #13
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_fminbound(self):
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)
        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) 
Example #14
Source File: test_optimize.py    From Computable with MIT License
def test_fminbound_scalar(self):
        assert_raises(ValueError, optimize.fminbound, self.fun,
                      np.zeros(2), 1)

        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6) 
Example #15
Source File: test_optimize.py    From Computable with MIT License
def test_fminbound(self):
        """Test fminbound """
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)
        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) 
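Taken together, the tests above pin down fminbound's input contract: both bounds must be scalars and must satisfy x1 < x2. A standalone illustration of the two failure modes they assert:

import numpy as np
from scipy.optimize import fminbound

for bad_bounds in ((np.zeros((1, 2)), 1), (5, 1)):
    try:
        fminbound(np.cos, *bad_bounds)  # non-scalar bound, then reversed bounds
    except ValueError as e:
        print(e)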
Example #16
Source File: optimization.py    From idr with GNU General Public License v2.0
def coordinate_ascent(r1, r2, theta, gradient_magnitude, 
                      fix_mu=False, fix_sigma=False):
    for j in range(len(theta)):
        if fix_mu and j == 0: continue
        if fix_sigma and j == 1: continue
        
        prev_loss = calc_loss(r1, r2, theta)

        # find the direction of the gradient
        gradient = numpy.zeros(len(theta))
        gradient[j] = gradient_magnitude
        init_alpha = 5e-12
        while init_alpha < 1e-2:
            pos = calc_loss( r1, r2, theta - init_alpha*gradient )
            neg = calc_loss( r1, r2, theta + init_alpha*gradient )
            if neg < prev_loss < pos:
                gradient[j] = gradient[j]  # keep the positive search direction
                #assert(calc_loss( 
                #       r1, r2, theta - init_alpha*gradient ) > prev_loss)
                #assert(calc_loss( 
                #       r1, r2, theta + init_alpha*gradient ) <= prev_loss)
                break
            elif neg > prev_loss > pos:
                gradient[j] = -gradient[j]
                #assert(calc_loss( 
                #    r1, r2, theta - init_alpha*gradient ) > prev_loss)
                #assert(calc_loss( 
                #    r1, r2, theta + init_alpha*gradient ) <= prev_loss)
                break
            else:
                init_alpha *= 10         

        #log( pos - prev_loss, neg - prev_loss )
        assert init_alpha < 1e-1
        
        min_step = 0
        max_step = find_max_step_size(
            theta[j], gradient[j], (False if j in (0,1) else True))

        if max_step < 1e-12: continue

        alpha = fminbound(
            lambda x: calc_loss( r1, r2, theta + x*gradient ),
            min_step, max_step)

        loss = calc_loss( r1, r2, theta + alpha*gradient )
        #log( "LOSS:", loss, prev_loss, loss-prev_loss )
        if loss < prev_loss:
            theta += alpha*gradient

    return theta 
Example #17
Source File: ifp.py    From QuantEcon.lectures.code with BSD 3-Clause "New" or "Revised" License
def bellman_operator(V, cp, return_policy=False):
    """
    The approximate Bellman operator, which computes and returns the
    updated value function TV (or the V-greedy policy c if
    return_policy is True).

    Parameters
    ----------
    V : array_like(float)
        A NumPy array of dim len(cp.asset_grid) times len(cp.z_vals)
    cp : ConsumerProblem
        An instance of ConsumerProblem that stores primitives
    return_policy : bool, optional(default=False)
        Indicates whether to return the greedy policy given V or the
        updated value function TV.  Default is TV.

    Returns
    -------
    array_like(float)
        Returns either the greedy policy given V or the updated value
        function TV.

    """
    # === Simplify names, set up arrays === #
    R, Π, β, u, b = cp.R, cp.Π, cp.β, cp.u, cp.b
    asset_grid, z_vals = cp.asset_grid, cp.z_vals
    new_V = np.empty(V.shape)
    new_c = np.empty(V.shape)
    z_idx = list(range(len(z_vals)))

    # === Linear interpolation of V along the asset grid === #
    vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])

    # === Solve r.h.s. of Bellman equation === #
    for i_a, a in enumerate(asset_grid):
        for i_z, z in enumerate(z_vals):
            def obj(c):  # objective function to be *minimized*
                y = sum(vf(R * a + z - c, j) * Π[i_z, j] for j in z_idx)
                return - u(c) - β * y
            c_star = fminbound(obj, 1e-8, R * a + z + b)
            new_c[i_a, i_z], new_V[i_a, i_z] = c_star, -obj(c_star)

    if return_policy:
        return new_c
    else:
        return new_V 
Example #18
Source File: optgrowth.py    From QuantEcon.lectures.code with BSD 3-Clause "New" or "Revised" License
def bellman_operator(w, grid, β, u, f, shocks, Tw=None, compute_policy=0):
    """
    The approximate Bellman operator, which computes and returns the
    updated value function Tw on the grid points.  An array to store
    the new set of values Tw is optionally supplied (to avoid having to
    allocate new arrays at each iteration).  If supplied, any existing data in 
    Tw will be overwritten.

    Parameters
    ----------
    w : array_like(float, ndim=1)
        The value of the input function on different grid points
    grid : array_like(float, ndim=1)
        The set of grid points
    β : scalar
        The discount factor
    u : function
        The utility function
    f : function
        The production function
    shocks : numpy array
        An array of draws from the shock, for Monte Carlo integration (to
        compute expectations).
    Tw : array_like(float, ndim=1) optional (default=None)
        Array to write output values to
    compute_policy : bool, optional (default=False)
        Whether or not to compute policy function

    """
    # === Apply linear interpolation to w === #
    w_func = lambda x: np.interp(x, grid, w)

    # == Initialize Tw if necessary == #
    if Tw is None:
        Tw = np.empty_like(w)

    if compute_policy:
        σ = np.empty_like(w)

    # == set Tw[i] = max_c { u(c) + β E w(f(y  - c) z)} == #
    for i, y in enumerate(grid):
        def objective(c):
            return - u(c) - β * np.mean(w_func(f(y - c) * shocks))
        c_star = fminbound(objective, 1e-10, y)
        if compute_policy:
            σ[i] = c_star
        Tw[i] = - objective(c_star)

    if compute_policy:
        return Tw, σ
    else:
        return Tw 
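In the lecture this operator is iterated to an approximate fixed point. A minimal driver sketch, assuming log utility, Cobb-Douglas production f(k) = k**0.4, a lognormal shock sample, and a discount factor of 0.96 (none of these appear in the snippet itself, and np and fminbound are imported as in the source module):

import numpy as np

grid = np.linspace(1e-5, 4, 120)
shocks = np.exp(0.1 * np.random.randn(250))  # assumed lognormal shocks
w = 5 * np.log(grid)  # initial guess for the value function
for _ in range(50):   # crude fixed-point iteration, no convergence test
    w = bellman_operator(w, grid, β=0.96, u=np.log, f=lambda k: k ** 0.4,
                         shocks=shocks)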
Example #19
Source File: boxcox.py    From sktime with BSD 3-Clause "New" or "Revised" License
def boxcox_normmax(x, bounds=None, brack=(-2.0, 2.0), method='pearsonr'):
    # If bounds is None, use unbounded Brent optimisation
    if bounds is None:
        def optimizer(func, args):
            return optimize.brent(func, brack=brack, args=args)

    # otherwise use bounded Brent optimisation
    else:
        # input checks on bounds
        if not isinstance(bounds, tuple) or len(bounds) != 2:
            raise ValueError(
                f"`bounds` must be a tuple of length 2, but found: {bounds}")

        def optimizer(func, args):
            return optimize.fminbound(func, bounds[0], bounds[1], args=args)

    def _pearsonr(x):
        osm_uniform = _calc_uniform_order_statistic_medians(len(x))
        xvals = distributions.norm.ppf(osm_uniform)

        def _eval_pearsonr(lmbda, xvals, samps):
            # This function computes the x-axis values of the probability plot
            # and computes a linear regression (including the correlation) and
            # returns ``1 - r`` so that a minimization function maximizes the
            # correlation.
            y = boxcox(samps, lmbda)
            yvals = np.sort(y)
            r, prob = stats.pearsonr(xvals, yvals)
            return 1 - r

        return optimizer(_eval_pearsonr, args=(xvals, x))

    def _mle(x):
        def _eval_mle(lmb, data):
            # function to minimize
            return -boxcox_llf(lmb, data)

        return optimizer(_eval_mle, args=(x,))

    def _all(x):
        maxlog = np.zeros(2, dtype=float)
        maxlog[0] = _pearsonr(x)
        maxlog[1] = _mle(x)
        return maxlog

    methods = {'pearsonr': _pearsonr,
               'mle': _mle,
               'all': _all}
    if method not in methods:
        raise ValueError("Method %s not recognized." % method)

    optimfunc = methods[method]
    return optimfunc(x) 
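This helper mirrors scipy.stats.boxcox_normmax but adds a bounds argument that swaps Brent's unbounded search for optimize.fminbound. A hedged usage sketch, assuming the original module's imports (optimize, distributions, stats, boxcox, boxcox_llf) are in scope:

import numpy as np

rng = np.random.default_rng(0)
data = rng.lognormal(size=200)  # Box-Cox requires a strictly positive sample

lmbda_free = boxcox_normmax(data, method='mle')                      # unbounded Brent
lmbda_clip = boxcox_normmax(data, bounds=(-1.0, 1.0), method='mle')  # via fminbound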
Example #20
Source File: utils.py    From claude with MIT License
def calcMI_MC(x,y,constellation):
    """
        Transcribed from Dr. Tobias Fehenberger's MATLAB code.
        See: https://www.fehenberger.de/#sourcecode
    """
    if y.shape[0] != 1:
        y = y.T
    if x.shape[0] != 1:
        x = x.T
    if constellation.shape[0] == 1:
        constellation = constellation.T

    M = constellation.size
    N = x.size
    P_X = np.zeros( (M,1) )
    
    x = x / np.sqrt( np.mean( np.abs( x )**2 ) ) # normalize such that var(X)=1
    y = y / np.sqrt( np.mean( np.abs( y )**2 ) ) # normalize such that var(Y)=1

    ## Get X in Integer Representation
    xint = np.argmin( np.abs( x - constellation )**2, axis=0)

    fun = lambda h: np.dot( h*x-y, np.conj( h*x-y ).T )
    h = fminbound( fun, 0, 2 )
    N0 = np.real( (1-h**2)/h**2 )
    y = y / h

    ## Find constellation and empirical input distribution
    for s in np.arange(M):
        P_X[s] = np.sum( xint==s ) / N
        
    ## Monte Carlo estimation of (a lower bound to) the mutual information I(X;Y)
    qYonX = 1 / ( np.pi*N0 ) * np.exp( ( -(np.real(y)-np.real(x))**2 -(np.imag(y)-np.imag(x))**2 ) / N0 )
    
    qY = 0
    for ii in np.arange(M):
        qY = qY + P_X[ii] * (1/(np.pi*N0)*np.exp((-(np.real(y)-np.real(constellation[ii,0]))**2-(np.imag(y)-np.imag(constellation[ii,0]))**2)/N0))
    
    realmin = np.finfo(float).tiny
    MI=1/N*np.sum(np.log2(np.maximum(qYonX,realmin)/np.maximum(qY,realmin)))

    return MI