Python scipy.optimize.fmin_bfgs() Examples

The following are 30 code examples of scipy.optimize.fmin_bfgs(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.optimize, or try the search function.
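Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (not taken from any project below): fmin_bfgs(f, x0, fprime=None, ...) minimizes f starting from x0, optionally using an analytic gradient fprime. The toy objective and names are illustrative only.

import numpy as np
from scipy import optimize

def f(x):
    # simple convex objective with its minimum at x = [3, 3]
    return np.sum((x - 3.0) ** 2)

def fprime(x):
    # analytic gradient of f
    return 2.0 * (x - 3.0)

x0 = np.zeros(2)
xopt = optimize.fmin_bfgs(f, x0, fprime=fprime, disp=False)
# xopt is expected to be close to [3., 3.]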
Example #1
Source File: LogisticRegression_OneVsAll.py    From MachineLearning_Python with MIT License    7 votes
def oneVsAll(X,y,num_labels,Lambda):
    # Initialize variables
    m,n = X.shape
    all_theta = np.zeros((n+1,num_labels))  # one column of theta per class, 10 columns in total
    X = np.hstack((np.ones((m,1)),X))       # prepend a column of ones to X as the bias term
    class_y = np.zeros((m,num_labels))      # y takes values 0-9 and must be mapped to 0/1 indicators
    initial_theta = np.zeros((n+1,1))       # initial theta for a single class
    
    # map y
    for i in range(num_labels):
        class_y[:,i] = np.int32(y==i).reshape(1,-1) # note: reshape(1,-1) is required for the assignment
    
    #np.savetxt("class_y.csv", class_y[0:600,:], delimiter=',')    
    
    '''Iterate over each class and compute its theta'''
    for i in range(num_labels):
        #optimize.fmin_cg
        result = optimize.fmin_bfgs(costFunction, initial_theta, fprime=gradient, args=(X,class_y[:,i],Lambda)) # call the gradient-based optimizer
        all_theta[:,i] = result.reshape(1,-1)   # store in all_theta
        
    all_theta = np.transpose(all_theta) 
    return all_theta

# Cost function 
Example #2
Source File: optimization.py    From paramz with BSD 3-Clause "New" or "Revised" License    6 votes
def opt(self, x_init, f_fp=None, f=None, fp=None):
        """
        Run the optimizer

        """
        rcstrings = ['','Maximum number of iterations exceeded', 'Gradient and/or function calls not changing']

        opt_dict = {}
        if self.xtol is not None:
            print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it")
        if self.ftol is not None:
            print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it")
        if self.gtol is not None:
            opt_dict['gtol'] = self.gtol

        opt_result = optimize.fmin_bfgs(f, x_init, fp, disp=self.messages,
                                            maxiter=self.max_iters, full_output=True, **opt_dict)
        self.x_opt = opt_result[0]
        self.f_opt = f_fp(self.x_opt)[0]
        self.funct_eval = opt_result[4]
        self.status = rcstrings[opt_result[6]] 
Example #3
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License    6 votes
def test_bfgs_infinite(self):
        # Test corner case where -Inf is the minimum.  See gh-2019.
        func = lambda x: -np.e**-x
        fprime = lambda x: -func(x)
        x0 = [0]
        olderr = np.seterr(over='ignore')
        try:
            if self.use_wrapper:
                opts = {'disp': self.disp}
                x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                      args=(), options=opts)['x']
            else:
                x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
            assert_(not np.isfinite(func(x)))
        finally:
            np.seterr(**olderr) 
Example #4
Source File: model_hawkes_sumexpkern_leastsq_test.py    From tick with BSD 3-Clause "New" or "Revised" License    6 votes
def test_model_hawkes_varying_baseline_least_sq_grad(self):
        """...Test that ModelHawkesExpKernLeastSq gradient is consistent
        with loss
        """
        for model in [self.model, self.model_list]:
            model.period_length = 1.
            model.n_baselines = 3
            coeffs = np.random.rand(model.n_coeffs)

            self.assertLess(check_grad(model.loss, model.grad, coeffs), 1e-5)

            coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                                   disp=False)

            self.assertAlmostEqual(
                norm(model.grad(coeffs_min)), .0, delta=1e-4) 
Example #5
Source File: test_optimize.py    From Computable with MIT License    6 votes
def test_bfgs_infinite(self, use_wrapper=False):
        """Test corner case where -Inf is the minimum.  See #1494."""
        func = lambda x: -np.e**-x
        fprime = lambda x: -func(x)
        x0 = [0]
        olderr = np.seterr(over='ignore')
        try:
            if use_wrapper:
                opts = {'disp': False}
                x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                      args=(), options=opts)['x']
            else:
                x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(not np.isfinite(func(x)))
        finally:
            np.seterr(**olderr) 
Example #6
Source File: ABuTLExecute.py    From abu with GNU General Public License v3.0    6 votes
def bfgs_min_pos(find_min_pos, y_len, linear_interp):
    """
    通过scipy.interpolate.interp1d插值形成的模型,通过sco.fmin_bfgs计算min
    :param find_min_pos: 寻找min的点位值
    :param y_len: 原始序列长度,int
    :param linear_interp: scipy.interpolate.interp1d插值形成的模型
    :return: sco.fmin_bfgs成功找到的值,所有失败的或者异常都返回-1
    """
    try:
        local_min_pos = sco.fmin_bfgs(linear_interp, find_min_pos, disp=False)[0]
    except:
        # 所有失败的或者异常都返回-1
        local_min_pos = -1
    if local_min_pos < 0 or local_min_pos > y_len:
        # 所有失败的或者异常都返回-1
        local_min_pos = -1
    return local_min_pos 
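A minimal usage sketch for bfgs_min_pos, assuming sco and interp1d are imported as in the abu sources; the series y below is illustrative, not taken from the project.

import numpy as np
import scipy.optimize as sco
from scipy.interpolate import interp1d

y = np.sin(np.linspace(0, 10, 200))          # illustrative price-like series
x = np.arange(len(y))
linear_interp = interp1d(x, y)
local_min = bfgs_min_pos(60, len(y), linear_interp)  # search for a local minimum near index 60; -1 on failure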
Example #7
Source File: voltage_set_points.py    From GridCal with GNU General Public License v3.0    6 votes
def run_bfgs(self):
        """
        Run the optimization
        @return: Nothing
        """

        self.problem = SetPointsOptimizationProblem(self.circuit,
                                                    self.options,
                                                    self.max_iter,
                                                    callback=self.progress_signal.emit)

        xopt = fmin_bfgs(f=self.problem.eval, x0=self.problem.x0,
                         fprime=None, args=(), gtol=1e-05,  epsilon=1e-2,
                         maxiter=self.max_iter, full_output=0, disp=1, retall=0,
                         callback=None)

        self.solution = np.ones(self.problem.dim) + xopt

        # Extract function values from the controller
        self.optimization_values = np.array(self.problem.all_f)

        # send the finish signal
        self.progress_signal.emit(0.0)
        self.progress_text.emit('Done!')
        self.done_signal.emit() 
Example #8
Source File: c6.py    From abu with GNU General Public License v3.0    5 votes
def sample_623():
    """
    6.2.3 趋势骨架图
    :return:
    """
    import scipy.optimize as sco
    from scipy.interpolate import interp1d

    # 继续使用TSLA收盘价格序列
    # interp1d线性插值函数
    linear_interp = interp1d(x, y)
    # 绘制插值
    plt.plot(linear_interp(x))

    # fminbound寻找给定范围内的最小值:在linear_inter中寻找全局最优范围1-504
    global_min_pos = sco.fminbound(linear_interp, 1, 504)
    # 绘制全局最优点,全局最小值点,r<:红色三角
    plt.plot(global_min_pos, linear_interp(global_min_pos), 'r<')

    # 每个单位都先画一个点,由两个点连成一条直线形成股价骨架图
    last_postion = None
    # 步长50,每50个单位求一次局部最小
    for find_min_pos in np.arange(50, len(x), 50):
        # fmin_bfgs寻找给定值的局部最小值
        local_min_pos = sco.fmin_bfgs(linear_interp, find_min_pos, disp=0)
        # 形成最小点位置信息(x, y)
        draw_postion = (local_min_pos, linear_interp(local_min_pos))
        # 第一个50单位last_postion=none, 之后都有值
        if last_postion is not None:
            # 将两两临近局部最小值相连,两个点连成一条直线
            plt.plot([last_postion[0][0], draw_postion[0][0]],
                     [last_postion[1][0], draw_postion[1][0]], 'o-')
        # 将这个步长单位内的最小值点赋予last_postion
        last_postion = draw_postion
    plt.show() 
Example #9
Source File: gmm.py    From Splunking-Crime with GNU Affero General Public License v3.0    5 votes
def fitgmm_cu(self, start, optim_method='bfgs', optim_args=None):
        '''estimate parameters using continuously updating GMM

        Parameters
        ----------
        start : array_like
            starting values for minimization

        Returns
        -------
        paramest : array
            estimated parameters

        Notes
        -----
        todo: add fixed parameter option, not here ???

        uses scipy.optimize.fmin

        '''
##        if not fixed is None:  #fixed not defined in this version
##            raise NotImplementedError

        if optim_args is None:
            optim_args = {}

        if optim_method == 'nm':
            optimizer = optimize.fmin
        elif optim_method == 'bfgs':
            optimizer = optimize.fmin_bfgs
            optim_args['fprime'] = self.score_cu
        elif optim_method == 'ncg':
            optimizer = optimize.fmin_ncg
        else:
            raise ValueError('optimizer method not available')

        #TODO: add other optimization options and results
        return optimizer(self.gmmobjective_cu, start, args=(), **optim_args) 
Example #10
Source File: optimizer.py    From Splunking-Crime with GNU Affero General Public License v3.0    5 votes
def _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,
                    maxiter=100, callback=None, retall=False,
                    full_output=True, hess=None):
    gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
    norm = kwargs.setdefault('norm', np.Inf)
    epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
    retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,
                                 gtol=gtol, norm=norm, epsilon=epsilon,
                                 maxiter=maxiter, full_output=full_output,
                                 disp=disp, retall=retall, callback=callback)
    if full_output:
        if not retall:
            xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals
        else:
            (xopt, fopt, gopt, Hinv, fcalls,
             gcalls, warnflag, allvecs) = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,
                'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':
                warnflag, 'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        xopt = retvals
        retvals = None

    return xopt, retvals 
Example #11
Source File: oneVsAll.py    From zfverify with MIT License    5 votes
def oneVsAll(X, y, num_labels, the_lambda):
    m, n = shape(X)
    all_theta = matrix(zeros((num_labels, n+1)))
    X = hstack((ones((m, 1)), X))
    for c in range(num_labels):
        print('Training for %d/34' % (c+1))
        initial_theta = zeros((n+1, 1))
        args = (X, (y == c), the_lambda)
        theta = fmin_bfgs(lrCostFunction, initial_theta, fprime=lrGD, args=args, maxiter=50)
        all_theta[c, :] = theta.transpose()

    return all_theta 
Example #12
Source File: propensity.py    From Causalinference with BSD 3-Clause "New" or "Revised" License    5 votes
def calc_coef(X_c, X_t):

	K = X_c.shape[1]

	neg_ll = lambda b: neg_loglike(b, X_c, X_t)
	neg_grad = lambda b: neg_gradient(b, X_c, X_t)

	logit = fmin_bfgs(neg_ll, np.zeros(K), neg_grad,
			  full_output=True, disp=False)

	return logit[0] 
Example #13
Source File: ex2.py    From coursera-ml-py with MIT License    5 votes
def grad_func(t):
    return cf.cost_function(t, X, y)[1]


# Run fmin_bfgs to obtain the optimal theta 
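The excerpt stops before the optimizer call itself; a hedged sketch of how it might continue, assuming cf.cost_function(t, X, y)[0] is the cost paired with the gradient above. The import, cost_func and initial_theta below are assumptions for illustration, not part of the original file.

from scipy import optimize  # assumed import

def cost_func(t):
    # cost term paired with grad_func above (assumed to be index 0 of cost_function's return)
    return cf.cost_function(t, X, y)[0]

initial_theta = np.zeros(X.shape[1])  # assumed starting point
theta = optimize.fmin_bfgs(cost_func, initial_theta, fprime=grad_func, maxiter=400)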
Example #14
Source File: __init__.py    From FreeCAD_assembly2 with GNU Lesser General Public License v2.1    5 votes
def distance_between_axes_fmin( p1, u1, p2, u2):
    from scipy.optimize import fmin_bfgs
    def distance(T):
        t1, t2 = T
        return numpy.linalg.norm( p1 + u1*t1 - (p2 + u2*t2) )
    T_opt = fmin_bfgs( distance, [0 , 0], disp=False)
    return distance(T_opt) 
Example #15
Source File: test_theil_sen.py    From twitter-stock-recommendation with MIT License    5 votes
def test_spatial_median_2d():
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    _, median = _spatial_median(X, max_iter=100, tol=1.e-6)

    def cost_func(y):
        dists = np.array([norm(x - y) for x in X])
        return np.sum(dists)

    # Check if median is solution of the Fermat-Weber location problem
    fermat_weber = fmin_bfgs(cost_func, median, disp=False)
    assert_array_almost_equal(median, fermat_weber)
    # Check when maximum iteration is exceeded a warning is emitted
    assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.) 
Example #16
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License    5 votes
def test_bfgs_gh_2169(self):
        def f(x):
            if x < 0:
                return 1.79769313e+308
            else:
                return x + 1./x
        xs = optimize.fmin_bfgs(f, [10.], disp=False)
        assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4) 
Example #17
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License    5 votes
def test_bfgs_numerical_jacobian(self):
        # BFGS with numerical jacobian and a vector epsilon parameter.
        # define the epsilon parameter using a random vector
        epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))

        params = optimize.fmin_bfgs(self.func, self.startparams,
                                    epsilon=epsilon, args=(),
                                    maxiter=self.maxiter, disp=False)

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6) 
Example #18
Source File: classic.py    From scikit-extremes with MIT License    5 votes
def _ci_bootstrap(self):
        # Calculate confidence intervals using parametric bootstrap and the
        # percentil interval method
        # This is used to obtain confidence intervals for the estimators and
        # the return values for several return values.
        # all the code in skextremes.utils.bootstrap_ci has been adapted and
        # simplified from that on https://github.com/cgevans/scikits-bootstrap.
        #
        # More info about bootstrapping can be found on:
        #     - https://github.com/cgevans/scikits-bootstrap
        #     - Efron: "An Introduction to the Bootstrap", Chapman & Hall (1993)
        #     - https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
        
        # parametric bootstrap for return levels and parameters   
        
        # The function to bootstrap     
        def func(data):
            sample = _st.genextreme.rvs(self.c, 
                                       loc = self.loc, 
                                       scale = self.scale, 
                                       size = len(self.data))
            c, loc, scale = _st.genextreme.fit(sample, self.c, 
                                              loc = self.loc, 
                                              scale = self.scale,
                                              optimizer = _op.fmin_bfgs)
            T = _np.arange(0.1, 500.1, 0.1)
            sT = _st.genextreme.isf(self.frec/T, c, loc = loc, scale = scale)
            res = [c, loc, scale]
            res.extend(sT.tolist())
            return tuple(res)
        
        # the calculations itself
        out = _bsci(self.data, statfunction = func, n_samples = 500)
        self._ci_Td = out[0, 3:]
        self._ci_Tu = out[1, 3:]
        self.params_ci = OrderedDict()
        self.params_ci['shape']    = (out[0,0], out[1,0])
        self.params_ci['location'] = (out[0,1], out[1,1])
        self.params_ci['scale']    = (out[0,2], out[1,2]) 
Example #19
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License    5 votes
def test_bfgs_nan(self):
        # Test corner case where nan is fed to optimizer.  See gh-2067.
        func = lambda x: x
        fprime = lambda x: np.ones_like(x)
        x0 = [np.nan]
        with np.errstate(over='ignore', invalid='ignore'):
            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(np.isnan(func(x))) 
Example #20
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License    5 votes
def test_bfgs(self):
        # Broyden-Fletcher-Goldfarb-Shanno optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams,
                                    jac=self.grad, method='BFGS', args=(),
                                    options=opts)

            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
                    res['x'], res['fun'], res['jac'], res['hess_inv'],
                    res['nfev'], res['njev'], res['status'])
        else:
            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=self.disp,
                                        retall=False)
            (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 10, self.funccalls)
        assert_(self.gradcalls == 8, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[6:8],
                        [[0, -5.25060743e-01, 4.87748473e-01],
                         [0, -5.24885582e-01, 4.87530347e-01]],
                        atol=1e-14, rtol=1e-7) 
Example #21
Source File: model_sccs_test.py    From tick with BSD 3-Clause "New" or "Revised" License    5 votes
def _test_grad(self, model, coeffs, delta_check_grad=1e-5,
                   delta_model_grad=1e-4):
        """Test that gradient is consistent with loss and that minimum is
            achievable with a small gradient
            """
        self.assertAlmostEqual(
            check_grad(model.loss, model.grad, coeffs), 0.,
            delta=delta_check_grad)
        # Check that minimum is achievable with a small gradient
        coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                               disp=False)
        self.assertAlmostEqual(
            norm(model.grad(coeffs_min)), .0, delta=delta_model_grad) 
Example #22
Source File: model_hawkes_sumexpkern_leastsq_test.py    From tick with BSD 3-Clause "New" or "Revised" License    5 votes
def test_model_hawkes_least_sq_grad(self):
        """...Test that ModelHawkesExpKernLeastSq gradient is consistent
        with loss
        """

        for model in [self.model, self.model_list]:
            self.assertLess(
                check_grad(model.loss, model.grad, self.coeffs), 1e-5)

            # Check that minimum is achievable with a small gradient
            coeffs_min = fmin_bfgs(model.loss, self.coeffs, fprime=model.grad,
                                   disp=False)
            self.assertAlmostEqual(
                norm(model.grad(coeffs_min)), .0, delta=1e-4) 
Example #23
Source File: generalized_linear_model.py    From tick with BSD 3-Clause "New" or "Revised" License    5 votes
def _test_grad(self, model, coeffs, delta_check_grad=None,
                   delta_model_grad=None):
        """Test that gradient is consistent with loss and that minimum is
        achievable with a small gradient
        """
        if coeffs.dtype is np.dtype("float32"):
            check_grad_epsilon = 3e-3
        else:
            check_grad_epsilon = 1e-7

        if delta_check_grad is None:
            delta_check_grad = self.delta_check_grad

        if delta_model_grad is None:
            delta_model_grad = self.delta_model_grad

        with warnings.catch_warnings(record=True):
            grad_check = check_grad(model.loss, model.grad, coeffs,
                                    epsilon=check_grad_epsilon)

        self.assertAlmostEqual(grad_check, 0., delta=delta_check_grad)
        # Check that minimum is achievable with a small gradient

        with warnings.catch_warnings(record=True):
            coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                                   disp=False)
            coeffs_min = coeffs_min.astype(self.dtype)

        self.assertAlmostEqual(
            norm(model.grad(coeffs_min)), .0, delta=delta_model_grad) 
Example #24
Source File: test_theil_sen.py    From Mastering-Elasticsearch-7.0 with MIT License    5 votes
def test_spatial_median_2d():
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    _, median = _spatial_median(X, max_iter=100, tol=1.e-6)

    def cost_func(y):
        dists = np.array([norm(x - y) for x in X])
        return np.sum(dists)

    # Check if median is solution of the Fermat-Weber location problem
    fermat_weber = fmin_bfgs(cost_func, median, disp=False)
    assert_array_almost_equal(median, fermat_weber)
    # Check when maximum iteration is exceeded a warning is emitted
    assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.) 
Example #25
Source File: LogisticRegression.py    From MachineLearning_Python with MIT License    5 votes
def LogisticRegression():
    data = loadtxtAndcsv_data("data2.txt", ",", np.float64) 
    X = data[:,0:-1]
    y = data[:,-1]
    
    plot_data(X,y)  # plot the data
    
    X = mapFeature(X[:,0],X[:,1])           # map to polynomial features
    initial_theta = np.zeros((X.shape[1],1))# initialize theta
    initial_lambda = 0.1                    # initialize the regularization coefficient, typically 0.01, 0.1, 1, ...
    
    J = costFunction(initial_theta,X,y,initial_lambda)  # compute the cost J for the initial theta and lambda
    
    print(J)  # print the computed value; it should be 0.693147
    #result = optimize.fmin(costFunction, initial_theta, args=(X,y,initial_lambda))    # using fmin directly does not work well
    '''Call scipy's fmin_bfgs optimizer (quasi-Newton method: Broyden-Fletcher-Goldfarb-Shanno)
    - costFunction is our own cost function,
    - initial_theta is the initial value,
    - fprime specifies the gradient of costFunction,
    - args passes the remaining parameters as a tuple; the theta that minimizes costFunction is returned
    '''
    result = optimize.fmin_bfgs(costFunction, initial_theta, fprime=gradient, args=(X,y,initial_lambda))    
    p = predict(X, result)   # predict
    print(u'Accuracy on the training set: %f%%'%np.mean(np.float64(p==y)*100))   # compare to the true labels; p==y returns True, converted to float
    
    X = data[:,0:-1]
    y = data[:,-1]    
    plotDecisionBoundary(result,X,y)    # plot the decision boundary
    
    

# Load txt and csv files 
Example #26
Source File: test_optimize.py    From Computable with MIT License    5 votes
def test_bfgs_numerical_jacobian(self):
        """ BFGS with numerical jacobian and a vector epsilon parameter """
        # define the epsilon parameter using a random vector
        epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))

        params = optimize.fmin_bfgs(self.func, self.startparams,
                                    epsilon=epsilon, args=(),
                                    maxiter=self.maxiter, disp=False)

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6) 
Example #27
Source File: test_optimize.py    From Computable with MIT License    5 votes
def test_bfgs_nan(self):
        """Test corner case where nan is fed to optimizer.  See #1542."""
        func = lambda x: x
        fprime = lambda x: np.ones_like(x)
        x0 = [np.nan]
        olderr = np.seterr(over='ignore')
        try:
            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(np.isnan(func(x)))
        finally:
            np.seterr(**olderr) 
Example #28
Source File: test_optimize.py    From Computable with MIT License    5 votes
def test_bfgs(self, use_wrapper=False):
        """ Broyden-Fletcher-Goldfarb-Shanno optimization routine """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams,
                                    jac=self.grad, method='BFGS', args=(),
                                    options=opts)

            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
                    res['x'], res['fun'], res['jac'], res['hess_inv'], \
                    res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=False, retall=False)

            (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 10, self.funccalls)
        assert_(self.gradcalls == 8, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[6:8],
                        [[0, -5.25060743e-01, 4.87748473e-01],
                         [0, -5.24885582e-01, 4.87530347e-01]],
                        atol=1e-14, rtol=1e-7) 
Example #29
Source File: gmm.py    From vnpy_crypto with MIT License    5 votes
def fitgmm_cu(self, start, optim_method='bfgs', optim_args=None):
        '''estimate parameters using continuously updating GMM

        Parameters
        ----------
        start : array_like
            starting values for minimization

        Returns
        -------
        paramest : array
            estimated parameters

        Notes
        -----
        todo: add fixed parameter option, not here ???

        uses scipy.optimize.fmin

        '''
##        if not fixed is None:  #fixed not defined in this version
##            raise NotImplementedError

        if optim_args is None:
            optim_args = {}

        if optim_method == 'nm':
            optimizer = optimize.fmin
        elif optim_method == 'bfgs':
            optimizer = optimize.fmin_bfgs
            optim_args['fprime'] = self.score_cu
        elif optim_method == 'ncg':
            optimizer = optimize.fmin_ncg
        else:
            raise ValueError('optimizer method not available')

        #TODO: add other optimization options and results
        return optimizer(self.gmmobjective_cu, start, args=(), **optim_args) 
Example #30
Source File: optimizer.py    From vnpy_crypto with MIT License    5 votes
def _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,
                    maxiter=100, callback=None, retall=False,
                    full_output=True, hess=None):
    gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
    norm = kwargs.setdefault('norm', np.Inf)
    epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
    retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,
                                 gtol=gtol, norm=norm, epsilon=epsilon,
                                 maxiter=maxiter, full_output=full_output,
                                 disp=disp, retall=retall, callback=callback)
    if full_output:
        if not retall:
            xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals
        else:
            (xopt, fopt, gopt, Hinv, fcalls,
             gcalls, warnflag, allvecs) = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,
                'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':
                warnflag, 'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        xopt = retvals
        retvals = None

    return xopt, retvals