Python scipy.optimize.fmin_cg() Examples

The following are 14 code examples of scipy.optimize.fmin_cg(), collected from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the module scipy.optimize.
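Before the project examples, here is a minimal, self-contained sketch of calling fmin_cg directly: a toy quadratic objective with an analytic gradient, both invented for illustration rather than taken from any of the projects below.

import numpy as np
from scipy import optimize

def f(x):
    # toy objective: (x0 - 3)^2 + 2*(x1 + 1)^2
    return (x[0] - 3.0) ** 2 + 2.0 * (x[1] + 1.0) ** 2

def fprime(x):
    # analytic gradient of f
    return np.array([2.0 * (x[0] - 3.0), 4.0 * (x[1] + 1.0)])

# with full_output=True, fmin_cg returns (xopt, fopt, func_calls, grad_calls, warnflag)
xopt, fopt, func_calls, grad_calls, warnflag = optimize.fmin_cg(
    f, np.zeros(2), fprime=fprime, gtol=1e-6, disp=False, full_output=True)
print(xopt, warnflag)  # xopt should be close to [3, -1]; warnflag 0 means convergence

The project examples below exercise the same call with and without full_output, and a few of them reach it through the optimize.minimize(method='CG') wrapper instead.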
Example #1
Source File: LogisticRegression_OneVsAll.py    From MachineLearning_Python with MIT License
def oneVsAll(X,y,num_labels,Lambda):
    # initialize variables
    m,n = X.shape
    all_theta = np.zeros((n+1,num_labels))  # each column holds the theta for one class (10 columns in total)
    X = np.hstack((np.ones((m,1)),X))       # prepend a column of ones to X as the bias term
    class_y = np.zeros((m,num_labels))      # y takes values 0-9; map it to 0/1 indicators
    initial_theta = np.zeros((n+1,1))       # initial theta for a single classifier
    
    # map y to one-hot indicators
    for i in range(num_labels):
        class_y[:,i] = np.int32(y==i).reshape(1,-1) # note: reshape(1,-1) is needed for the assignment
    
    #np.savetxt("class_y.csv", class_y[0:600,:], delimiter=',')    
    
    '''loop over each class and compute the corresponding theta'''
    for i in range(num_labels):
        #optimize.fmin_cg
        result = optimize.fmin_bfgs(costFunction, initial_theta, fprime=gradient, args=(X,class_y[:,i],Lambda)) # call the gradient-based optimizer
        all_theta[:,i] = result.reshape(1,-1)   # store the result in all_theta
        
    all_theta = np.transpose(all_theta) 
    return all_theta

# cost function 
Example #2
Source File: bayesreg.py    From nispat with GNU General Public License v3.0
def estimate(self, hyp0, X, y, optimizer='cg'):
        """ Function to estimate the model """

        if optimizer.lower() == 'cg':  # conjugate gradients
            out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, (X, y),
                                   disp=True, gtol=self.tol,
                                   maxiter=self.n_iter, full_output=1)

        elif optimizer.lower() == 'powell':  # Powell's method
            out = optimize.fmin_powell(self.loglik, hyp0, (X, y),
                                       full_output=1)
        else:
            raise ValueError("unknown optimizer")

        self.hyp = out[0]
        self.nlZ = out[1]
        self.optimizer = optimizer

        return self.hyp 
Example #3
Source File: optimizer.py    From vnpy_crypto with MIT License
def _fit_cg(f, score, start_params, fargs, kwargs, disp=True,
                maxiter=100, callback=None, retall=False,
                full_output=True, hess=None):
    gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
    norm = kwargs.setdefault('norm', np.Inf)
    epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
    retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,
                               epsilon=epsilon, maxiter=maxiter,
                               full_output=full_output, disp=disp,
                               retall=retall, callback=callback)
    if full_output:
        if not retall:
            xopt, fopt, fcalls, gcalls, warnflag = retvals
        else:
            xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
                   'warnflag': warnflag, 'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})

    else:
        xopt = retvals
        retvals = None

    return xopt, retvals 
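As a hedged usage sketch (not part of the source above), the wrapper can be exercised on a toy quadratic to see the (xopt, retvals) structure it returns; the objective f and gradient score here are illustrative assumptions.

import numpy as np

def f(params):
    # toy objective minimized at params == [1, 1, 1]
    return np.sum((params - 1.0) ** 2)

def score(params):
    # gradient of the toy objective
    return 2.0 * (params - 1.0)

# fargs is not forwarded by _fit_cg itself; kwargs receives the gtol/norm/epsilon defaults
xopt, retvals = _fit_cg(f, score, start_params=np.zeros(3), fargs=(), kwargs={},
                        disp=False, maxiter=50)
print(xopt)                  # expected to be close to [1., 1., 1.]
print(retvals['converged'])  # True when fmin_cg's warnflag is 0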
Example #4
Source File: test_optimize.py    From Computable with MIT License
def test_cg(self, use_wrapper=False):
        """ conjugate gradient optimization routine """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='CG', jac=self.grad,
                                    options=opts)

            params, fopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (),
                                      maxiter=self.maxiter,
                                      full_output=True, disp=False, retall=False)

            (params, fopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 9, self.funccalls)
        assert_(self.gradcalls == 7, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[2:4],
                        [[0, -0.5, 0.5],
                         [0, -5.05700028e-01, 4.95985862e-01]],
                        atol=1e-14, rtol=1e-7) 
Example #5
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_cg(self):
        # conjugate gradient optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='CG', jac=self.grad,
                                    options=opts)
            params, fopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_cg(self.func, self.startparams,
                                      self.grad, (), maxiter=self.maxiter,
                                      full_output=True, disp=self.disp,
                                      retall=False)
            (params, fopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 9, self.funccalls)
        assert_(self.gradcalls == 7, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[2:4],
                        [[0, -0.5, 0.5],
                         [0, -5.05700028e-01, 4.95985862e-01]],
                        atol=1e-14, rtol=1e-7) 
Example #6
Source File: S2HarmonicDensity.py    From lie_learn with MIT License
def mle_cg(self, empirical_moments, eta_init=None, verbose=True):

        if eta_init is None:
            eta = np.zeros((self.L_max + 1) ** 2 - 1)
        else:
            eta = eta_init.copy()

        def objective(eta):
            logp, _ = self.log_p_and_grad(eta, empirical_moments)
            return -logp
        def grad(eta):
            _, grad = self.log_p_and_grad(eta, empirical_moments)
            return -grad
        eta_min, logp_min, fun_calls, grad_calls, warnflag = fmin_cg(f=objective, fprime=grad, x0=eta,
                                                                     full_output=True)

        if verbose:
            print('min log p:', logp_min)
            print('fun_calls:', fun_calls)
            print('grad_calls:', grad_calls)
            print('warnflag:', warnflag)
            #print 'allvecs:', allvecs

        # Finally, compute Z:
        _, lnZ = self.moments(eta_min)
        return eta_min, lnZ 
Example #7
Source File: gp.py    From nispat with GNU General Public License v3.0
def estimate(self, hyp0, covfunc, X, y, optimizer='cg'):
        """ Function to estimate the model
        """
        if len(X.shape) == 1:
            X = X[:, np.newaxis]

        self.hyp0 = hyp0
        
        if optimizer.lower() == 'cg':  # conjugate gradients
            out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik,
                                   (covfunc, X, y), disp=True, gtol=self.tol,
                                   maxiter=self.n_iter, full_output=1)

        elif optimizer.lower() == 'powell':  # Powell's method
            out = optimize.fmin_powell(self.loglik, hyp0, (covfunc, X, y),
                                       full_output=1)
        else:
            raise ValueError("unknown optimizer")

        # Always return a 1d array. The optimizer sometimes changes dimensions
        if len(out[0].shape) > 1:
            self.hyp = out[0].flatten()
        else:
            self.hyp = out[0]
        self.nlZ = out[1]
        self.optimizer = optimizer

        return self.hyp 
Example #8
Source File: nca_scipy.py    From curriculum with GNU General Public License v3.0
def fit_conjugate_descent(self, A, X, Y):
        '''
        train on X and Y, supervised, to learn a matrix A, using conjugate descent;
        maximize \sum_i \sum_{j \in C_i} \frac{\exp(-||Ax_i - Ax_j||^2)}{\sum_{k \neq i} \exp(-||Ax_i - Ax_k||^2)}
        @params X : 2-d numpy.array
        @params Y : 1-d numpy.array
        '''
        start = time.time()
        def costf(A):
            f, _ = self.nca_cost(A.reshape((self.high_dims, self.low_dims)), X, Y)
            return f

        def costg(A):
            _, g = self.nca_cost(A.reshape((self.high_dims, self.low_dims)), X, Y)
            return g

        # optimizer params
        self.A = fmin_cg(costf, A.ravel(), costg, maxiter = self.max_steps)
        self.A = self.A.reshape((self.high_dims, self.low_dims))

        end = time.time()
        train_time = end - start

        # print information
        if self.verbose:
            cls_name = self.__class__.__name__
            print("[{}] Traing took {:8.2f}s.".format(cls_name, train_time)) 
Example #9
Source File: trainLinearReg.py    From coursera-ml-py with MIT License
def train_linear_reg(x, y, lmd):
    initial_theta = np.ones(x.shape[1])

    def cost_func(t):
        return lrcf.linear_reg_cost_function(t, x, y, lmd)[0]

    def grad_func(t):
        return lrcf.linear_reg_cost_function(t, x, y, lmd)[1]

    theta, *unused = opt.fmin_cg(cost_func, initial_theta, grad_func, maxiter=200, disp=False,
                                     full_output=True)

    return theta 
Example #10
Source File: oneVsAll.py    From coursera-ml-py with MIT License
def one_vs_all(X, y, num_labels, lmd):
    # Some useful variables
    (m, n) = X.shape

    # You need to return the following variables correctly
    all_theta = np.zeros((num_labels, n + 1))

    # Add ones to the X data 2D-array
    X = np.c_[np.ones(m), X]

    for i in range(num_labels):
        print('Optimizing for handwritten number {}...'.format(i))
        # ===================== Your Code Here =====================
        # Instructions : You should complete the following code to train num_labels
        #                logistic regression classifiers with regularization
        #                parameter lambda
        #
        #
        # Hint: you can use y == c to obtain a vector of True(1)'s and False(0)'s that tell you
        #       whether the ground truth is true/false for this class
        #
        # Note: For this assignment, we recommend using opt.fmin_cg to optimize the cost
        #       function. It is okay to use a for-loop (for c in range(num_labels)) to
        #       loop over the different classes; a possible completion is sketched
        #       after this example.
        #



        # ============================================================    
        print('Done')

    return all_theta 
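Since the body between the "Your Code Here" markers is intentionally left blank for the exercise, here is one possible, hedged completion sketch. The helper lr_cost_function(theta, X, y, lmd) returning (cost, gradient) is a hypothetical stand-in for the regularized logistic-regression cost function the course provides; only the use of opt.fmin_cg and the y == c hint come from the comments above.

import numpy as np
import scipy.optimize as opt

def one_vs_all_sketch(X, y, num_labels, lmd, lr_cost_function):
    # lr_cost_function(theta, X, y, lmd) -> (cost, grad) is assumed to be provided
    (m, n) = X.shape
    all_theta = np.zeros((num_labels, n + 1))
    X = np.c_[np.ones(m), X]

    for c in range(num_labels):
        yc = (y == c).astype(float)      # 0/1 labels for the current one-vs-all classifier
        initial_theta = np.zeros(n + 1)
        all_theta[c] = opt.fmin_cg(
            lambda t: lr_cost_function(t, X, yc, lmd)[0],           # cost
            initial_theta,
            fprime=lambda t: lr_cost_function(t, X, yc, lmd)[1],    # gradient
            maxiter=100, disp=False)
    return all_theta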
Example #11
Source File: optimizer.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _fit_cg(f, score, start_params, fargs, kwargs, disp=True,
                maxiter=100, callback=None, retall=False,
                full_output=True, hess=None):
    gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
    norm = kwargs.setdefault('norm', np.Inf)
    epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
    retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,
                               epsilon=epsilon, maxiter=maxiter,
                               full_output=full_output, disp=disp,
                               retall=retall, callback=callback)
    if full_output:
        if not retall:
            xopt, fopt, fcalls, gcalls, warnflag = retvals
        else:
            xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
                   'warnflag': warnflag, 'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})

    else:
        xopt = retvals
        retvals = None

    return xopt, retvals 
Example #12
Source File: NeuralNetwork.py    From MachineLearning_Python with MIT License
def neuralNetwork(input_layer_size,hidden_layer_size,out_put_layer):
    data_img = loadmat_data("data_digits.mat")
    X = data_img['X']
    y = data_img['y']

    '''scaler = StandardScaler()
    scaler.fit(X)
    X = scaler.transform(X)'''  
    
    m,n = X.shape
    """digits = datasets.load_digits()
    X = digits.data
    y = digits.target
    m,n = X.shape
    
    scaler = StandardScaler()
    scaler.fit(X)
    X = scaler.transform(X)"""
    
    ## randomly display a few rows of data
    rand_indices = [t for t in [np.random.randint(x-x, m) for x in range(100)]]  # generate 100 random integers in [0, m)
    display_data(X[rand_indices,:])     # display 100 digits    
    
    #nn_params = np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1)))
    
    Lambda = 1
    
    initial_Theta1 = randInitializeWeights(input_layer_size,hidden_layer_size); 
    initial_Theta2 = randInitializeWeights(hidden_layer_size,out_put_layer)
    
    initial_nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1)))  # unroll the thetas into a single vector    
    #np.savetxt("testTheta.csv",initial_nn_params,delimiter=",")
    start = time.time()
    result = optimize.fmin_cg(nnCostFunction, initial_nn_params, fprime=nnGradient, args=(input_layer_size,hidden_layer_size,out_put_layer,X,y,Lambda), maxiter=100)
    print (u'Execution time:',time.time()-start)
    print (result)
    '''Visualize Theta1'''
    length = result.shape[0]
    Theta1 = result[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)
    Theta2 = result[hidden_layer_size*(input_layer_size+1):length].reshape(out_put_layer,hidden_layer_size+1)    
    display_data(Theta1[:,1:length])
    display_data(Theta2[:,1:length])
    '''Prediction'''
    p = predict(Theta1,Theta2,X)
    print (u"Prediction accuracy: %f%%"%np.mean(np.float64(p == y.reshape(-1,1))*100))    
    res = np.hstack((p,y.reshape(-1,1)))
    np.savetxt("predict.csv", res, delimiter=',')
    

# load the .mat data file 
Example #13
Source File: scipy_minimizer.py    From pyiron with BSD 3-Clause "New" or "Revised" License
def run_static(self):
        self.ref_job_initialize()
        self._logger.debug("cg status: " + str(self.status))
        self._run_again = True
        if self.ref_job.server.run_mode.interactive:
            self._run_again = False
        self.ref_job.run(run_again=self._run_again)
        self.status.running = True
        if self.input["minimizer"] == "CG":
            output = optimize.fmin_cg(
                f=self._update_energy,
                x0=self.ref_job.structure.positions.flatten(),
                fprime=self._update_forces,
                maxiter=self.input["ionic_steps"],
                gtol=self.input["ionic_forces"],
                disp=False,
                full_output=True,
            )
            self.output._convergence = output[4]
        elif self.input["minimizer"] == "BFGS":
            output = optimize.fmin_bfgs(
                f=self._update_energy,
                x0=self.ref_job.structure.positions.flatten(),
                fprime=self._update_forces,
                maxiter=self.input["ionic_steps"],
                gtol=self.input["ionic_forces"],
                disp=False,
                full_output=True,
            )
            self.output._hessian = output[3]
            self.output._convergence = output[6]
        elif self.input["minimizer"] == "simple":
            # scipy.optimize.fmin (Nelder-Mead) is gradient-free: its first argument is
            # named func and it accepts no gtol, so the force tolerance is not passed here
            output = optimize.fmin(
                func=self._update_energy,
                x0=self.ref_job.structure.positions.flatten(),
                maxiter=self.input["ionic_steps"],
                disp=False,
                full_output=True,
            )
            # with full_output=True, fmin returns (xopt, fopt, iter, funcalls, warnflag)
            self.output._convergence = output[4]
        self.status.collect = True
        self.collect_output()
        if self.ref_job.server.run_mode.interactive:
            self.ref_job.interactive_close() 
Example #14
Source File: pr.py    From AIF360 with Apache License 2.0
def fit(self, X, y, ns=N_S, itype=0, **kwargs):
        """ train this model

        Parameters
        ----------
        X : array, shape = (n_samples, n_features)
            feature vectors of samples
        y : array, shape = (n_samples)
            target class of samples
        ns : int
            number of sensitive features. currently fixed to N_S
        itype : int
            type of initialization method
        kwargs : any
            arguments to the optimizer
        """

        # rearrange input arguments
        s = np.atleast_1d(np.squeeze(np.array(X)[:, -ns]).astype(int))
        if self.fit_intercept:
            X = np.c_[np.atleast_2d(X)[:, :-ns], np.ones(X.shape[0])]
        else:
            X = np.atleast_2d(X)[:, :-ns]

        # check optimization parameters
        if 'disp' not in kwargs:
            kwargs['disp'] = False
        if 'maxiter' not in kwargs:
            kwargs['maxiter'] = 100

        # set instance variables
        self.n_s_ = ns
        self.n_sfv_ = np.max(s) + 1
        self.c_s_ = np.array([np.sum(s == si).astype(float)  # np.float alias removed in recent numpy
                              for si in range(self.n_sfv_)])
        self.n_features_ = X.shape[1]
        self.n_samples_ = X.shape[0]

        # optimization
        self.init_coef(itype, X, y, s)
        self.coef_ = fmin_cg(self.loss,
                             self.coef_,
                             fprime=self.grad_loss,
                             args=(X, y, s),
                             **kwargs)

        # get final loss
        self.f_loss_ = self.loss(self.coef_, X, y, s)