Python scipy.optimize.minimize_scalar() Examples

The following are 30 code examples of scipy.optimize.minimize_scalar(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module scipy.optimize, or try the search function.
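As a quick orientation before the examples (a minimal sketch of my own, not taken from any project below): minimize_scalar() minimizes a function of a single variable, either with a bracketing method such as Brent's (the default) or over an explicit interval with method='bounded'.

from scipy.optimize import minimize_scalar

def f(x):
    return (x - 2.0) ** 2 + 1.0

# Default Brent method: unconstrained, optionally seeded with a bracket.
res = minimize_scalar(f)
print(res.x, res.fun)  # ~2.0, ~1.0

# Bounded method: the search is restricted to [0, 1], so the
# minimum lands at the boundary near x = 1.
res = minimize_scalar(f, bounds=(0.0, 1.0), method='bounded')
print(res.x)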
Example #1
Source File: fitting_functions.py    From DynaPhoPy with MIT License
def get_fitting(self):
        from scipy.integrate import quad

        try:

            fit_params, fit_covariances = self.get_fitting_parameters()

            peak_pos = minimize_scalar(lambda x: -self._function(x, *fit_params), fit_params[0],
                                       bounds=[self.test_frequencies_range[0], self.test_frequencies_range[-1]],
                                       method='bounded')

            frequency = peak_pos["x"]
            maximum = -peak_pos["fun"]
            width = 2.0 * self._g_a(frequency, fit_params[0], fit_params[1], fit_params[4])
            asymmetry = fit_params[4]

            area, error_integration = quad(self._function, 0, self.test_frequencies_range[-1],
                                           args=tuple(fit_params),
                                           epsabs=1e-8)
            # area = fit_params[2]

            standard_errors = get_standard_errors_from_covariance(fit_covariances)
            global_error = np.average(standard_errors[:2])/np.sqrt(area)
            if np.isnan(global_error):
                raise RuntimeError

            #error = get_error_from_covariance(fit_covariances)
            base_line = fit_params[3]

            return {'maximum': maximum,
                    'width': width,
                    'peak_position': frequency,
                    'global_error': global_error,
                    'area': area,
                    'base_line': base_line,
                    'asymmetry': asymmetry,
                    'all_good': True}

        except RuntimeError:
            return {'all_good': False} 
Example #2
Source File: stable.py    From copulae with MIT License
def _mode(cls, alpha: float, beta: float, beta_max=1 - 1e-11):
        cls._check_parameters(alpha, beta)

        if alpha * beta == 0:
            return 0

        beta = min(beta, beta_max)  # cap beta just below 1 ('max' would force beta to ~1 for every input)
        bounds = sorted([0, np.sign(beta) * -0.7])

        return float(opt.minimize_scalar(lambda x: -cls.pdf(x, alpha, beta), bounds=bounds)['x']) 
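One caveat worth flagging about the call above (my observation, not part of the copulae source): older SciPy releases silently ignored bounds unless method='bounded' was passed explicitly, while newer releases select the bounded method automatically when bounds are given. Spelling the method out, as in this hypothetical variant, behaves the same on every version:

# Variant of the call above with the method made explicit, so the
# bounds are honored on old SciPy versions as well.
res = opt.minimize_scalar(lambda x: -cls.pdf(x, alpha, beta),
                          bounds=bounds, method='bounded')
mode = float(res['x'])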
Example #3
Source File: zfit.py    From picasso with MIT License
def fit_z(locs, info, calibration, magnification_factor, filter=2):
    cx = _np.array(calibration["X Coefficients"])
    cy = _np.array(calibration["Y Coefficients"])
    z = _np.zeros_like(locs.x)
    square_d_zcalib = _np.zeros_like(z)
    sx = locs.sx
    sy = locs.sy
    for i in range(len(z)):
        result = _minimize_scalar(_fit_z_target, args=(sx[i], sy[i], cx, cy))
        z[i] = result.x
        square_d_zcalib[i] = result.fun
    z *= magnification_factor
    locs = _lib.append_to_rec(locs, z, "z")
    locs = _lib.append_to_rec(locs, _np.sqrt(square_d_zcalib), "d_zcalib")
    locs = _lib.ensure_sanity(locs, info)
    return filter_z_fits(locs, filter) 
Example #4
Source File: test_hyperbolic_radius.py    From pvae with MIT License
def test_impl_rep(self):
        torch.manual_seed(1239)
        self.c = torch.tensor([.5])
        self.dim = 5
        self.scale = torch.tensor([0.8, 1.5]).unsqueeze(-1)
        self.scale.requires_grad=True
        self.d = HyperbolicRadius(self.dim, self.c, self.scale)
        N = 10
        x = self.d.rsample(torch.Size([N])).squeeze(0)
        x.backward(torch.ones_like(x))

        u = self.d.cdf(x).squeeze(-1).detach().numpy()
        delta = 1e-4
        scale2 = self.scale.squeeze(-1).detach().numpy() + delta
        scale1 = self.scale.squeeze(-1).detach().numpy() - delta
        def fun(x, u, scale):
            res = np.abs(cdf_r(torch.tensor(x), torch.tensor(scale), self.c, self.dim).detach().numpy() - u)
            if x < 0: res += np.exp(-10*x)
            return res

        grad_approx = torch.zeros(N, self.scale.size(0))
        for i in range(N):
            for j in range(self.scale.size(0)):
                res2 = minimize_scalar(fun, args=(u[i, [j]], scale2[[j]]), tol=1e-7)
                res1 = minimize_scalar(fun, args=(u[i, [j]], scale1[[j]]), tol=1e-7)
                grad_approx[i, j] = float((res2.x - res1.x) / 2 / delta)
        torch.testing.assert_allclose(self.scale.grad.view(-1), grad_approx.sum(0).view(-1), rtol=0.05, atol=.5) 
Example #5
Source File: merger_models.py    From treetime with MIT License
def optimize_Tc(self):
        '''
        determines the coalescent time scale that optimizes the coalescent likelihood of the tree
        '''
        from scipy.optimize import minimize_scalar
        initial_Tc = self.Tc
        def cost(Tc):
            self.set_Tc(Tc)
            return -self.total_LH()

        sol = minimize_scalar(cost, bounds=[ttconf.TINY_NUMBER,10.0])
        if "success" in sol and sol["success"]:
            self.set_Tc(sol['x'])
        else:
            self.logger("merger_models:optimize_Tc: optimization of coalescent time scale failed: " + str(sol), 0, warn=True)
            self.set_Tc(initial_Tc.y, T=initial_Tc.x) 
Example #6
Source File: treeanc.py    From treetime with MIT License
def optimize_gtr_rate(self):
        """Estimate the overal rate of the GTR model by optimizing the full
        likelihood of the sequence data.
        """
        from scipy.optimize import minimize_scalar
        def cost_func(sqrt_mu):
            self.gtr.mu = sqrt_mu**2
            self.postorder_traversal_marginal()
            self.total_LH_and_root_sequence(sample_from_profile=False,
                                            assign_sequence=False)
            return -self.sequence_LH()

        old_mu = self.gtr.mu
        try:
            sol = minimize_scalar(cost_func,bracket=[0.01*np.sqrt(old_mu), np.sqrt(old_mu),100*np.sqrt(old_mu)])
        except:
            self.gtr.mu=old_mu
            self.logger('treeanc:optimize_gtr_rate: optimization failed, continuing with previous mu',1,warn=True)
            return

        if sol['success']:
            self.gtr.mu = sol['x']**2
            self.logger('treeanc:optimize_gtr_rate: optimization successful. Overall rate estimated to be %f'%self.gtr.mu,1)
        else:
            self.gtr.mu=old_mu
            self.logger('treeanc:optimize_gtr_rate: optimization failed, continuing with previous mu',1,warn=True)


###############################################################################
### Utility functions
############################################################################### 
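A note on the bracket argument used above (my gloss, not from the treetime source): a three-point bracket (a, b, c) asserts that f(b) is lower than both f(a) and f(c), so a minimum lies between a and c. If the triple does not actually bracket a minimum, minimize_scalar raises a ValueError, which is presumably why optimize_gtr_rate wraps the call in a try block. A minimal illustration:

from scipy.optimize import minimize_scalar

def f(mu):
    return (mu - 1.0) ** 2

# Valid bracket: f(1.0) is below both f(0.1) and f(10.0).
res = minimize_scalar(f, bracket=[0.1, 1.0, 10.0])
print(res.x)  # ~1.0

# A triple that does not bracket a minimum, e.g. [2.0, 3.0, 4.0],
# raises ValueError instead of returning a result.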
Example #7
Source File: treeregression.py    From treetime with MIT License
def _optimal_root_along_branch(self, n, tv, bv, var, slope=None):
        from scipy.optimize import minimize_scalar
        def chisq(x):
            tmpQ = self.propagate_averages(n, tv, bv*x, var*x) \
                 + self.propagate_averages(n, tv, bv*(1-x), var*(1-x), outgroup=True)
            return base_regression(tmpQ, slope=slope)['chisq']

        if n.bad_branch or (n!=self.tree.root and n.up.bad_branch):
            return np.nan, np.inf


        chisq_prox = np.inf if n.is_terminal() else base_regression(n.Qtot, slope=slope)['chisq']
        chisq_dist = np.inf if n==self.tree.root else base_regression(n.up.Qtot, slope=slope)['chisq']

        grid = np.linspace(0.001,0.999,6)
        chisq_grid = np.array([chisq(x) for x in grid])
        min_chisq = chisq_grid.min()
        if chisq_prox<=min_chisq:
            return 0.0, chisq_prox
        elif chisq_dist<=min_chisq:
            return 1.0, chisq_dist
        else:
            ii = np.argmin(chisq_grid)
            bounds = (0 if ii==0 else grid[ii-1], 1.0 if ii==len(grid)-1 else grid[ii+1])
            sol = minimize_scalar(chisq, bounds=bounds, method="bounded")
            if sol["success"]:
                return sol['x'], sol['fun']
            else:
                return np.nan, np.inf 
Example #8
Source File: NonLinearLeastSquares.py    From FaceSwap with MIT License
def GaussNewton(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
    x = np.array(x0, dtype=np.float64)

    oldCost = -1
    for i in range(maxIter):
        r = fun(x, *args)
        cost = np.sum(r**2)

        if verbose > 0:
            print "Cost at iteration " + str(i) + ": " + str(cost)

        if (cost < eps or abs(cost - oldCost) < eps):
            break
        oldCost = cost

        J = funJack(x, *args)
        grad = np.dot(J.T, r)
        H = np.dot(J.T, J)
        direction = np.linalg.solve(H, grad)

        # step-length optimization via a scalar line search
        lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
        # step length
        alpha = lineSearchRes["x"]

        x = x + alpha * direction
        
    if verbose > 0:
        print("Gauss Newton finished after " + str(i + 1) + " iterations")
        r = fun(x, *args)
        cost = np.sum(r**2)
        print("cost = " + str(cost))
        print("x = " + str(x))

    return x 
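LineSearchFun is called here (and in the other FaceSwap/DAMDNet excerpts below) but is not part of the excerpt. A minimal sketch consistent with the call sites — the name and argument order are taken from the calls, the body is an assumption — evaluates the sum-of-squares cost at a trial step along the search direction:

def LineSearchFun(alpha, x, direction, fun, args):
    # Cost of the residuals after a step of length alpha along `direction`;
    # minimize_scalar picks the alpha (possibly negative) that minimizes it.
    r = fun(x + alpha * direction, *args)
    return np.sum(r ** 2)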
Example #9
Source File: NonLinearLeastSquares.py    From FaceSwap with MIT License
def SteepestDescent(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
    x = np.array(x0, dtype=np.float64)

    oldCost = -1
    for i in range(maxIter):
        r = fun(x, *args)
        cost = np.sum(r**2)

        if verbose > 0:
            print "Cost at iteration " + str(i) + ": " + str(cost)

        # stopping criteria
        if (cost < eps or abs(cost - oldCost) < eps):
            break
        oldCost = cost

        J = funJack(x, *args)
        grad = 2 * np.dot(J.T, r)
        direction = grad

        # step-length optimization via a scalar line search
        lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
        # step length
        alpha = lineSearchRes["x"]

        x = x + alpha * direction

    if verbose > 0:
        print("Steepest Descent finished after " + str(i + 1) + " iterations")
        r = fun(x, *args)
        cost = np.sum(r**2)
        print("cost = " + str(cost))
        print("x = " + str(x))


    return x 
Example #10
Source File: functions.py    From wa with Apache License 2.0
def calculate_first_round(df, pixel_pars, infz_bounds,
                          tolerance_yearly_waterbal):
    '''
    Calculates the water balance for a single cell and a single year on
    green pixels
    '''
    # Pixel parameters
    thetasat, rootdepth, qratio, baseflow_filter = pixel_pars
    # LAI and soil moisture calculations
    df = lai_and_soil_calculations(df, thetasat, rootdepth)
    # Check P-ET-dsm
    p_et_dsm = sum(df['p']) - sum(df['et']) - sum(df['dsm'])
    if p_et_dsm <= 0:
        df = return_empty_df_columns(df)
        second_round = 20
    elif np.isnan(p_et_dsm):
        df = return_empty_df_columns(df)
        second_round = 0
    else:
        # Vegetation and interception calculations
        df = veg_int_calc(df)
        # Numeric solver
        minerror_infz = minimize_scalar(flows_calculations_first_round,
                                        bounds=infz_bounds,
                                        args=(df, (qratio, baseflow_filter,
                                                   p_et_dsm), True),
                                        method='bounded')
        # Flows calculation
        df = flows_calculations_first_round(minerror_infz.x, df,
                                            (qratio, baseflow_filter,
                                             p_et_dsm), False)
        # Water balance fix for when percolation is artificially set to 0
        maski = (df['p'] - df['et'] - df['Qsw'] - df['dsm']) < 0
        if np.any(maski):
            df.loc[maski, 'Qsw'] = df['p'][maski] - df['et'][maski] - df['dsm'][maski]
        df['Qsw'] = df['Qsw'].apply(pos_func, 'columns')

        if sqrt(minerror_infz.fun) > tolerance_yearly_waterbal:
            df = return_empty_df_columns(df)
            second_round = 30
        else:
            df['Qtot'] = df['Qsw'] + df['Qgw']
            second_round = 0
    # Total runoff
    df_out = df[['Qsw', 'Qgw', 'Qtot', 'dsm', 'infz', 'thetarz', 'perc']]
    # Return data frame and second_round boolean
    return df_out, second_round 
Example #11
Source File: scienceinstrument.py    From soapy with GNU General Public License v3.0
def __init__(self, soapyConfig, nSci=0, mask=None):
        scienceCam.__init__(self, soapyConfig, nSci, mask)

        self.normMask = self.mask / numpy.sqrt(numpy.sum(numpy.abs(self.mask)**2))
        self.fibreSize = opt.minimize_scalar(self.refCouplingLoss, bracket=[1.0, self.sim_size]).x
        self.refStrehl = 1.0 - self.refCouplingLoss(self.fibreSize)
        self.fibre_efield = self.fibreEfield(self.fibreSize)
        print("Coupling efficiency: {0:.3f}".format(self.refStrehl)) 
Example #12
Source File: NonLinearLeastSquares.py    From DAMDNet with Apache License 2.0
def SteepestDescent(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
    x = np.array(x0, dtype=np.float64)

    oldCost = -1
    for i in range(maxIter):
        r = fun(x, *args)
        cost = np.sum(r**2)

        if verbose > 0:
            print ("Cost at iteration " + str(i) + ": " + str(cost))

        # stopping criteria
        if (cost < eps or abs(cost - oldCost) < eps):
            break
        oldCost = cost

        J = funJack(x, *args)
        grad = 2 * np.dot(J.T, r)
        direction = grad

        # step-length optimization via a scalar line search
        lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
        # step length
        alpha = lineSearchRes["x"]

        x = x + alpha * direction

    if verbose > 0:
        print ("Steepest Descent finished after "  + str(i + 1) + " iterations")
        r = fun(x, *args)
        cost = np.sum(r**2)
        print ("cost = " + str(cost))
        print ("x = " + str(x))


    return x 
Example #13
Source File: camera.py    From camera.py with MIT License
def calibrate_division_model(line_coordinates, y0, z_n, focal_length=1):
    """
    Calibrate division model by making lines straight.

    :param line_coordinates: coordinates of points on lines
    :type line_coordinates: np.ndarray, shape=(nlines, npoints_per_line, 2)
    :param y0: radial distortion center xy coordinates
    :type y0: array-like, len=2
    :param z_n: distance to boundary (pincushion: image width / 2, barrel: image diagonal / 2)
    :type z_n: float
    :param focal_length: focal length of the camera (optional)
    :type focal_length: float
    :return: Camera object with calibrated division model parameter lambda
    :rtype: Camera
    """

    def lines_fit_error(p, line_coordinates, cam):
        if not (-1 < p < 1):
            return np.inf
        assert line_coordinates.ndim == 3
        cam.division_lambda = p
        error = 0.
        for line in range(line_coordinates.shape[0]):  # xrange in the Python 2 original
            xy = cam.undistort(line_coordinates[line].T)
            mc = fit_line(xy)
            d = line_point_distance(xy, mc)
            nearest_xy = nearest_point_on_line(xy, mc)
            line_length_sq = np.sum((nearest_xy[:, 0] - nearest_xy[:, -1]) ** 2)
            error += np.sum(d ** 2) / line_length_sq / line_coordinates.shape[1]
    #        plt.plot(x, mc[0] * x + mc[1], 'y')
    #        plt.plot(nx, ny, 'y+')
    #        plt.plot(x, y, 'r+')
    #        plt.show()
        return error

    c = Camera()
    c.set_K_elements(u0_px=y0[0], v0_px=y0[1], f=focal_length)
    c.calibration_type = 'division'
    c.division_z_n = z_n
    res = minimize_scalar(lambda p: lines_fit_error(p, line_coordinates, c))
    c.division_lambda = float(res.x)
    return c 
Example #14
Source File: distance_measures.py    From forest-benchmarking with Apache License 2.0
def quantum_chernoff_bound(rho: np.ndarray,
                           sigma: np.ndarray,
                           tol: float = 1000) -> Tuple[float, float]:
    r"""
    Computes the quantum Chernoff bound between rho and sigma.

    It is defined as

    .. math::

        ξ_{QCB}(\rho, \sigma) = - \log[ \min_{0\le s\le 1} tr(\rho^s \sigma^{1-s}) ]

    It is also common to study the non-logarithmic variety of the quantum Chernoff bound

    .. math::

        Q_{QCB}(\rho, \sigma) = \min_{0\le s\le 1} tr(\rho^s \sigma^{1-s})

    The quantum Chernoff bound has many nice properties, see [QCB]_. Importantly it is
    operationally important in the following context. Given n copies of rho or sigma, the minimum
    error probability for discriminating rho from sigma is :math:`P_{e,min,n} \sim \exp[-n ξ_{QCB}]`.

    .. [QCB] The Quantum Chernoff Bound.
          Audenaert et al.
          Phys. Rev. Lett. 98, 160501 (2007).
          https://dx.doi.org/10.1103/PhysRevLett.98.160501
          https://arxiv.org/abs/quant-ph/0610027

    :param rho: Is a dim by dim positive matrix with unit trace.
    :param sigma: Is a dim by dim positive matrix with unit trace.
    :param tol: Tolerance in machine epsilons for np.real_if_close.
    :return: The non-logarithmic quantum Chernoff bound and the s achieving the minimum.
    """

    def f(s):
        s = np.real_if_close(s)
        return np.trace(
            np.matmul(fractional_matrix_power(rho, s), fractional_matrix_power(sigma, 1 - s)))

    f_min = minimize_scalar(f, bounds=(0, 1), method='bounded')
    s_opt = np.real_if_close(f_min.x, tol)
    qcb = np.real_if_close(f_min.fun, tol)
    return qcb, s_opt 
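A small usage sketch for the function above (the two states are illustrative choices of mine, not from the forest-benchmarking tests):

import numpy as np

# Two full-rank single-qubit density matrices (positive, unit trace).
rho = np.diag([0.9, 0.1])
sigma = np.diag([0.4, 0.6])

qcb, s_opt = quantum_chernoff_bound(rho, sigma)
print(qcb, s_opt)  # non-logarithmic bound and the optimal s in [0, 1]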
Example #15
Source File: Guerrero.py    From tbats with MIT License
def minimize(self, y, bounds):
        self.y = y
        result = minimize_scalar(
            self.guerrero_coefficient_of_variation,
            method='bounded',
            bounds=bounds,
            options=dict(xatol=1e-8),
        )
        return result.x 
Example #16
Source File: gap.py    From quantum-honeycomp with GNU General Public License v3.0
def minimize_gap(f,tol=0.001,bounds=(0,1.)):
  """Miimizes the gap of the system, the argument is between 0 and 1"""
  return f(minimize_scalar(f,method="Bounded",bounds=bounds,tol=tol).x) 
Example #17
Source File: optimizer.py    From pyirt with MIT License
def solve_param_scalar(self):
        def target_fnc(x):
            return self._likelihood(self.res_data, x, self.alpha_vec, self.beta_vec, self.c_vec)
        res = minimize_scalar(target_fnc, bounds=self.bnds, method='bounded')
        return res.x 
Example #18
Source File: isotropic_solver.py    From tyssue with GNU General Public License v3.0
def bruteforce_isotropic_relax(eptm, geom, model):
    def to_minimize(scale):
        return scaled_unscaled(model.compute_energy, scale, eptm, geom, args=[eptm])

    optim_res = optimize.minimize_scalar(
        to_minimize, method="bounded", bounds=[1e-6, 1e2]
    )
    if optim_res["success"]:
        log.info("Scaling by factor {:.3f}".format(optim_res["x"]))
        scale = optim_res["x"]
        geom.scale(eptm, scale, eptm.coords)
        geom.update_all(eptm)
    else:
        warnings.warn("Optimisation failed")

    return optim_res 
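scaled_unscaled is imported from elsewhere in tyssue and is not shown here; a plausible sketch of what it does, based only on the call above and the function's name (my assumption, not the library's actual code), is to evaluate the energy at a given scale without permanently altering the geometry:

def scaled_unscaled(func, scale, eptm, geom, args=None, kwargs=None):
    # Scale the epithelium, evaluate func, then restore the original
    # geometry so the evaluation has no lasting side effects.
    args = args or []
    kwargs = kwargs or {}
    geom.scale(eptm, scale, eptm.coords)
    geom.update_all(eptm)
    try:
        result = func(*args, **kwargs)
    finally:
        geom.scale(eptm, 1 / scale, eptm.coords)
        geom.update_all(eptm)
    return result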
Example #19
Source File: abspline.py    From pygsp with BSD 3-Clause "New" or "Revised" License
def __init__(self, G, Nf=6, lpfactor=20, scales=None):

        def kernel_abspline3(x, alpha, beta, t1, t2):
            M = np.array([[1, t1, t1**2, t1**3],
                          [1, t2, t2**2, t2**3],
                          [0, 1, 2*t1, 3*t1**2],
                          [0, 1, 2*t2, 3*t2**2]])
            v = np.array([1, 1, t1**(-alpha) * alpha * t1**(alpha - 1),
                          -beta*t2**(- beta - 1) * t2**beta])
            a = np.linalg.solve(M, v)

            r1 = x <= t1
            r2 = (x >= t1)*(x < t2)
            r3 = (x >= t2)

            if isinstance(x, np.float64):

                if r1:
                    r = x**alpha * t1**(-alpha)
                if r2:
                    r = a[0] + a[1] * x + a[2] * x**2 + a[3] * x**3
                if r3:
                    r = x**(-beta) * t2**beta

            else:
                r = np.zeros(x.shape)

                x2 = x[r2]

                r[r1] = x[r1]**alpha * t1**(-alpha)
                r[r2] = a[0] + a[1] * x2 + a[2] * x2**2 + a[3] * x2**3
                r[r3] = x[r3]**(-beta) * t2 ** beta

            return r

        self.lpfactor = lpfactor

        lmin = G.lmax / lpfactor

        if scales is None:
            scales = utils.compute_log_scales(lmin, G.lmax, Nf - 1)
        self.scales = scales

        gb = lambda x: kernel_abspline3(x, 2, 2, 1, 2)
        gl = lambda x: np.exp(-np.power(x, 4))

        lminfac = .4 * lmin

        g = [lambda x: 1.2 * np.exp(-1) * gl(x / lminfac)]
        for i in range(0, Nf - 1):
            g.append(lambda x, i=i: gb(self.scales[i] * x))

        f = lambda x: -gb(x)
        xstar = optimize.minimize_scalar(f, bounds=(1, 2),
                                         method='bounded')
        gamma_l = -f(xstar.x)
        lminfac = .6 * lmin
        g[0] = lambda x: gamma_l * gl(x / lminfac)

        super(Abspline, self).__init__(G, g) 
Example #20
Source File: snap.py    From crazyswarm with MIT License
def u1_peak(self, k):
		T = k * self.Tr
		self.z.cost(T)
		self.z.T = T
		self.z.p = self.z.p.reshape((-1, self.z.order + 1))
		bnds = self.get_bounds(self.u1, k)
		u1_res = minimize_scalar(lambda t: -self.u1(t), bounds=bnds, method='bounded')
		self.peaks[0] = np.abs(self.u1(u1_res.x))
		return self.peaks[0] 
Example #21
Source File: snap.py    From crazyswarm with MIT License
def theta_peak(self, k):
		T = k * self.Tr
		self.x.cost(T)
		self.x.T = T
		self.x.p = self.x.p.reshape((-1, self.x.order + 1))
		bnds = self.get_bounds(lambda t: np.abs(self.theta(t)), k)
		theta_res = minimize_scalar(lambda t: -np.abs(self.theta(t)), bounds=bnds, method='bounded')
		self.peaks[3] = np.abs(self.theta(theta_res.x))
		return self.peaks[3] 
Example #22
Source File: snap.py    From crazyswarm with MIT License
def phi_peak(self, k):
		T = k * self.Tr
		self.y.cost(T)
		self.y.T = T
		self.y.p = self.y.p.reshape((-1, self.y.order + 1))
		bnds = self.get_bounds(lambda t: np.abs(self.phi(t)), k)
		phi_res = minimize_scalar(lambda t: -np.abs(self.phi(t)), bounds=bnds, method='bounded')
		self.peaks[4] = np.abs(self.phi(phi_res.x))
		return self.peaks[4]		

	# Returns a set of bounds for the minimizer to use. Necessary for accurate 
	# minimization of non-convex functions such as u1, u2, u3, and u4. This is 
	# a BRUTE FORCE method. We take rezo samples per piecewise polynomial
	# segment, find the time which results in the maximum, and return the two
	# time samples adjacent to that time. 
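get_bounds itself is not included in these excerpts; a minimal sketch matching the comment above could look like the following (self.Tr comes from the calls above, while self.rezo and self.n_segments are assumptions about the surrounding class):

def get_bounds(self, f, k):
    # Sample f densely over the trajectory duration, locate the sample
    # where f is largest, and return the two adjacent sample times as
    # a bracketing interval for the bounded minimizer.
    T = k * self.Tr
    ts = np.linspace(0.0, T, self.rezo * self.n_segments)  # hypothetical attributes
    i = int(np.argmax([f(t) for t in ts]))
    return (ts[max(i - 1, 0)], ts[min(i + 1, len(ts) - 1)])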
Example #23
Source File: NonLinearLeastSquares.py    From DAMDNet with Apache License 2.0
def GaussNewton(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
    x = np.array(x0, dtype=np.float64)

    oldCost = -1
    for i in range(maxIter):
        r = fun(x, *args)
        cost = np.sum(r**2)

        if verbose > 0:
            print ("Cost at iteration " + str(i) + ": " + str(cost))

        if (cost < eps or abs(cost - oldCost) < eps):
            break
        oldCost = cost

        J = funJack(x, *args)
        grad = np.dot(J.T, r)
        H = np.dot(J.T, J)
        direction = np.linalg.solve(H, grad)

        # step-length optimization via a scalar line search
        lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
        # step length
        alpha = lineSearchRes["x"]

        x = x + alpha * direction
        
    if verbose > 0:
        print ("Gauss Netwon finished after "  + str(i + 1) + " iterations")
        r = fun(x, *args)
        cost = np.sum(r**2)
        print ("cost = " + str(cost))
        print ("x = " + str(x))

    return x 
Example #24
Source File: transform.py    From vnpy_crypto with MIT License
def _loglik_boxcox(self, x, bounds, options={'maxiter': 25}):
        """
        Taken from the Stata manual on Box-Cox regressions, where this is the
        special case of 'lhs only'. As an estimator for the variance, the
        sample variance is used, by means of the well-known formula.

        Parameters
        ----------
        x : array_like
        bounds : tuple
            Lower and upper bound on lambda, passed to the bounded optimizer.
        options : dict
            The options (as a dict) to be passed to the optimizer.
        """
        sum_x = np.sum(np.log(x))
        nobs = len(x)

        def optim(lmbda):
            y, lmbda = self.transform_boxcox(x, lmbda)
            return (1 - lmbda) * sum_x + (nobs / 2.) * np.log(np.var(y))

        res = minimize_scalar(optim,
                              bounds=bounds,
                              method='bounded',
                              options=options)
        return res.x 
Example #25
Source File: curate_data.py    From AMPL with MIT License
def mle_censored_mean(cmpd_df, std_est, value_col='PIC50', relation_col='relation'):
    """
    Compute a maximum likelihood estimate of the true mean value underlying the distribution of replicate assay measurements for a
    single compound. The data may be a mix of censored and uncensored measurements, as indicated by the 'relation' column in the input
    data frame cmpd_df. std_est is an estimate for the standard deviation of the distribution, which is assumed to be Gaussian;
    we typically compute a common estimate for the whole dataset using replicate_rmsd().
    """
    left_censored = np.array(cmpd_df[relation_col].values == '<', dtype=bool)
    right_censored = np.array(cmpd_df[relation_col].values == '>' , dtype=bool)
    not_censored = ~(left_censored | right_censored)
    n_left_cens = sum(left_censored)
    n_right_cens = sum(right_censored)
    nreps = cmpd_df.shape[0]
    values = cmpd_df[value_col].values
    nan = float('nan')

    relation = ''
    # If all the replicate values are left- or right-censored, return the smallest or largest reported (threshold) value accordingly.
    if n_left_cens == nreps:
        mle_value = min(values)
        relation = '<'
    elif n_right_cens == nreps:
        mle_value = max(values)
        relation = '>'
    elif n_left_cens + n_right_cens == 0:
        # If no values are censored, the MLE is the actual mean.
        mle_value = np.mean(values)
    else:
        # Some, but not all observations are censored.
        # First, define the negative log likelihood function
        def loglik(mu):
            ll = -sum(norm.logpdf(values[not_censored], loc=mu, scale=std_est))
            if n_left_cens > 0:
                ll -= sum(norm.logcdf(values[left_censored], loc=mu, scale=std_est))
            if n_right_cens > 0:
                ll -= sum(norm.logsf(values[right_censored], loc=mu, scale=std_est))
            return ll

        # Then minimize it
        opt_res = minimize_scalar(loglik, method='brent')
        if not opt_res.success:
            print('Likelihood maximization failed, message is: "%s"' % opt_res.message)
            mle_value = nan
        else:
            mle_value = opt_res.x
    return mle_value, relation


# ****************************************************************************************************************************************** 
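A small usage sketch for mle_censored_mean (the replicate values and std_est are made up for illustration; in practice replicate_rmsd() would supply std_est):

import pandas as pd

reps = pd.DataFrame({'PIC50':    [6.1, 5.8, 5.0, 6.3],
                     'relation': ['',  '',  '<',  '']})
mean_est, rel = mle_censored_mean(reps, std_est=0.3)
print(mean_est, rel)  # MLE of the mean; rel is '' unless all values are censored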
Example #26
Source File: binarization.py    From pySCENIC with GNU General Public License v3.0
def derive_threshold(auc_mtx: pd.DataFrame, regulon_name: str, seed=None, method: str = 'hdt') -> float:
    '''
    Derive threshold on the AUC values of the given regulon to binarize the cells in two clusters: "on" versus "off"
    state of the regulator.

    :param auc_mtx: The dataframe with the AUC values for all cells and regulons (n_cells x n_regulons).
    :param regulon_name: the name of the regulon for which to predict the threshold.
    :param seed: Seed for the random number generator (optional).
    :param method: The method used to decide whether the distribution of AUC values for the given regulon is not unimodal.
        Can be either Hartigan's Dip Test (HDT) or the Bayesian Information Criterion (BIC). The former method performs
        better but takes considerably more time to execute (40 min for 350 regulons). The BIC approach compares the BIC
        for two Gaussian Mixture Models: single versus two components.
    :return: The threshold on the AUC values.
    '''
    assert auc_mtx is not None and not auc_mtx.empty
    assert regulon_name in auc_mtx.columns
    assert method in {'hdt', 'bic'}

    data = auc_mtx[regulon_name].values

    if seed:
        np.random.seed(seed=seed)

    def isbimodal(data, method):
        if method == 'hdt':
            # Use Hartigan's dip statistic to decide if distribution deviates from unimodality.
            _, pval, _ = diptst(np.sort(data))  # np.msort was removed from NumPy; np.sort is equivalent for 1-D data
            return (pval is not None) and (pval <= 0.05)
        else:
            # Compare Bayesian Information Content of two Gaussian Mixture Models.
            X = data.reshape(-1, 1)
            gmm2 = mixture.GaussianMixture(n_components=2, covariance_type='full', random_state=seed).fit(X)
            gmm1 = mixture.GaussianMixture(n_components=1, covariance_type='full', random_state=seed).fit(X)
            return gmm2.bic(X) <= gmm1.bic(X)

    if not isbimodal(data, method):
        # For a unimodal distribution the threshold is set as mean plus two standard deviations.
        return data.mean() + 2.0*data.std()
    else:
        # Fit a two component Gaussian Mixture model on the AUC distribution using an Expectation-Maximization algorithm
        # to identify the peaks in the distribution.
        gmm2 = mixture.GaussianMixture(n_components=2, covariance_type='full', random_state=seed).fit(data.reshape(-1, 1))
        # For a bimodal distribution the threshold is defined as the "trough" in between the two peaks.
        # This is solved as a minimization problem on the kernel smoothed density.
        return minimize_scalar(fun=stats.gaussian_kde(data),
                               bounds=sorted(gmm2.means_),
                               method='bounded').x[0] 
Example #27
Source File: fret_fit.py    From FRETBursts with GNU General Public License v2.0
def fit_E_binom(nd, na, noprint=False, method='c', **kwargs):
    """Fit the E with MLE using binomial distribution.
    method  ('a','b', or 'c') choose how to handle negative (nd,na) values.
    """
    assert method in ['a', 'b', 'c']
    nd, na = np.round(nd).astype(int), np.round(na).astype(int)

    # The binomial distribution cannot handle negative values,
    # so we must find a way to "remove" the negative values:
    # a. remove bursts with neg. values, but this can skew the fit
    # b. remove bursts with nd < nd[na<0].max(), but few bursts are left
    # c. remove bursts with na+nd < max(nd[na<0].max(), na[nd<0].max()),
    #    which leaves a few more bursts
    # NOTE: b and c have corner cases in which there are neg. bursts left
    pos_bursts = (nd>=0)*(na>=0)
    if (~pos_bursts).any():  # '~' (not '-') negates boolean arrays in NumPy
        if not noprint: print("WARNING: Discarding negative burst sizes.")
        if method == 'a':
            # a. remove bursts with neg. values
            nd, na = nd[pos_bursts], na[pos_bursts]
        elif method == 'b':
            # b. Cut all the part with na<0 to have a less skewed estimation
            if (na < 0).any():
                nd_min = nd[na<0].max()
                nd, na = nd[nd>nd_min], na[nd>nd_min]
        elif method == 'c':
            # c. remove bursts with na+nd < max(nd[na<0].max(),na[nd<0].max())
            nt_min = 0
            if (na < 0).any():
                nt_min = nd[na<0].max()
            if (nd < 0).any():
                nt_min = max([na[nd<0].max(), nt_min])
            nd, na = nd[nd+na>nt_min], na[nd+na>nt_min]

        if not noprint: print(" - Bursts left:", nd.size)
        assert (nd>=0).all() and (na>=0).all()
    min_kwargs = dict(bounds=(0,1), method='bounded',
            options={'disp':1, 'xtol': 1e-6})
    min_kwargs.update(**kwargs)
    res = minimize_scalar(log_likelihood_binom, args=(nd,na), **min_kwargs)
    return res.x 
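log_likelihood_binom lives elsewhere in fret_fit.py and is not part of this excerpt; a sketch consistent with this call — the body is an assumption, modelling each burst's acceptor count na as Binomial(nd + na, E) — could be:

import numpy as np
from scipy.stats import binom

def log_likelihood_binom(E, nd, na):
    # Negative log-likelihood of FRET efficiency E: each of the nd + na
    # photons in a burst is an acceptor photon with probability E.
    return -np.sum(binom.logpmf(na, nd + na, E))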
Example #28
Source File: fret_fit.py    From FRETBursts with GNU General Public License v2.0
def fit_E_poisson_nt(nd, na, bg_a, **kwargs):
    """Fit the E using MLE with na extracted from a Poisson.
    """
    min_kwargs = dict(bounds=(-0.1,1.1), method='bounded',
            options={'disp':1, 'xtol': 1e-6})
    min_kwargs.update(**kwargs)
    res = minimize_scalar(log_likelihood_poisson_nt, args=(nd, na, bg_a),
            **min_kwargs)
    E = res.x
    return E 
Example #29
Source File: fret_fit.py    From FRETBursts with GNU General Public License v2.0
def fit_E_poisson_na(nd, na, bg_a, **kwargs):
    """Fit the E using MLE with na extracted from a Poisson.
    """
    min_kwargs = dict(bounds=(-0.1,1.1), method='bounded',
            options={'disp':1, 'xtol': 1e-6})
    min_kwargs.update(**kwargs)
    res = minimize_scalar(log_likelihood_poisson_na, args=(nd, na, bg_a),
            **min_kwargs)
    E = res.x
    return E 
Example #30
Source File: fret_fit.py    From FRETBursts with GNU General Public License v2.0
def fit_E_poisson_nd(nd, na, bg_d, **kwargs):
    """Fit the E using MLE with nd extracted from a Poisson.
    """
    min_kwargs = dict(bounds=(-0.1,1.1), method='bounded',
            options={'disp':1, 'xtol': 1e-6})
    min_kwargs.update(**kwargs)
    res = minimize_scalar(log_likelihood_poisson_nd, args=(nd, na, bg_d),
            **min_kwargs)
    E = res.x
    return E