Python numdifftools.Hessian() Examples

The following are 23 code examples of numdifftools.Hessian(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the numdifftools module.
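For orientation, here is a minimal, self-contained sketch of the basic nd.Hessian API that the examples below build on (the Rosenbrock function is just an arbitrary smooth test function; at its minimum [1, 1] the exact Hessian is [[802, -400], [-400, 200]]):

import numpy as np
import numdifftools as nd

def rosen(x):
    # Rosenbrock test function with a known Hessian at the minimum.
    return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2

hess = nd.Hessian(rosen)            # build a callable Hessian estimator
print(hess(np.array([1.0, 1.0])))   # ~[[802, -400], [-400, 200]]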
Example #1
Source File: profile_numdifftools.py    From numdifftools with BSD 3-Clause "New" or "Revised" License
def profile_hessian(n_values=(4, 8, 16, 32, 64, 96)):
    for n in n_values:
        f = BenchmarkFunction(n)

        step = nd.step_generators.one_step
        cls = nd.Hessian(f, step=step, method='central')
        follow = [cls._derivative_nonzero_order,
                  cls._apply_fd_rule,
                  cls._get_finite_difference_rule,
                  cls._vstack,
                  cls._central_even]
#         cls = nds.Hessian(f, step=None, method='central')
#         follow = [cls._derivative_nonzero_order, ]

        x = 3 * np.ones(n)

        do_profile(follow=follow)(cls)(x) 
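BenchmarkFunction and do_profile are helpers from numdifftools' own benchmark suite and are not shown here. A rough standard-library stand-in that times the same Hessian call might look like this (the quadratic f is an assumed placeholder for BenchmarkFunction):

import timeit
import numpy as np
import numdifftools as nd

def f(x):
    # Simple quadratic placeholder for numdifftools' BenchmarkFunction.
    return 0.5 * np.dot(x, x)

for n in (4, 8, 16, 32):
    x = 3 * np.ones(n)
    hess = nd.Hessian(f, method='central')
    t = timeit.timeit(lambda: hess(x), number=5)
    print('n=%3d: %.4f s per Hessian' % (n, t / 5))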
Example #2
Source File: differentiation.py    From threeML with BSD 3-Clause "New" or "Revised" License
def get_hessian(function, point, minima, maxima):

    wrapper, scaled_deltas, scaled_point, orders_of_magnitude, n_dim = _get_wrapper(
        function, point, minima, maxima
    )

    # Compute the Hessian matrix at best_fit_values

    hessian_matrix_ = nd.Hessian(wrapper, scaled_deltas)(scaled_point)

    # Transform it to numpy matrix

    hessian_matrix = np.array(hessian_matrix_)

    # Now correct back the Hessian for the scales
    for i in range(n_dim):

        for j in range(n_dim):

            hessian_matrix[i, j] /= orders_of_magnitude[i] * orders_of_magnitude[j]

    return hessian_matrix 
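Assuming NumPy broadcasting, the scale-correction loop above can equivalently be written as a single outer-product division; a small self-contained demonstration:

import numpy as np

orders_of_magnitude = np.array([1e2, 1e-3])   # example scales
hessian_matrix = np.ones((2, 2))              # example Hessian
# Divide entry (i, j) by orders_of_magnitude[i] * orders_of_magnitude[j],
# exactly as the double loop above does.
hessian_matrix /= np.outer(orders_of_magnitude, orders_of_magnitude)
print(hessian_matrix)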
Example #3
Source File: test_autograd.py    From momi2 with GNU General Public License v3.0
def check_gradient(f, x):
    print(x, "\n", f(x))

    print("# grad2")
    grad2 = Gradient(f)(x)
    print("# building grad1")
    g = grad(f)
    print("# computing grad1")
    grad1 = g(x)

    print("gradient1\n", grad1, "\ngradient2\n", grad2)
    assert np.allclose(grad1, grad2)

    # check Hessian vector product
    y = np.random.normal(size=x.shape)
    gdot = lambda u: np.dot(g(u), y)
    hess1, hess2 = grad(gdot)(x), Gradient(gdot)(x)
    print("hess1\n", hess1, "\nhess2\n", hess2)
    assert np.allclose(hess1, hess2)
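The Hessian-vector product trick above (differentiating u -> grad(f)(u) . y) can be cross-checked against an explicit numdifftools Hessian. A minimal sketch, assuming autograd is installed and f is a smooth scalar function:

import autograd.numpy as np
import numdifftools as nd
from autograd import grad

def f(x):
    return np.sum(x ** 3)   # arbitrary smooth test function

x = np.array([1.0, 2.0])
y = np.array([0.5, -1.0])

# Hessian-vector product via double differentiation, as in the test above.
hvp = grad(lambda u: np.dot(grad(f)(u), y))(x)

# The same product from a full numdifftools Hessian.
hvp_nd = nd.Hessian(f)(x) @ y
print(np.allclose(hvp, hvp_nd, atol=1e-6))   # expect True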
Example #4
Source File: garch.py    From vnpy_crypto with MIT License
def information(self, params):
        """
        Fisher information matrix of model

        Returns -Hessian of loglike evaluated at params.
        """
        raise NotImplementedError 
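In subclasses that do implement it, the relationship stated in the docstring maps directly onto numdifftools. A hedged sketch of what such an override might look like (illustrative only, not the actual garch implementation):

import numdifftools as nd

def information(self, params):
    # Fisher information as the negative numerical Hessian of loglike,
    # matching the docstring above (sketch of a possible override).
    return -nd.Hessian(self.loglike)(params)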
Example #5
Source File: classic.py    From scikit-extremes with MIT License
def _nnlf(self, theta):
        # This is used to calculate the variance-covariance matrix using the
        # Hessian from numdifftools
        # see self._ci_delta() method below
        
        x = self.data
        
        # Here we provide code for the GEV distribution and for the special
        # case when shape parameter is 0 (Gumbel distribution).
        if len(theta) == 3:
            c, loc, scale = theta
        elif len(theta) == 2:
            c = 0
            loc, scale = theta
        else:
            raise ValueError("theta must have 2 (Gumbel) or 3 (GEV) elements")
        if c != 0:
            expr = 1. + c * ((x - loc) / scale)
            return (len(x) * _np.log(scale) + 
                   (1. + 1. / c) * _np.sum(_np.log(expr)) +
                   _np.sum(expr ** (-1. / c)))
        else:
            expr = (x - loc) / scale
            return (len(x) * _np.log(scale) + 
                   _np.sum(expr) +
                   _np.sum(_np.exp(-expr))) 
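_nnlf exists so the negative log-likelihood can be handed to numdifftools.Hessian; the inverse of the resulting observed information matrix is the variance-covariance matrix used in _ci_delta (Example #23 below). A self-contained sketch of that step for the Gumbel branch, with made-up data and a stand-in for the fitted parameters:

import numpy as np
import numdifftools as nd

rng = np.random.default_rng(0)
x = rng.gumbel(loc=10.0, scale=2.0, size=500)

def nnlf_gumbel(theta):
    # Gumbel branch of the negative log-likelihood above (c == 0).
    loc, scale = theta
    expr = (x - loc) / scale
    return len(x) * np.log(scale) + np.sum(expr) + np.sum(np.exp(-expr))

theta_hat = np.array([10.0, 2.0])   # stand-in for the fitted parameters
varcovar = np.linalg.inv(nd.Hessian(nnlf_gumbel)(theta_hat))
print(np.sqrt(np.diag(varcovar)))   # approximate standard errors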
Example #6
Source File: tsm.py    From pyflux with BSD 3-Clause "New" or "Revised" License
def _laplace_fit(self,obj_type):
        """ Performs a Laplace approximation to the posterior

        Parameters
        ----------
        obj_type : method
            Whether a likelihood or a posterior

        Returns
        ----------
        LaplaceResults object
        """

        # Get Mode and Inverse Hessian information
        y = self.fit(method='PML',printer=False)

        if y.ihessian is None:
            raise Exception("No Hessian information - Laplace approximation cannot be performed")
        else:

            self.latent_variables.estimation_method = 'Laplace'

            theta, Y, scores, states, states_var, X_names = self._categorize_model_output(self.latent_variables.get_z_values())

            # Change this in future
            try:
                latent_variables_store = self.latent_variables.copy()
            except Exception:
                latent_variables_store = self.latent_variables

            return LaplaceResults(data_name=self.data_name,X_names=X_names,model_name=self.model_name,
                model_type=self.model_type, latent_variables=latent_variables_store,data=Y,index=self.index,
                multivariate_model=self.multivariate_model,objective_object=obj_type, 
                method='Laplace',ihessian=y.ihessian,signal=theta,scores=scores,
                z_hide=self._z_hide,max_lag=self.max_lag,states=states,states_var=states_var) 
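The Laplace approximation treats the posterior as Gaussian around the mode, with covariance equal to the inverse Hessian of the negative log-posterior; that is exactly the ihessian computed by the PML fit above. A minimal standalone sketch of the idea on a toy target where the approximation is exact:

import numpy as np
import numdifftools as nd
from scipy import optimize

def neg_log_posterior(z):
    # Toy correlated-Gaussian target, so the Laplace fit is exact.
    prec = np.array([[2.0, 0.6], [0.6, 1.0]])
    return 0.5 * z @ prec @ z

mode = optimize.minimize(neg_log_posterior, np.zeros(2)).x
cov = np.linalg.inv(nd.Hessian(neg_log_posterior)(mode))
print(mode)   # Laplace mean, ~[0, 0]
print(cov)    # Laplace covariance, ~inv(prec)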
Example #7
Source File: numdiff.py    From Splunking-Crime with GNU Affero General Public License v3.0
def approx_hess_cs_old(x, func, args=(), h=1.0e-20, epsilon=1e-6):
        def grad(x):
            return approx_fprime_cs(x, func, args=args, h=1.0e-20)

        #Hessian from gradient:
        return (approx_fprime(x, grad, epsilon)
                + approx_fprime(x, grad, -epsilon))/2. 
Example #8
Source File: numdiff.py    From Splunking-Crime with GNU Affero General Public License v3.0
def approx_hess_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''Calculate Hessian with complex-step derivative approximation

    Parameters
    ----------
    x : array_like
       value at which function derivative is evaluated
    f : function
       function of one array f(x)
    epsilon : float
       stepsize, if None, then stepsize is automatically chosen

    Returns
    -------
    hess : ndarray
       array of partial second derivatives, Hessian

    Notes
    -----
    based on equation 10 in
    M. S. RIDOUT: Statistical Applications of the Complex-step Method
    of Numerical Differentiation, University of Kent, Canterbury, Kent, U.K.

    The stepsize is the same for the complex and the finite difference part.
    '''
    # TODO: might want to consider lowering the step for pure derivatives
    n = len(x)
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    hess = np.outer(h, h)

    for i in range(n):
        for j in range(i, n):
            hess[i, j] = (f(*((x + 1j*ee[i, :] + ee[j, :],) + args), **kwargs)
                          - f(*((x + 1j*ee[i, :] - ee[j, :],)+args),
                              **kwargs)).imag/2./hess[i, j]
            hess[j, i] = hess[i, j]

    return hess 
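This module appears to be a vendored copy of statsmodels' numdiff, where approx_hess_cs is also available. A usage sketch, assuming statsmodels is installed, comparing the complex-step Hessian with numdifftools on a function that accepts complex input:

import numpy as np
import numdifftools as nd
from statsmodels.tools.numdiff import approx_hess_cs

def f(x):
    # Analytic in each argument, so the complex step applies.
    return np.exp(x[0]) * np.sin(x[1])

x0 = np.array([0.3, 0.7])
print(approx_hess_cs(x0, f))   # complex-step Hessian
print(nd.Hessian(f)(x0))       # finite-difference reference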
Example #9
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
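Because the score is the gradient of the log-likelihood, the Jacobian of the score is the Hessian of the log-likelihood, which is why ndt.Jacobian appears here (the stepMax keyword appears to date from an older numdifftools API; current releases use step generators instead). A sketch of the equivalence with a toy log-likelihood:

import numpy as np
import numdifftools as nd

def loglike(p):
    return -0.5 * np.sum(p ** 2)   # toy log-likelihood

score = nd.Gradient(loglike)             # score = gradient of loglike
H1 = nd.Jacobian(score)(np.ones(3))      # Hessian via Jacobian of the score
H2 = nd.Hessian(loglike)(np.ones(3))     # Hessian directly
print(np.allclose(H1, H2, atol=1e-6))    # expect True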
Example #10
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def hessian(self, params):
        """
        Returns numerical hessian for now.  Depends on numdifftools.
        """

        h = Hessian(self.loglike)
        return h(params) 
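Given the Hessian of the log-likelihood at the MLE, the usual asymptotic covariance is the inverse of the negative Hessian (the observed information). A self-contained sketch of how the result of this method is typically consumed:

import numpy as np
import numdifftools as nd

def loglike(p):
    # Toy Gaussian log-likelihood with unit information.
    return -0.5 * np.sum(p ** 2)

params_hat = np.zeros(2)
H = nd.Hessian(loglike)(params_hat)
cov = np.linalg.inv(-H)          # inverse observed information
print(np.sqrt(np.diag(cov)))     # asymptotic standard errors, here ~1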
Example #11
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #12
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def hessian(self, params):
        """
        The Hessian matrix of the model
        """
        raise NotImplementedError 
Example #13
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def information(self, params):
        """
        Fisher information matrix of model

        Returns -Hessian of loglike evaluated at params.
        """
        raise NotImplementedError 
Example #14
Source File: differentiation.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def hessian(
    func,
    params,
    method="central",
    extrapolation=True,
    func_kwargs=None,
    step_options=None,
):
    """
    Calculate the hessian of *func*.

    Args:
        func (function): A function that maps params into a float.
        params (DataFrame): see :ref:`params`
        method (string): The method for the computation of the derivative. Default is
                         central as it gives the highest accuracy.
        extrapolation (bool): Use Richardson extrapolation.
        func_kwargs (dict): additional keyword arguments for func.
        step_options (dict): Options for the numdifftools step generator.
            See :ref:`step_options`

    Returns:
        DataFrame: The index and columns are the index of params. The data is
            the estimated hessian.

    """
    step_options = step_options if step_options is not None else {}

    if method != "central":
        raise ValueError("Only the method 'central' is supported.")

    func_kwargs = {} if func_kwargs is None else func_kwargs

    internal_func = _create_internal_func(func, params, func_kwargs)
    params_value = params["value"].to_numpy()

    if extrapolation:
        hess_np = nd.Hessian(internal_func, method=method, **step_options)(params_value)
    else:
        hess_np = _no_extrapolation_hessian(internal_func, params_value, method)
    return pd.DataFrame(data=hess_np, index=params.index, columns=params.index) 
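A usage sketch for the function above, assuming estimagic's params convention of a DataFrame with a "value" column (the function and values here are made up, and running this requires the estimagic internals the example imports):

import numpy as np
import pandas as pd

params = pd.DataFrame({"value": [1.0, 2.0]}, index=["a", "b"])

def sphere(params):
    # Maps the params DataFrame to a float, as the docstring requires.
    x = params["value"].to_numpy()
    return float(np.sum(x ** 2))

hess = hessian(sphere, params)   # the function defined in this example
print(hess)                      # 2x2 DataFrame, approximately 2 * identity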
Example #15
Source File: numdiff.py    From vnpy_crypto with MIT License
def approx_hess_cs_old(x, func, args=(), h=1.0e-20, epsilon=1e-6):
        def grad(x):
            return approx_fprime_cs(x, func, args=args, h=1.0e-20)

        #Hessian from gradient:
        return (approx_fprime(x, grad, epsilon)
                + approx_fprime(x, grad, -epsilon))/2. 
Example #16
Source File: numdiff.py    From vnpy_crypto with MIT License
def approx_hess_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''Calculate Hessian with complex-step derivative approximation

    Parameters
    ----------
    x : array_like
       value at which function derivative is evaluated
    f : function
       function of one array f(x)
    epsilon : float
       stepsize, if None, then stepsize is automatically chosen

    Returns
    -------
    hess : ndarray
       array of partial second derivatives, Hessian

    Notes
    -----
    based on equation 10 in
    M. S. RIDOUT: Statistical Applications of the Complex-step Method
    of Numerical Differentiation, University of Kent, Canterbury, Kent, U.K.

    The stepsize is the same for the complex and the finite difference part.
    '''
    # TODO: might want to consider lowering the step for pure derivatives
    n = len(x)
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    hess = np.outer(h, h)

    for i in range(n):
        for j in range(i, n):
            hess[i, j] = (f(*((x + 1j*ee[i, :] + ee[j, :],) + args), **kwargs)
                          - f(*((x + 1j*ee[i, :] - ee[j, :],)+args),
                              **kwargs)).imag/2./hess[i, j]
            hess[j, i] = hess[i, j]

    return hess 
Example #17
Source File: garch.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #18
Source File: garch.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        Returns numerical hessian for now.  Depends on numdifftools.
        """

        h = Hessian(self.loglike)
        return h(params) 
Example #19
Source File: garch.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #20
Source File: garch.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        The Hessian matrix of the model
        """
        raise NotImplementedError 
Example #21
Source File: stats.py    From tensorprob with MIT License
def fisher(model, variables=None):
    '''
    Calculates the covariance matrix of the variables, given the model.
    The model needs to have been optimized for this to work.
    '''

    if variables is None:
        keys = list(model._hidden.keys())
        variables = list(model._hidden.values())
    else:
        # Use the supplied variables both for evaluation and as result keys.
        variables = list(variables)
        keys = variables

    def func(xs):
        feed = { k: v for k, v in zip(variables, xs) }
        out = model.session.run(model._nll, feed_dict=feed)
        if np.isnan(out):
            return 1e100
        return out

    x = model.session.run(variables)
    hess = Hessian(func)(x)
    cov = np.linalg.inv(hess)

    result = dict()

    for i, v1 in enumerate(keys):
        result[v1] = dict()
        for j, v2 in enumerate(keys):
            result[v1][v2] = cov[i,j]

    return result 
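A usage sketch: the returned nested dict maps variable pairs to covariances, so standard errors come off the diagonal (model here is a hypothetical, already-optimized tensorprob model):

cov = fisher(model)
ses = {v: cov[v][v] ** 0.5 for v in cov}   # standard error per variable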
Example #22
Source File: tsm.py    From pyflux with BSD 3-Clause "New" or "Revised" License
def _optimize_fit(self, obj_type=None, **kwargs):
        """
        This function fits models using Maximum Likelihood or Penalized Maximum Likelihood
        """

        preopt_search = kwargs.get('preopt_search', True) # If user supplied

        if obj_type == self.neg_loglik:
            method = 'MLE'
        else:
            method = 'PML'

        # Starting values - check to see if model has preoptimize method, if not, simply use default starting values
        if preopt_search is True:
            try:
                phi = self._preoptimize_model(self.latent_variables.get_z_starting_values(), method)
                preoptimized = True
            except Exception:
                phi = self.latent_variables.get_z_starting_values()
                preoptimized = False
        else:
            preoptimized = False
            phi = self.latent_variables.get_z_starting_values()

        phi = kwargs.get('start',phi).copy() # If user supplied

        # Optimize using L-BFGS-B
        p = optimize.minimize(obj_type, phi, method='L-BFGS-B', options={'gtol': 1e-8})
        if preoptimized is True:
            p2 = optimize.minimize(obj_type, self.latent_variables.get_z_starting_values(), method='L-BFGS-B', 
                options={'gtol': 1e-8})
            if self.neg_loglik(p2.x) < self.neg_loglik(p.x):
                p = p2

        theta, Y, scores, states, states_var, X_names = self._categorize_model_output(p.x)

        # Check that matrix is non-singular; act accordingly
        try:
            ihessian = np.linalg.inv(nd.Hessian(obj_type)(p.x))
            ses = np.power(np.abs(np.diag(ihessian)),0.5)
            self.latent_variables.set_z_values(p.x,method,ses,None)

        except Exception:
            ihessian = None
            ses = None
            self.latent_variables.set_z_values(p.x,method,None,None)

        self.latent_variables.estimation_method = method

        # Change this in future
        try:
            latent_variables_store = self.latent_variables.copy()
        except Exception:
            latent_variables_store = self.latent_variables

        return MLEResults(data_name=self.data_name,X_names=X_names,model_name=self.model_name,
                model_type=self.model_type, latent_variables=latent_variables_store,results=p,data=Y, index=self.index,
                multivariate_model=self.multivariate_model,objective_object=obj_type, 
                method=method,ihessian=ihessian,signal=theta,scores=scores,
                z_hide=self._z_hide,max_lag=self.max_lag,states=states,states_var=states_var) 
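The try/except around the inversion above relies on an exception to detect a singular Hessian; a more targeted check is to attempt a Cholesky factorization first, since a valid covariance matrix must be positive definite. A minimal sketch of that alternative (names are illustrative):

import numpy as np

def safe_inverse_hessian(hess):
    # Return the inverse Hessian if it is positive definite, else None.
    try:
        np.linalg.cholesky(hess)   # raises LinAlgError if hess is not PD
    except np.linalg.LinAlgError:
        return None
    return np.linalg.inv(hess)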
Example #23
Source File: classic.py    From scikit-extremes with MIT License
def _ci_delta(self):
        # Calculate the variance-covariance matrix using the
        # hessian from numdifftools
        # This is used to obtain confidence intervals for the estimators and
        # for the return levels at several return periods.
        #
        # More info about the delta method can be found on:
        #     - Coles, Stuart: "An Introduction to Statistical Modeling of  
        #     Extreme Values", Springer (2001)
        #     - https://en.wikipedia.org/wiki/Delta_method
        
        # data
        c  = -self.c    # We negate the shape to avoid inconsistency problems!?
        loc = self.loc
        scale = self.scale
        hess = _ndt.Hessian(self._nnlf)
        T = _np.arange(0.1, 500.1, 0.1)
        sT = -_np.log(1.-self.frec/T)
        sT2 = self.distr.isf(self.frec/T)
        
        # VarCovar matrix and confidence values for estimators and return values
        # Confidence interval for return values (up values and down values)
        ci_Tu = _np.zeros(sT.shape)
        ci_Td = _np.zeros(sT.shape)
        if c:         # If c then we are calculating GEV confidence intervals
            varcovar = _np.linalg.inv(hess([c, loc, scale]))
            self.params_ci = OrderedDict()
            se = _np.sqrt(_np.diag(varcovar))
            self._se = se
            self.params_ci['shape']    = (self.c - _st.norm.ppf(1 - self.ci / 2) * se[0],
                                          self.c + _st.norm.ppf(1 - self.ci / 2) * se[0])
            self.params_ci['location'] = (self.loc - _st.norm.ppf(1 - self.ci / 2) * se[1],
                                          self.loc + _st.norm.ppf(1 - self.ci / 2) * se[1])
            self.params_ci['scale']    = (self.scale - _st.norm.ppf(1 - self.ci / 2) * se[2],
                                          self.scale + _st.norm.ppf(1 - self.ci / 2) * se[2])
            for i, val in enumerate(sT2):
                gradZ = [scale * (c**-2) * (1 - sT[i] ** (-c)) - scale * (c**-1) * (sT[i]**-c) * _np.log(sT[i]),
                         1, 
                         -(1 - sT[i] ** (-c)) / c]
                se = _np.dot(_np.dot(gradZ, varcovar), _np.array(gradZ).T)
                ci_Tu[i] = val + _st.norm.ppf(1 - self.ci / 2) * _np.sqrt(se)
                ci_Td[i] = val - _st.norm.ppf(1 - self.ci / 2) * _np.sqrt(se)
        else:         # Otherwise we are calculating Gumbel confidence intervals
            varcovar = _np.linalg.inv(hess([loc, scale]))
            self.params_ci = OrderedDict()
            se = _np.sqrt(_np.diag(varcovar))
            self._se = se
            self.params_ci['shape']    = (0, 0)
            self.params_ci['location'] = (self.loc - _st.norm.ppf(1 - self.ci / 2) * se[0],
                                          self.loc + _st.norm.ppf(1 - self.ci / 2) * se[0])
            self.params_ci['scale']    = (self.scale - _st.norm.ppf(1 - self.ci / 2) * se[1],
                                          self.scale + _st.norm.ppf(1 - self.ci / 2) * se[1])
            for i, val in enumerate(sT2):
                gradZ = [1, -_np.log(sT[i])]
                se = _np.dot(_np.dot(gradZ, varcovar), _np.array(gradZ).T)
                ci_Tu[i] = val + _st.norm.ppf(1 - self.ci / 2) * _np.sqrt(se)
                ci_Td[i] = val - _st.norm.ppf(1 - self.ci / 2) * _np.sqrt(se)
        self._ci_Tu = ci_Tu
        self._ci_Td = ci_Td
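Both branches above are instances of the delta method: if the estimator is asymptotically normal with covariance varcovar, a smooth scalar g(theta) has approximate variance gradZ @ varcovar @ gradZ. A compact sketch of the generic step, with the gradient taken numerically:

import numpy as np
import numdifftools as nd

def delta_method_se(g, theta_hat, varcovar):
    # Standard error of g(theta_hat) via a first-order Taylor expansion.
    grad_g = nd.Gradient(g)(theta_hat)
    return float(np.sqrt(grad_g @ varcovar @ grad_g))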