Python pymc3.HalfCauchy() Examples
The following are 15 code examples of pymc3.HalfCauchy(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module pymc3, or try the search function.
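Before the project examples, here is a minimal, self-contained sketch of the pymc3.HalfCauchy() API. The model and data below are illustrative only (not taken from any of the projects); `beta` sets the scale of the distribution's heavy right tail, which makes HalfCauchy a popular weakly informative prior for scale parameters.

import numpy as np
import pymc3 as pm

# Illustrative data: noisy draws around an unknown mean.
rng = np.random.RandomState(0)
data = rng.normal(loc=2.0, scale=1.5, size=100)

with pm.Model() as model:
    mu = pm.Normal("mu", mu=0, sd=10)
    # HalfCauchy is supported on [0, inf); `beta` controls its scale.
    sigma = pm.HalfCauchy("sigma", beta=5)
    pm.Normal("obs", mu=mu, sd=sigma, observed=data)
    trace = pm.sample(1000, tune=1000)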
Example #1
Source File: hbr.py From nispat with GNU General Public License v3.0 | 5 votes |
def from_posterior(param, samples, distribution=None, half=False, freedom=10):
    if len(samples.shape) > 1:
        shape = samples.shape[1:]
    else:
        shape = None

    if distribution is None:
        # Non-parametric: smooth the samples with a Gaussian KDE and wrap
        # the result in an Interpolated distribution. The support is padded
        # with zero-density endpoints so the prior vanishes outside the
        # observed range (only on the right for half-distributions).
        smin, smax = np.min(samples), np.max(samples)
        width = smax - smin
        x = np.linspace(smin, smax, 1000)
        y = stats.gaussian_kde(samples)(x)
        if half:
            x = np.concatenate([x, [x[-1] + 0.1 * width]])
            y = np.concatenate([y, [0]])
        else:
            x = np.concatenate([[x[0] - 0.1 * width], x, [x[-1] + 0.1 * width]])
            y = np.concatenate([[0], y, [0]])
        return pm.distributions.Interpolated(param, x, y)
    elif distribution == 'normal':
        # Parametric alternatives: fit the named family to the samples and
        # widen the scale by `freedom` to avoid overconfident priors.
        temp = stats.norm.fit(samples)
        if shape is None:
            return pm.Normal(param, mu=temp[0], sigma=freedom * temp[1])
        else:
            return pm.Normal(param, mu=temp[0], sigma=freedom * temp[1], shape=shape)
    elif distribution == 'hnormal':
        temp = stats.halfnorm.fit(samples)
        if shape is None:
            return pm.HalfNormal(param, sigma=freedom * temp[1])
        else:
            return pm.HalfNormal(param, sigma=freedom * temp[1], shape=shape)
    elif distribution == 'hcauchy':
        temp = stats.halfcauchy.fit(samples)
        if shape is None:
            return pm.HalfCauchy(param, freedom * temp[1])
        else:
            return pm.HalfCauchy(param, freedom * temp[1], shape=shape)
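A typical use of from_posterior is sequential updating: posterior samples from one trace become the priors of the next model. The call pattern below is a sketch inferred from the signature above; the variable names are hypothetical.

import pymc3 as pm

# `trace` is assumed to come from an earlier pm.sample() call on a model
# with free variables named 'mu' and 'sigma'.
with pm.Model() as updated_model:
    mu = from_posterior('mu', trace['mu'])                      # KDE-interpolated prior
    sigma = from_posterior('sigma', trace['sigma'], half=True)  # pad the KDE on the right only
    # Parametric alternative: refit the samples as a widened HalfCauchy
    # sigma = from_posterior('sigma', trace['sigma'], distribution='hcauchy')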
Example #2
Source File: bayesian_regression.py From autoimpute with MIT License | 5 votes |
def fit(self, X, y):
    """Fit the Imputer to the dataset by fitting bayesian model.

    Args:
        X (pd.DataFrame): dataset to fit the imputer.
        y (pd.Series): response, which is eventually imputed.

    Returns:
        self. Instance of the class.
    """
    _not_num_series(self.strategy, y)
    nc = len(X.columns)

    # Initialize model for bayesian linear regression. Default values for
    # the priors assume the data is scaled and centered; convergence can
    # struggle or fail otherwise unless proper prior values are specified
    # separately. Also assumes each beta is normal and "independent";
    # while the betas are likely not independent, this is technically a
    # rule of OLS.
    with pm.Model() as fit_model:
        alpha = pm.Normal("alpha", self.am, sd=self.asd)
        beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
        sigma = pm.HalfCauchy("σ", self.sig)
        mu = alpha + beta.dot(X.T)
        score = pm.Normal("score", mu, sd=sigma, observed=y)
    self.statistics_ = {"param": fit_model, "strategy": self.strategy}
    return self
Example #3
Source File: pmm.py From autoimpute with MIT License | 5 votes |
def fit(self, X, y):
    """Fit the Imputer to the dataset by fitting bayesian and LS model.

    Args:
        X (pd.DataFrame): dataset to fit the imputer.
        y (pd.Series): response, which is eventually imputed.

    Returns:
        self. Instance of the class.
    """
    _not_num_series(self.strategy, y)
    nc = len(X.columns)

    # Get predictions for the data, which will be used for "closest" vals.
    y_pred = self.lm.fit(X, y).predict(X)
    y_df = DataFrame({"y": y, "y_pred": y_pred})

    # Calculate bayes and use appropriate means for alpha and beta priors.
    # Here we specify the point estimates from the linear regression as
    # the means for the priors. This will greatly speed up posterior
    # sampling and help ensure that convergence occurs.
    if self.am is None:
        self.am = self.lm.intercept_
    if self.bm is None:
        self.bm = self.lm.coef_

    # Initialize model for bayesian linear regression. Default values for
    # the priors assume the data is scaled and centered; convergence can
    # struggle or fail otherwise unless proper prior values are specified
    # separately. Also assumes each beta is normal and "independent";
    # while the betas are likely not independent, this is technically a
    # rule of OLS.
    with pm.Model() as fit_model:
        alpha = pm.Normal("alpha", self.am, sd=self.asd)
        beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
        sigma = pm.HalfCauchy("σ", self.sig)
        mu = alpha + beta.dot(X.T)
        score = pm.Normal("score", mu, sd=sigma, observed=y)
    params = {"model": fit_model, "y_obs": y_df}
    self.statistics_ = {"param": params, "strategy": self.strategy}
    return self
Example #4
Source File: lrd.py From autoimpute with MIT License | 5 votes |
def fit(self, X, y):
    """Fit the Imputer to the dataset by fitting bayesian and LS model.

    Args:
        X (pd.DataFrame): dataset to fit the imputer.
        y (pd.Series): response, which is eventually imputed.

    Returns:
        self. Instance of the class.
    """
    _not_num_series(self.strategy, y)
    nc = len(X.columns)

    # Get predictions for the data, which will be used for "closest" vals.
    y_pred = self.lm.fit(X, y).predict(X)
    y_df = DataFrame({"y": y, "y_pred": y_pred})

    # Calculate bayes and use appropriate means for alpha and beta priors.
    # Here we specify the point estimates from the linear regression as
    # the means for the priors. This will greatly speed up posterior
    # sampling and help ensure that convergence occurs.
    if self.am is None:
        self.am = self.lm.intercept_
    if self.bm is None:
        self.bm = self.lm.coef_

    # Initialize model for bayesian linear regression. Default values for
    # the priors assume the data is scaled and centered; convergence can
    # struggle or fail otherwise unless proper prior values are specified
    # separately. Also assumes each beta is normal and "independent";
    # while the betas are likely not independent, this is technically a
    # rule of OLS.
    with pm.Model() as fit_model:
        alpha = pm.Normal("alpha", self.am, sd=self.asd)
        beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
        sigma = pm.HalfCauchy("σ", self.sig)
        mu = alpha + beta.dot(X.T)
        score = pm.Normal("score", mu, sd=sigma, observed=y)
    params = {"model": fit_model, "y_obs": y_df}
    self.statistics_ = {"param": params, "strategy": self.strategy}
    return self
Example #5
Source File: helpers.py From arviz with Apache License 2.0 | 5 votes |
def _pyro_noncentered_model(J, sigma, y=None):
    import pyro
    import pyro.distributions as dist

    mu = pyro.sample("mu", dist.Normal(0, 5))
    tau = pyro.sample("tau", dist.HalfCauchy(5))
    with pyro.plate("J", J):
        eta = pyro.sample("eta", dist.Normal(0, 1))
        theta = mu + tau * eta
        return pyro.sample("obs", dist.Normal(theta, sigma), obs=y)
Example #6
Source File: helpers.py From arviz with Apache License 2.0 | 5 votes |
def _numpyro_noncentered_model(J, sigma, y=None):
    import numpyro
    import numpyro.distributions as dist

    mu = numpyro.sample("mu", dist.Normal(0, 5))
    tau = numpyro.sample("tau", dist.HalfCauchy(5))
    with numpyro.plate("J", J):
        eta = numpyro.sample("eta", dist.Normal(0, 1))
        theta = mu + tau * eta
        return numpyro.sample("obs", dist.Normal(theta, sigma), obs=y)
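Either helper can be run with NUTS. Below is a sketch using the standard NumPyro inference API and the classic eight-schools data (the values are the well-known Rubin (1981) numbers, not taken from this file).

import numpy as np
from jax import random
from numpyro.infer import MCMC, NUTS

# Classic eight-schools data: treatment effects and their standard errors.
J = 8
y = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])
sigma = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])

mcmc = MCMC(NUTS(_numpyro_noncentered_model), num_warmup=500, num_samples=1000)
mcmc.run(random.PRNGKey(0), J, sigma, y=y)
mcmc.print_summary()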
Example #7
Source File: helpers.py From arviz with Apache License 2.0 | 5 votes |
def pymc3_noncentered_schools(data, draws, chains):
    """Non-centered eight schools implementation for pymc3."""
    import pymc3 as pm

    with pm.Model() as model:
        mu = pm.Normal("mu", mu=0, sd=5)
        tau = pm.HalfCauchy("tau", beta=5)
        eta = pm.Normal("eta", mu=0, sd=1, shape=data["J"])
        theta = pm.Deterministic("theta", mu + tau * eta)
        pm.Normal("obs", mu=theta, sd=data["sigma"], observed=data["y"])
        trace = pm.sample(draws, chains=chains)
    return model, trace
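For contrast, a centered parameterization of the same model would sample theta directly. This variant is not part of the arviz helpers; it is a sketch showing the funnel-prone form that the non-centered version above is designed to avoid.

import pymc3 as pm

def pymc3_centered_schools(data, draws, chains):
    """Hypothetical centered variant, for comparison only."""
    with pm.Model() as model:
        mu = pm.Normal("mu", mu=0, sd=5)
        tau = pm.HalfCauchy("tau", beta=5)
        # Sampling theta directly from Normal(mu, tau) couples theta's
        # scale to tau and creates a funnel that HMC often struggles with.
        theta = pm.Normal("theta", mu=mu, sd=tau, shape=data["J"])
        pm.Normal("obs", mu=theta, sd=data["sigma"], observed=data["y"])
        trace = pm.sample(draws, chains=chains)
    return model, trace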
Example #8
Source File: model_selector.py From cs-ranking with Apache License 2.0 | 5 votes |
def __init__(
    self,
    learner_cls,
    parameter_keys,
    model_params,
    fit_params,
    model_path,
    **kwargs,
):
    self.priors = [
        [pm.Normal, {"mu": 0, "sd": 10}],
        [pm.Laplace, {"mu": 0, "b": 10}],
    ]
    self.uniform_prior = [pm.Uniform, {"lower": -20, "upper": 20}]
    self.prior_indices = np.arange(len(self.priors))
    self.parameter_f = [
        (pm.Normal, {"mu": 0, "sd": 5}),
        (pm.Cauchy, {"alpha": 0, "beta": 1}),
        0,
        -5,
        5,
    ]
    self.parameter_s = [
        (pm.HalfCauchy, {"beta": 1}),
        (pm.HalfNormal, {"sd": 0.5}),
        (pm.Exponential, {"lam": 0.5}),
        (pm.Uniform, {"lower": 1, "upper": 10}),
        10,
    ]  # ,(pm.HalfCauchy, {'beta': 2}), (pm.HalfNormal, {'sd': 1}),(pm.Exponential, {'lam': 1.0})]
    self.learner_cls = learner_cls
    self.model_params = model_params
    self.fit_params = fit_params
    self.parameter_keys = parameter_keys
    self.parameters = list(product(self.parameter_f, self.parameter_s))
    pf_arange = np.arange(len(self.parameter_f))
    ps_arange = np.arange(len(self.parameter_s))
    self.parameter_ind = list(product(pf_arange, ps_arange))
    self.model_path = model_path
    self.models = dict()
    self.logger = logging.getLogger(ModelSelector.__name__)
Example #9
Source File: generalized_linear_model.py From cs-ranking with Apache License 2.0 | 5 votes |
def model_configuration(self):
    """
    Constructs the dictionary containing the priors for the weight
    vectors for the model according to the regularization function.
    The parameters are:
        * **weights** : Weights to evaluate the utility of the objects

    For ``l1`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{b}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Laplace}(\\text{mu}=\\text{mu}_w, \\text{b}=\\text{b}_w)

    For ``l2`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{sd}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Normal}(\\text{mu}=\\text{mu}_w, \\text{sd}=\\text{sd}_w)
    """
    if self.regularization == "l2":
        weight = pm.Normal
        prior = "sd"
    elif self.regularization == "l1":
        weight = pm.Laplace
        prior = "b"
    configuration = {
        "weights": [
            weight,
            {
                "mu": (pm.Normal, {"mu": 0, "sd": 10}),
                prior: (pm.HalfCauchy, {"beta": 1}),
            },
        ]
    }
    self.logger.info(
        "Creating default config {}".format(print_dictionary(configuration))
    )
    return configuration
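How cs-ranking consumes this nested dictionary is not shown in the example. The sketch below is only one plausible way to instantiate such (distribution, kwargs) pairs as PyMC3 priors; the variable names and the shape are hypothetical.

import pymc3 as pm

config = {
    "weights": [
        pm.Normal,
        {
            "mu": (pm.Normal, {"mu": 0, "sd": 10}),
            "sd": (pm.HalfCauchy, {"beta": 1}),
        },
    ]
}

with pm.Model():
    dist, hyper = config["weights"]
    # Instantiate each hyperprior first, then feed the resulting random
    # variables into the weight prior itself.
    hyperpriors = {name: d(name + "_w", **kw) for name, (d, kw) in hyper.items()}
    weights = dist("weights", shape=3, **hyperpriors)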
Example #10
Source File: coKriging.py From gempy with GNU Lesser General Public License v3.0 | 4 votes |
def fit_cross_cov(self, n_exp=2, n_gauss=2, range_mu=None):
    """
    Fit an analytical covariance to the experimental data.

    Args:
        n_exp (int): number of exponential basic functions
        n_gauss (int): number of gaussian basic functions
        range_mu: prior mean of the range. Defaults to the mean of the lags

    Returns:
        pymc.Model: PyMC3 model to be sampled using MCMC
    """
    self.n_exp = n_exp
    self.n_gauss = n_gauss
    n_var = self.n_properties
    df = self.exp_var
    lags = self.lags

    # Prior standard deviation for the error of the regression
    prior_std_reg = df.std(0).max() * 10

    # Prior value for the mean of the ranges
    if not range_mu:
        range_mu = lags.mean()

    # Model specifications in PyMC3 are wrapped in a with-statement
    with pm.Model() as model:
        # Define priors
        sigma = pm.HalfCauchy('sigma', beta=prior_std_reg, testval=1., shape=n_var)
        psill = pm.Normal('sill', prior_std_reg, sd=.5 * prior_std_reg, shape=(n_exp + n_gauss))
        range_ = pm.Normal('range', range_mu, sd=range_mu * .3, shape=(n_exp + n_gauss))
        lambda_ = pm.Uniform('weights', 0, 1, shape=(n_var * (n_exp + n_gauss)))

        # Exponential covariance
        exp = pm.Deterministic('exp',  # (lambda_[:n_exp*n_var]*
                               psill[:n_exp] *
                               (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)),
                                                 (range_[:n_exp].reshape((1, n_exp)) / 3.) ** -1))))

        # Gaussian covariance
        gauss = pm.Deterministic('gaus',
                                 psill[n_exp:] *
                                 (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)) ** 2,
                                                   (range_[n_exp:].reshape((1, n_gauss)) * 4 / 7.) ** -2))))

        # We stack the basic functions in the same matrix and tile it to
        # match the number of properties we have
        func = pm.Deterministic('func', T.tile(T.horizontal_stack(exp, gauss), (n_var, 1, 1)))

        # We weight each basic function and sum them
        func_w = pm.Deterministic("func_w",
                                  T.sum(func * lambda_.reshape((n_var, 1, (n_exp + n_gauss))), axis=2))

        for e, cross in enumerate(df.columns):
            # Likelihoods
            pm.Normal(cross + "_like", mu=func_w[e], sd=sigma[e], observed=df[cross].values)

    return model
Example #11
Source File: coKriging.py From gempy with GNU Lesser General Public License v3.0 | 4 votes |
def fit_cross_cov(df, lags, n_exp=2, n_gaus=2, range_mu=None):
    n_var = df.columns.shape[0]
    n_basis_f = n_var * (n_exp + n_gaus)
    prior_std_reg = df.std(0).max() * 10
    if not range_mu:
        range_mu = lags.mean()
    # Because this is an experimental variogram, we do not expect outliers
    nugget_max = df.values.max()
    # print(n_basis_f, n_var*n_exp, nugget_max, range_mu, prior_std_reg)

    # Model specifications in PyMC3 are wrapped in a with-statement
    with pm.Model() as model:
        # Define priors
        sigma = pm.HalfCauchy('sigma', beta=prior_std_reg, testval=1., shape=n_var)
        psill = pm.Normal('sill', prior_std_reg, sd=.5 * prior_std_reg, shape=(n_exp + n_gaus))
        range_ = pm.Normal('range', range_mu, sd=range_mu * .3, shape=(n_exp + n_gaus))
        # nugget = pm.Uniform('nugget', 0, nugget_max, shape=n_var)
        lambda_ = pm.Uniform('weights', 0, 1, shape=(n_var * (n_exp + n_gaus)))

        # Exponential covariance
        exp = pm.Deterministic('exp',  # (lambda_[:n_exp*n_var]*
                               psill[:n_exp] *
                               (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)),
                                                 (range_[:n_exp].reshape((1, n_exp)) / 3.) ** -1))))

        # Gaussian covariance
        gaus = pm.Deterministic('gaus',
                                psill[n_exp:] *
                                (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)) ** 2,
                                                  (range_[n_exp:].reshape((1, n_gaus)) * 4 / 7.) ** -2))))

        func = pm.Deterministic('func', T.tile(T.horizontal_stack(exp, gaus), (n_var, 1, 1)))
        func_w = pm.Deterministic("func_w",
                                  T.sum(func * lambda_.reshape((n_var, 1, (n_exp + n_gaus))), axis=2))
        # nugget.reshape((n_var, 1)))

        for e, cross in enumerate(df.columns):
            # Likelihoods
            pm.Normal(cross + "_like", mu=func_w[e], sd=sigma[e], observed=df[cross].values)

    return model
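Both versions return an unsampled model, so drawing from the posterior is a standard pm.sample call. A sketch, with exp_variogram and lag_distances standing in for the caller's experimental variogram data:

import pymc3 as pm

model = fit_cross_cov(exp_variogram, lag_distances, n_exp=2, n_gaus=2)
with model:
    trace = pm.sample(1000, tune=1000)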
Example #12
Source File: generalized_nested_logit.py From cs-ranking with Apache License 2.0 | 4 votes |
def model_configuration(self):
    """
    Constructs the dictionary containing the priors for the weight
    vectors for the model according to the regularization function.
    The parameters are:
        * **weights** : Weights to evaluate the utility of the objects
        * **weights_k** : Weights to evaluate the fractional allocation
          of each object in :math:`Q` to each nest

    For ``l1`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{b}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Laplace}(\\text{mu}=\\text{mu}_w, \\text{b}=\\text{b}_w)

    For ``l2`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{sd}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Normal}(\\text{mu}=\\text{mu}_w, \\text{sd}=\\text{sd}_w)

    Returns
    -------
    configuration : dict
        Dictionary containing the priors applied on the weights
    """
    if self._config is None:
        if self.regularization == "l2":
            weight = pm.Normal
            prior = "sd"
        elif self.regularization == "l1":
            weight = pm.Laplace
            prior = "b"
        self._config = {
            "weights": [
                weight,
                {
                    "mu": (pm.Normal, {"mu": 0, "sd": 5}),
                    prior: (pm.HalfCauchy, {"beta": 1}),
                },
            ],
            "weights_ik": [
                weight,
                {
                    "mu": (pm.Normal, {"mu": 0, "sd": 5}),
                    prior: (pm.HalfCauchy, {"beta": 1}),
                },
            ],
        }
        self.logger.info(
            "Creating model with config {}".format(print_dictionary(self._config))
        )
    return self._config
Example #13
Source File: mixed_logit_model.py From cs-ranking with Apache License 2.0 | 4 votes |
def model_configuration(self):
    """
    Constructs the dictionary containing the priors for the weight
    vectors for the model according to the regularization function.
    The parameters are:
        * **weights** : Distribution of the weight vectors used to
          evaluate the utility of the objects

    For ``l1`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{b}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Laplace}(\\text{mu}=\\text{mu}_w, \\text{b}=\\text{b}_w)

    For ``l2`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{sd}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Normal}(\\text{mu}=\\text{mu}_w, \\text{sd}=\\text{sd}_w)
    """
    if self._config is None:
        if self.regularization == "l2":
            weight = pm.Normal
            prior = "sd"
        elif self.regularization == "l1":
            weight = pm.Laplace
            prior = "b"
        self._config = {
            "weights": [
                weight,
                {
                    "mu": (pm.Normal, {"mu": 0, "sd": 5}),
                    prior: (pm.HalfCauchy, {"beta": 1}),
                },
            ]
        }
        self.logger.info(
            "Creating model with config {}".format(print_dictionary(self._config))
        )
    return self._config
Example #14
Source File: paired_combinatorial_logit.py From cs-ranking with Apache License 2.0 | 4 votes |
def model_configuration(self):
    """
    Constructs the dictionary containing the priors for the weight
    vectors for the model according to the regularization function.
    The parameters are:
        * **weights** : Weights to evaluate the utility of the objects

    For ``l1`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{b}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Laplace}(\\text{mu}=\\text{mu}_w, \\text{b}=\\text{b}_w)

    For ``l2`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{sd}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Normal}(\\text{mu}=\\text{mu}_w, \\text{sd}=\\text{sd}_w)

    Returns
    -------
    configuration : dict
        Dictionary containing the priors applied on the weights
    """
    if self._config is None:
        if self.regularization == "l2":
            weight = pm.Normal
            prior = "sd"
        elif self.regularization == "l1":
            weight = pm.Laplace
            prior = "b"
        self._config = {
            "weights": [
                weight,
                {
                    "mu": (pm.Normal, {"mu": 0, "sd": 5}),
                    prior: (pm.HalfCauchy, {"beta": 1}),
                },
            ]
        }
        self.logger.info(
            "Creating model with config {}".format(print_dictionary(self._config))
        )
    return self._config
Example #15
Source File: nested_logit_model.py From cs-ranking with Apache License 2.0 | 4 votes |
def model_configuration(self):
    """
    Constructs the dictionary containing the priors for the weight
    vectors for the model according to the regularization function.
    The parameters are:
        * **weights** : Weights to evaluate the utility of the objects
        * **weights_k** : Weights to evaluate the utility of the nests

    For ``l1`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{b}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Laplace}(\\text{mu}=\\text{mu}_w, \\text{b}=\\text{b}_w)

    For ``l2`` regularization the priors are:

    .. math::

        \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
        \\text{sd}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
        \\text{weights} \\sim \\text{Normal}(\\text{mu}=\\text{mu}_w, \\text{sd}=\\text{sd}_w)

    Returns
    -------
    configuration : dict
        Dictionary containing the priors applied on the weights
    """
    if self._config is None:
        if self.regularization == "l2":
            weight = pm.Normal
            prior = "sd"
        elif self.regularization == "l1":
            weight = pm.Laplace
            prior = "b"
        self._config = {
            "weights": [
                weight,
                {
                    "mu": (pm.Normal, {"mu": 0, "sd": 5}),
                    prior: (pm.HalfCauchy, {"beta": 1}),
                },
            ],
            "weights_k": [
                weight,
                {
                    "mu": (pm.Normal, {"mu": 0, "sd": 5}),
                    prior: (pm.HalfCauchy, {"beta": 1}),
                },
            ],
        }
        self.logger.info(
            "Creating model with config {}".format(print_dictionary(self._config))
        )
    return self._config