Python sklearn.linear_model.HuberRegressor() Examples
The following are 10 code examples of sklearn.linear_model.HuberRegressor(), each taken from an open-source project; the source file and license are noted above every snippet.
You may also want to check out the other available functions and classes of the sklearn.linear_model module.
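Before the project-specific examples, here is a minimal self-contained sketch of the basic HuberRegressor API. The synthetic data and parameter values are illustrative only and are not taken from any of the projects below.

import numpy as np
from sklearn.linear_model import HuberRegressor

# Illustrative synthetic data: a linear trend plus a few large outliers
# that would noticeably skew an ordinary least-squares fit.
rng = np.random.RandomState(0)
X = rng.uniform(0, 10, size=(100, 1))
y = 2.5 * X.ravel() + 1.0 + rng.normal(scale=0.5, size=100)
y[:5] += 50  # inject outliers

# epsilon controls where the loss switches from squared to linear;
# 1.35 is scikit-learn's default.
model = HuberRegressor(epsilon=1.35, max_iter=1000)
model.fit(X, y)

print(model.coef_, model.intercept_)  # robust slope and intercept
print(model.outliers_[:10])           # boolean mask of samples treated as outliers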
Example #1
Source File: test_standardization.py From causallib with Apache License 2.0
def ensure_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR

    import warnings
    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                    ElasticNet, RANSACRegressor, HuberRegressor,
                    PassiveAggressiveRegressor, KNeighborsRegressor, SVR, LinearSVR]:
        learner = learner()
        learner_name = str(learner).split("(", maxsplit=1)[0]
        with self.subTest("Test fit using {learner}".format(learner=learner_name)):
            model = self.estimator.__class__(learner)
            model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
            self.assertTrue(True)  # Fit did not crash
Example #2
Source File: robust.py From radiometric_normalization with Apache License 2.0
import logging

# _huber_regressor (Example #3) and _ransac_regressor are defined in the
# same source file (robust.py).

def fit(candidate_data, reference_data):
    '''Tries a variety of robust fitting methods in what is considered
    descending order of how good the fits are with this type of data set
    (found empirically).

    :param list candidate_data: A 1D list or array representing only the
        image data of the candidate band
    :param list reference_data: A 1D list or array representing only the
        image data of the reference band

    :returns: A gain and an offset (tuple of floats)
    '''
    try:
        logging.debug('Robust: Trying HuberRegressor with epsilon 1.01')
        gain, offset = _huber_regressor(
            candidate_data, reference_data, 1.01)
    except:
        try:
            logging.debug('Robust: Trying HuberRegressor with epsilon 1.05')
            gain, offset = _huber_regressor(
                candidate_data, reference_data, 1.05)
        except:
            try:
                logging.debug('Robust: Trying HuberRegressor with epsilon 1.1')
                gain, offset = _huber_regressor(
                    candidate_data, reference_data, 1.1)
            except:
                try:
                    logging.debug('Robust: Trying HuberRegressor with epsilon '
                                  '1.35')
                    gain, offset = _huber_regressor(
                        candidate_data, reference_data, 1.35)
                except:
                    logging.debug('Robust: Trying RANSAC')
                    gain, offset = _ransac_regressor(
                        candidate_data, reference_data)
    return gain, offset
Example #3
Source File: robust.py From radiometric_normalization with Apache License 2.0
import numpy
from sklearn import linear_model

def _huber_regressor(candidate_data, reference_data, epsilon, max_iter=10000):
    model = linear_model.HuberRegressor(epsilon=epsilon, max_iter=max_iter)
    model.fit(numpy.array([[c] for c in candidate_data]),
              numpy.array(reference_data))
    gain = model.coef_
    offset = model.intercept_
    return gain, offset
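A note on the two snippets above: HuberRegressor's epsilon parameter (constrained to epsilon >= 1.0) controls how aggressively samples are treated as outliers. Values closer to 1 give a more robust fit, which is why Example #2 tries 1.01 first and falls back step by step toward scikit-learn's default of 1.35 before giving up and switching to RANSAC. The list comprehension in Example #3 exists only to turn the 1D band data into the (n_samples, 1) array that sklearn expects; below is a sketch of an equivalent helper using the more idiomatic reshape (the function name is mine, not from the source).

import numpy as np
from sklearn import linear_model

def huber_gain_offset(candidate_data, reference_data, epsilon=1.35, max_iter=10000):
    # reshape(-1, 1) converts the 1D band data into the column vector
    # sklearn expects, replacing the per-element list comprehension above.
    X = np.asarray(candidate_data, dtype=float).reshape(-1, 1)
    y = np.asarray(reference_data, dtype=float)
    model = linear_model.HuberRegressor(epsilon=epsilon, max_iter=max_iter)
    model.fit(X, y)
    # Return scalars; the original helper returns coef_ as a length-1 array.
    return float(model.coef_[0]), float(model.intercept_)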
Example #4
Source File: scikitlearn.py From sia-cog with MIT License
def getModels():
    result = []
    result.append("LinearRegression")
    result.append("BayesianRidge")
    result.append("ARDRegression")
    result.append("ElasticNet")
    result.append("HuberRegressor")
    result.append("Lasso")
    result.append("LassoLars")
    result.append("Rigid")  # sic: the repo's (misspelled) key for Ridge; see Example #8
    result.append("SGDRegressor")
    result.append("SVR")
    result.append("MLPClassifier")
    result.append("KNeighborsClassifier")
    result.append("SVC")
    result.append("GaussianProcessClassifier")
    result.append("DecisionTreeClassifier")
    result.append("RandomForestClassifier")
    result.append("AdaBoostClassifier")
    result.append("GaussianNB")
    result.append("LogisticRegression")
    result.append("QuadraticDiscriminantAnalysis")
    return result
Example #5
Source File: test_doublyrobust.py From causallib with Apache License 2.0
def ensure_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR
    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.neighbors import KNeighborsClassifier

    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    data = self.create_uninformative_ox_dataset()
    for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                               RandomForestClassifier(n_estimators=100),
                               MLPClassifier(hidden_layer_sizes=(5,)),
                               KNeighborsClassifier(n_neighbors=20)]:
        weight_model = IPW(propensity_learner)
        propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
        for outcome_learner in [GradientBoostingRegressor(n_estimators=10),
                                RandomForestRegressor(n_estimators=10),
                                MLPRegressor(hidden_layer_sizes=(5,)),
                                ElasticNet(), RANSACRegressor(), HuberRegressor(),
                                PassiveAggressiveRegressor(), KNeighborsRegressor(),
                                SVR(), LinearSVR()]:
            outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
            outcome_model = Standardization(outcome_learner)
            with self.subTest("Test fit & predict using {} & {}".format(propensity_learner_name,
                                                                        outcome_learner_name)):
                model = self.estimator.__class__(outcome_model, weight_model)
                model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                model.estimate_individual_outcome(data["X"], data["a"])
                self.assertTrue(True)  # Fit did not crash
Example #6
Source File: test_sklearn_glm_regressor_converter.py From sklearn-onnx with MIT License
def test_model_huber_regressor(self):
    model, X = fit_regression_model(linear_model.HuberRegressor())
    model_onnx = convert_sklearn(
        model, "huber regressor",
        [("input", FloatTensorType([None, X.shape[1]]))])
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X, model, model_onnx,
        basename="SklearnHuberRegressor-Dec4",
        allow_failure="StrictVersion("
        "onnxruntime.__version__)"
        "<= StrictVersion('0.2.1')",
    )
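Once converted, the ONNX graph from Example #6 can be executed with onnxruntime. The following is a minimal sketch of standard onnxruntime usage rather than part of the test file; it assumes model_onnx, model, and X come from the snippet above.

import numpy as np
import onnxruntime as rt

# model_onnx is the protobuf returned by convert_sklearn in Example #6.
# onnxruntime >= 1.9 requires providers to be named explicitly; older
# versions also accept InferenceSession(bytes) alone.
sess = rt.InferenceSession(model_onnx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name  # "input", as declared during conversion
pred_onnx = sess.run(None, {input_name: X.astype(np.float32)})[0]

# The ONNX predictions should closely match the fitted sklearn model.
pred_skl = model.predict(X)
print(np.max(np.abs(pred_onnx.ravel() - pred_skl)))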
Example #7
Source File: test_linear_model.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.linear_model.ARDRegression, lm.ARDRegression)
    self.assertIs(df.linear_model.BayesianRidge, lm.BayesianRidge)
    self.assertIs(df.linear_model.ElasticNet, lm.ElasticNet)
    self.assertIs(df.linear_model.ElasticNetCV, lm.ElasticNetCV)
    self.assertIs(df.linear_model.HuberRegressor, lm.HuberRegressor)
    self.assertIs(df.linear_model.Lars, lm.Lars)
    self.assertIs(df.linear_model.LarsCV, lm.LarsCV)
    self.assertIs(df.linear_model.Lasso, lm.Lasso)
    self.assertIs(df.linear_model.LassoCV, lm.LassoCV)
    self.assertIs(df.linear_model.LassoLars, lm.LassoLars)
    self.assertIs(df.linear_model.LassoLarsCV, lm.LassoLarsCV)
    self.assertIs(df.linear_model.LassoLarsIC, lm.LassoLarsIC)
    self.assertIs(df.linear_model.LinearRegression, lm.LinearRegression)
    self.assertIs(df.linear_model.LogisticRegression, lm.LogisticRegression)
    self.assertIs(df.linear_model.LogisticRegressionCV, lm.LogisticRegressionCV)
    self.assertIs(df.linear_model.MultiTaskLasso, lm.MultiTaskLasso)
    self.assertIs(df.linear_model.MultiTaskElasticNet, lm.MultiTaskElasticNet)
    self.assertIs(df.linear_model.MultiTaskLassoCV, lm.MultiTaskLassoCV)
    self.assertIs(df.linear_model.MultiTaskElasticNetCV, lm.MultiTaskElasticNetCV)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuit, lm.OrthogonalMatchingPursuit)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuitCV, lm.OrthogonalMatchingPursuitCV)
    self.assertIs(df.linear_model.PassiveAggressiveClassifier, lm.PassiveAggressiveClassifier)
    self.assertIs(df.linear_model.PassiveAggressiveRegressor, lm.PassiveAggressiveRegressor)
    self.assertIs(df.linear_model.Perceptron, lm.Perceptron)
    self.assertIs(df.linear_model.RandomizedLasso, lm.RandomizedLasso)
    self.assertIs(df.linear_model.RandomizedLogisticRegression, lm.RandomizedLogisticRegression)
    self.assertIs(df.linear_model.RANSACRegressor, lm.RANSACRegressor)
    self.assertIs(df.linear_model.Ridge, lm.Ridge)
    self.assertIs(df.linear_model.RidgeClassifier, lm.RidgeClassifier)
    self.assertIs(df.linear_model.RidgeClassifierCV, lm.RidgeClassifierCV)
    self.assertIs(df.linear_model.RidgeCV, lm.RidgeCV)
    self.assertIs(df.linear_model.SGDClassifier, lm.SGDClassifier)
    self.assertIs(df.linear_model.SGDRegressor, lm.SGDRegressor)
    self.assertIs(df.linear_model.TheilSenRegressor, lm.TheilSenRegressor)
Example #8
Source File: scikitlearn.py From sia-cog with MIT License
# Module-level imports required by this function (not shown in the excerpt):
from sklearn import linear_model
from sklearn.svm import SVR, SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

def getSKLearnModel(modelName):
    if modelName == 'LinearRegression':
        model = linear_model.LinearRegression()
    elif modelName == 'BayesianRidge':
        model = linear_model.BayesianRidge()
    elif modelName == 'ARDRegression':
        model = linear_model.ARDRegression()
    elif modelName == 'ElasticNet':
        model = linear_model.ElasticNet()
    elif modelName == 'HuberRegressor':
        model = linear_model.HuberRegressor()
    elif modelName == 'Lasso':
        model = linear_model.Lasso()
    elif modelName == 'LassoLars':
        model = linear_model.LassoLars()
    elif modelName == 'Rigid':  # sic: misspelled key that maps to Ridge
        model = linear_model.Ridge()
    elif modelName == 'SGDRegressor':
        model = linear_model.SGDRegressor()
    elif modelName == 'SVR':
        model = SVR()
    elif modelName == 'MLPClassifier':
        model = MLPClassifier()
    elif modelName == 'KNeighborsClassifier':
        model = KNeighborsClassifier()
    elif modelName == 'SVC':
        model = SVC()
    elif modelName == 'GaussianProcessClassifier':
        model = GaussianProcessClassifier()
    elif modelName == 'DecisionTreeClassifier':
        model = DecisionTreeClassifier()
    elif modelName == 'RandomForestClassifier':
        model = RandomForestClassifier()
    elif modelName == 'AdaBoostClassifier':
        model = AdaBoostClassifier()
    elif modelName == 'GaussianNB':
        model = GaussianNB()
    elif modelName == 'LogisticRegression':
        model = linear_model.LogisticRegression()
    elif modelName == 'QuadraticDiscriminantAnalysis':
        model = QuadraticDiscriminantAnalysis()
    return model
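Examples #4 and #8 come from the same module and pair up: getModels() advertises the valid names, and getSKLearnModel() maps each one to an estimator instance. A hypothetical driver loop (not in the source file) might look like this:

# Hypothetical caller; getModels and getSKLearnModel are defined above.
for name in getModels():
    model = getSKLearnModel(name)
    print(name, "->", type(model).__name__)  # e.g. "Rigid" -> "Ridge"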
Example #9
Source File: pca_regression.py From AmusingPythonCodes with MIT License
# Module-level imports needed by this snippet (not shown in the excerpt):
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model, svm
from sklearn.ensemble import BaggingRegressor, RandomForestRegressor, AdaBoostRegressor
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import KFold, cross_val_score

def lets_try(train, labels):
    results = {}

    def test_model(clf):
        cv = KFold(n_splits=5, shuffle=True, random_state=45)
        r2 = make_scorer(r2_score)
        r2_val_score = cross_val_score(clf, train, labels, cv=cv, scoring=r2)
        scores = [r2_val_score.mean()]
        return scores

    clf = linear_model.LinearRegression()
    results["Linear"] = test_model(clf)
    clf = linear_model.Ridge()
    results["Ridge"] = test_model(clf)
    clf = linear_model.BayesianRidge()
    results["Bayesian Ridge"] = test_model(clf)
    clf = linear_model.HuberRegressor()
    results["Huber"] = test_model(clf)
    clf = linear_model.Lasso(alpha=1e-4)
    results["Lasso"] = test_model(clf)
    clf = BaggingRegressor()
    results["Bagging"] = test_model(clf)
    clf = RandomForestRegressor()
    results["RandomForest"] = test_model(clf)
    clf = AdaBoostRegressor()
    results["AdaBoost"] = test_model(clf)
    clf = svm.SVR()
    results["SVM RBF"] = test_model(clf)
    clf = svm.SVR(kernel="linear")
    results["SVM Linear"] = test_model(clf)

    results = pd.DataFrame.from_dict(results, orient='index')
    results.columns = ["R Square Score"]
    # results = results.sort(columns=["R Square Score"], ascending=False)
    results.plot(kind="bar", title="Model Scores")
    axes = plt.gca()
    axes.set_ylim([0.5, 1])
    return results
Example #10
Source File: test_doublyrobust.py From causallib with Apache License 2.0
def test_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR
    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.neighbors import KNeighborsClassifier

    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    data = self.create_uninformative_ox_dataset()

    for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                               RandomForestClassifier(n_estimators=100),
                               MLPClassifier(hidden_layer_sizes=(5,)),
                               KNeighborsClassifier(n_neighbors=20)]:
        weight_model = IPW(propensity_learner)
        propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
        for outcome_learner in [GradientBoostingRegressor(n_estimators=10),
                                RandomForestRegressor(n_estimators=10),
                                RANSACRegressor(), HuberRegressor(),
                                SVR(), LinearSVR()]:
            outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
            outcome_model = Standardization(outcome_learner)
            with self.subTest("Test fit using {} & {}".format(propensity_learner_name,
                                                              outcome_learner_name)):
                model = self.estimator.__class__(outcome_model, weight_model)
                model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                self.assertTrue(True)  # Fit did not crash

        for outcome_learner in [MLPRegressor(hidden_layer_sizes=(5,)),
                                ElasticNet(), PassiveAggressiveRegressor(),
                                KNeighborsRegressor()]:
            outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
            outcome_model = Standardization(outcome_learner)
            with self.subTest("Test fit using {} & {}".format(propensity_learner_name,
                                                              outcome_learner_name)):
                model = self.estimator.__class__(outcome_model, weight_model)
                with self.assertRaises(TypeError):
                    # Joffe forces learning with sample_weights;
                    # not all ML models support that, so calling fit should fail
                    model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)