Python sklearn.tree.ExtraTreeRegressor() Examples
The following are 6 code examples of sklearn.tree.ExtraTreeRegressor().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module sklearn.tree, or try the search function.
Example #1
Source File: skl_utils.py From kaggle-HomeDepot with MIT License | 5 votes |
def __init__(self, base_estimator=None, n_estimators=50, max_features=1.0,
             max_depth=6, learning_rate=1.0, loss='linear', random_state=None):
    """Build an AdaBoost regressor around a configurable weak learner.

    Passing ``base_estimator='etr'`` selects an ExtraTreeRegressor as the
    weak learner; any other value (including None) falls back to a
    DecisionTreeRegressor.  The fitted-tree depth and feature subsampling
    are controlled by ``max_depth`` and ``max_features``; the remaining
    arguments are forwarded to AdaBoostRegressor unchanged.
    """
    # Select the weak learner.  Note: only the exact string 'etr' picks
    # the extra-tree variant; comparing a falsy value against 'etr' is
    # False anyway, so no separate truthiness guard is needed.
    if base_estimator == 'etr':
        weak_learner = ExtraTreeRegressor(max_depth=max_depth,
                                          max_features=max_features)
    else:
        weak_learner = DecisionTreeRegressor(max_depth=max_depth,
                                             max_features=max_features)
    self.model = sklearn.ensemble.AdaBoostRegressor(
        base_estimator=weak_learner,
        n_estimators=n_estimators,
        learning_rate=learning_rate,
        random_state=random_state,
        loss=loss)
Example #2
Source File: SeparateTreesRegressor.py From Cytomine-python-datamining with Apache License 2.0 | 5 votes |
def build_lonely_tree_regressor(X, y, max_features, max_depth, min_samples_split):
    """Fit a single ExtraTreeRegressor on (X, y) and return the fitted tree."""
    regressor = ExtraTreeRegressor(
        max_features=max_features,
        max_depth=max_depth,
        min_samples_split=min_samples_split,
    )
    # ExtraTreeRegressor.fit returns the estimator itself, so this is the
    # same object that was just constructed, now fitted.
    return regressor.fit(X, y)
Example #3
Source File: VotingTreeRegressor.py From Cytomine-python-datamining with Apache License 2.0 | 5 votes |
def build_voting_tree_regressor(X, y, max_features, max_depth, min_samples_split):
    """Fit one ExtraTreeRegressor on (X, y) for use as a voting member."""
    tree_regressor = ExtraTreeRegressor(
        max_features=max_features,
        max_depth=max_depth,
        min_samples_split=min_samples_split,
    )
    # fit() returns self, so the fitted estimator is returned directly.
    return tree_regressor.fit(X, y)
Example #4
Source File: test_utils.py From causallib with Apache License 2.0 | 5 votes |
def test_check_regression_learner_is_fitted(self):
    """Check ensure_learner_is_fitted across several regressor families."""
    from sklearn.linear_model import LinearRegression
    from sklearn.tree import ExtraTreeRegressor
    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.svm import SVR
    from sklearn.datasets import make_regression

    # A small synthetic regression problem shared by every learner below.
    X, y = make_regression()
    learners = (
        LinearRegression(),
        ExtraTreeRegressor(),
        GradientBoostingRegressor(),
        SVR(),
    )
    for learner in learners:
        self.ensure_learner_is_fitted(learner, X, y)
Example #5
Source File: test_tree.py From pandas-ml with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_objectmapper(self):
    """Each ModelFrame.tree accessor must be the sklearn.tree object itself."""
    df = pdml.ModelFrame([])
    # Identity (assertIs), not equality: the accessor must expose the very
    # same class/function objects that sklearn.tree defines.
    for attr_name in ('DecisionTreeClassifier',
                      'DecisionTreeRegressor',
                      'ExtraTreeClassifier',
                      'ExtraTreeRegressor',
                      'export_graphviz'):
        self.assertIs(getattr(df.tree, attr_name), getattr(tree, attr_name))
Example #6
Source File: ensemble.py From sk-dist with Apache License 2.0 | 5 votes |
def __init__(self, sc=None, partitions='auto', n_estimators=100, max_depth=5,
             min_samples_split=2, min_samples_leaf=1,
             min_weight_fraction_leaf=0., max_leaf_nodes=None,
             min_impurity_decrease=0., min_impurity_split=None,
             sparse_output=True, n_jobs=None, random_state=None, verbose=0,
             warm_start=False):
    """Configure an ensemble of ExtraTreeRegressor base estimators.

    ``sc`` and ``partitions`` control the distributed execution backend
    (presumably a SparkContext and its partitioning — confirm against the
    enclosing sk-dist class); all other arguments mirror the usual
    scikit-learn forest/embedding hyperparameters and are stored on the
    instance so the base class can forward them onto each tree.
    """
    # Hyperparameter names the ensemble base class copies from this
    # wrapper onto every cloned base tree.
    forwarded_params = ("criterion", "max_depth", "min_samples_split",
                       "min_samples_leaf", "min_weight_fraction_leaf",
                       "max_features", "max_leaf_nodes",
                       "min_impurity_decrease", "min_impurity_split",
                       "random_state")
    super().__init__(
        base_estimator=ExtraTreeRegressor(),
        n_estimators=n_estimators,
        estimator_params=forwarded_params,
        bootstrap=False,   # totally-random trees: no bootstrap sampling
        oob_score=False,   # no bootstrap, so no out-of-bag estimate
        n_jobs=n_jobs,
        random_state=random_state,
        verbose=verbose,
        warm_start=warm_start)
    # Stored verbatim so the base class's parameter forwarding (above)
    # and get_params/set_params can see them.
    self.max_depth = max_depth
    self.min_samples_split = min_samples_split
    self.min_samples_leaf = min_samples_leaf
    self.min_weight_fraction_leaf = min_weight_fraction_leaf
    self.max_leaf_nodes = max_leaf_nodes
    self.min_impurity_decrease = min_impurity_decrease
    self.min_impurity_split = min_impurity_split
    self.sparse_output = sparse_output
    self.sc = sc
    self.partitions = partitions