Python sklearn.pipeline._name_estimators() Examples
The following are 6 code examples of sklearn.pipeline._name_estimators(), drawn from open-source projects; the source file, project, and license are noted above each example. _name_estimators() is a private scikit-learn helper that pairs each estimator with an automatically generated name based on its type. You may also want to check out the other functions and classes of the sklearn.pipeline module.
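For reference, here is a minimal sketch of what _name_estimators() returns, run against a recent scikit-learn (it is a private helper, so its exact behavior may vary between versions):

from sklearn.decomposition import PCA
from sklearn.pipeline import _name_estimators
from sklearn.preprocessing import StandardScaler

# Distinct types get the plain lowercase class name.
print(_name_estimators([StandardScaler(), PCA()]))
# [('standardscaler', StandardScaler()), ('pca', PCA())]

# Repeated types are disambiguated with numeric suffixes.
print(_name_estimators([PCA(), PCA()]))
# [('pca-1', PCA()), ('pca-2', PCA())]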
Example #1
Source File: pipeline.py, from sparkit-learn (Apache License 2.0)
from sklearn.pipeline import _name_estimators

# SparkFeatureUnion is defined elsewhere in this source file.
def make_sparkunion(*transformers):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not
    require, and does not permit, naming the transformers. Instead, they
    will be given names automatically based on their types. It also does
    not allow weighting.

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> make_sparkunion(PCA(), TruncatedSVD())  # doctest: +NORMALIZE_WHITESPACE
    SparkFeatureUnion(n_jobs=1,
        transformer_list=[('pca', PCA(copy=True, n_components=None,
                                      whiten=False)),
                          ('truncatedsvd',
                           TruncatedSVD(algorithm='randomized', n_components=2,
                                        n_iter=5, random_state=None,
                                        tol=0.0))],
        transformer_weights=None)

    Returns
    -------
    f : FeatureUnion
    """
    return SparkFeatureUnion(_name_estimators(transformers))
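In other words, the shorthand should build the same union you would get by naming the steps yourself. A minimal equivalence sketch, assuming sparkit-learn is installed and that both names are importable from splearn.pipeline:

from sklearn.decomposition import PCA, TruncatedSVD
from splearn.pipeline import SparkFeatureUnion, make_sparkunion

auto = make_sparkunion(PCA(), TruncatedSVD())
manual = SparkFeatureUnion([('pca', PCA()), ('truncatedsvd', TruncatedSVD())])

# Both unions carry the same auto-generated transformer names.
assert ([name for name, _ in auto.transformer_list]
        == [name for name, _ in manual.transformer_list])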
Example #2
Source File: p206_majority_vote_classifier.py, from PythonMachineLearningExamples (MIT License)
# __init__ of a majority-vote ensemble classifier; the full source imports
# _name_estimators from sklearn.pipeline.
def __init__(self, classifiers, vote='classlabel', weights=None):
    self.classifiers = classifiers
    # Map each base classifier to the lowercase, type-based name that
    # _name_estimators generates for it.
    self.named_classifiers = {key: value for key, value
                              in _name_estimators(classifiers)}
    self.vote = vote
    self.weights = weights
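A self-contained sketch of how this constructor behaves; the host class name MajorityVoteClassifier is an assumption inferred from the source file name:

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import _name_estimators
from sklearn.tree import DecisionTreeClassifier

class MajorityVoteClassifier:  # hypothetical host class for the __init__ above
    def __init__(self, classifiers, vote='classlabel', weights=None):
        self.classifiers = classifiers
        self.named_classifiers = dict(_name_estimators(classifiers))
        self.vote = vote
        self.weights = weights

mv_clf = MajorityVoteClassifier([LogisticRegression(), DecisionTreeClassifier()])
print(sorted(mv_clf.named_classifiers))
# ['decisiontreeclassifier', 'logisticregression']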
Example #3
Source File: pipeline.py, from kenchi (BSD 3-Clause "New" or "Revised" License)
from sklearn.pipeline import _name_estimators

# Pipeline here is kenchi's own subclass, defined elsewhere in this file.
def make_pipeline(*steps):
    """Construct a Pipeline from the given estimators.

    This is a shorthand for the Pipeline constructor; it does not require,
    and does not permit, naming the estimators. Instead, their names will
    be set to the lowercase of their types automatically.

    Parameters
    ----------
    *steps : list
        List of estimators.

    Returns
    -------
    p : Pipeline

    Examples
    --------
    >>> from kenchi.outlier_detection import MiniBatchKMeans
    >>> from kenchi.pipeline import make_pipeline
    >>> from sklearn.preprocessing import StandardScaler
    >>> scaler = StandardScaler()
    >>> det = MiniBatchKMeans()
    >>> pipeline = make_pipeline(scaler, det)
    """
    return Pipeline(_name_estimators(steps))
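After construction, the generated step names can be inspected; a short sketch, assuming kenchi is installed and its Pipeline inherits named_steps from scikit-learn's Pipeline:

from kenchi.outlier_detection import MiniBatchKMeans
from kenchi.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipeline = make_pipeline(StandardScaler(), MiniBatchKMeans())
# Step names are the lowercase class names chosen by _name_estimators.
print(list(pipeline.named_steps))
# ['standardscaler', 'minibatchkmeans']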
Example #4
Source File: feature_union.py, from mercari-solution (MIT License)
from sklearn.pipeline import _name_estimators

# FeatureUnionMP is defined elsewhere in this source file.
def make_union_mp(*transformers, **kwargs):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not
    require, and does not permit, naming the transformers. Instead, they
    will be given names automatically based on their types. It also does
    not allow weighting.

    Parameters
    ----------
    *transformers : list of estimators

    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).

    Returns
    -------
    f : FeatureUnion

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> from sklearn.pipeline import make_union
    >>> make_union(PCA(), TruncatedSVD())  # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
        transformer_list=[('pca', PCA(copy=True, iterated_power='auto',
                                      n_components=None, random_state=None,
                                      svd_solver='auto', tol=0.0,
                                      whiten=False)),
                          ('truncatedsvd',
                           TruncatedSVD(algorithm='randomized', n_components=2,
                                        n_iter=5, random_state=None,
                                        tol=0.0))],
        transformer_weights=None)
    """
    n_jobs = kwargs.pop('n_jobs', 1)
    if kwargs:
        # We do not currently support `transformer_weights`, as we may want
        # to change its type spec in make_union.
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format(list(kwargs.keys())[0]))
    return FeatureUnionMP(_name_estimators(transformers), n_jobs=n_jobs)
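Because make_union_mp does not let you name transformers, collisions between transformers of the same type are resolved by _name_estimators' numeric suffixes. A quick sketch using scikit-learn's public make_union, whose naming behavior is the same:

from sklearn.decomposition import PCA
from sklearn.pipeline import make_union

union = make_union(PCA(n_components=2), PCA(n_components=8))
print([name for name, _ in union.transformer_list])
# ['pca-1', 'pca-2']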
Example #5
Source File: feature_union.py, from Wordbatch (GNU General Public License v2.0)
from sklearn.pipeline import _name_estimators

# FeatureUnion here is Wordbatch's own subclass (which accepts the
# `concatenate` option), defined elsewhere in this source file.
def make_union(*transformers, **kwargs):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not
    require, and does not permit, naming the transformers. Instead, they
    will be given names automatically based on their types. It also does
    not allow weighting.

    Parameters
    ----------
    *transformers : list of estimators

    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).

    concatenate : bool, optional
        Whether to concatenate the transformer outputs (default True).

    Returns
    -------
    f : FeatureUnion

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> from sklearn.pipeline import make_union
    >>> make_union(PCA(), TruncatedSVD())  # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
        transformer_list=[('pca', PCA(copy=True, iterated_power='auto',
                                      n_components=None, random_state=None,
                                      svd_solver='auto', tol=0.0,
                                      whiten=False)),
                          ('truncatedsvd',
                           TruncatedSVD(algorithm='randomized', n_components=2,
                                        n_iter=5, random_state=None,
                                        tol=0.0))],
        transformer_weights=None)
    """
    n_jobs = kwargs.pop('n_jobs', 1)
    concatenate = kwargs.pop('concatenate', True)
    if kwargs:
        # We do not currently support `transformer_weights`, as we may want
        # to change its type spec in make_union.
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format(list(kwargs.keys())[0]))
    return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs,
                        concatenate=concatenate)
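Both union shorthands use the same argument-handling pattern: *transformers swallows every positional argument, so options must be passed by keyword, and anything unrecognized fails fast. The pattern in isolation, with a hypothetical function name:

def make_something(*transformers, **kwargs):
    n_jobs = kwargs.pop('n_jobs', 1)
    if kwargs:  # anything left over was not a recognized option
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format(list(kwargs.keys())[0]))
    return transformers, n_jobs

print(make_something('a', 'b', n_jobs=2))  # (('a', 'b'), 2)
# make_something('a', transformer_weights=None)  # would raise TypeError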
Example #6
Source File: pipeline.py, from scikit-lego (MIT License)
from sklearn.pipeline import _name_estimators

# DebugPipeline is defined elsewhere in this source file.
def make_debug_pipeline(*steps, **kwargs):
    """Construct a DebugPipeline from the given estimators.

    This is a shorthand for the DebugPipeline constructor; it does not
    require, and does not permit, naming the estimators. Instead, their
    names will be set to the lowercase of their types automatically.

    Parameters
    ----------
    *steps : list of estimators

    memory : None, str or object with the joblib.Memory interface, optional
        Used to cache the fitted transformers of the pipeline. By default,
        no caching is performed. If a string is given, it is the path to
        the caching directory. Enabling caching triggers a clone of the
        transformers before fitting. Therefore, the transformer instance
        given to the pipeline cannot be inspected directly. Use the
        attribute ``named_steps`` or ``steps`` to inspect estimators within
        the pipeline. Caching the transformers is advantageous when fitting
        is time consuming.

    verbose : bool, default=False
        If True, the time elapsed while fitting each step will be printed
        as it is completed.

    log_callback : str, default=None
        The callback function that logs information in between each
        intermediate step. If set to ``'default'``,
        :func:`default_log_callback` is used. See
        :func:`default_log_callback` for an example.

    See Also
    --------
    sklego.pipeline.DebugPipeline : Class for creating a pipeline of
        transforms with a final estimator.

    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> make_debug_pipeline(StandardScaler(), GaussianNB(priors=None))
    DebugPipeline(steps=[('standardscaler', StandardScaler()),
                         ('gaussiannb', GaussianNB())])

    Returns
    -------
    p : DebugPipeline
    """
    memory = kwargs.pop('memory', None)
    verbose = kwargs.pop('verbose', False)
    log_callback = kwargs.pop('log_callback', None)
    if kwargs:
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format(list(kwargs.keys())[0]))
    return DebugPipeline(_name_estimators(steps), memory=memory,
                         verbose=verbose, log_callback=log_callback)
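The log_callback hook is what distinguishes a DebugPipeline from a plain Pipeline. A short usage sketch, assuming scikit-lego is installed:

from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklego.pipeline import make_debug_pipeline

# 'default' wires in default_log_callback, which logs information between
# steps (per the docstring above; see default_log_callback for details).
pipeline = make_debug_pipeline(StandardScaler(), GaussianNB(),
                               log_callback='default')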