Python sklearn.model_selection._validation._fit_and_score() Examples
The following are 6 code examples of sklearn.model_selection._validation._fit_and_score().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module sklearn.model_selection._validation.
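
Before the examples, a minimal sketch of calling the helper directly may be useful. Keep in mind that _fit_and_score() is a private scikit-learn helper: the positional signature below (estimator, X, y, scorer, train, test, verbose, parameters, fit_params) matches the ~0.21 releases these examples target and has changed in later versions, so treat this as a version-specific sketch rather than a stable API.

from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.metrics import check_scoring
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection._validation import _fit_and_score
from sklearn.svm import SVC

X, y = make_classification(n_samples=30, random_state=0)
clf = SVC(kernel="linear", random_state=0)
train, test = next(ShuffleSplit(random_state=0).split(X))
scorer = check_scoring(clf, scoring="accuracy")

# positional order (scikit-learn ~0.21): estimator, X, y, scorer,
# train, test, verbose, parameters, fit_params
result = _fit_and_score(clone(clf), X, y, scorer, train, test, 0, None, None)
print(result)  # with a single callable scorer and default flags:
               # a one-element list holding the test score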
Example #1
Source File: fixes.py From skutil with BSD 3-Clause "New" or "Revised" License
def _do_fit(n_jobs, verbose, pre_dispatch, base_estimator,
            X, y, scorer, parameter_iterable, fit_params,
            error_score, cv, **kwargs):
    groups = kwargs.pop('groups')

    # test_score, n_samples, parameters
    out = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)(
        delayed(_fit_and_score)(
            clone(base_estimator), X, y, scorer,
            train, test, verbose, parameters,
            fit_params=fit_params,
            return_train_score=False,
            return_n_test_samples=True,
            return_times=False,
            return_parameters=True,
            error_score=error_score)
        for parameters in parameter_iterable
        for train, test in cv.split(X, y, groups))

    # test_score, n_samples, _, parameters
    return [(mod[0], mod[1], None, mod[2]) for mod in out]
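
The nested generator expression above expands every (parameter setting, CV split) pair into one parallel fit-and-score job. The following self-contained snippet illustrates just that expansion pattern, with ParameterGrid and KFold standing in for the arguments the surrounding skutil code would supply:

import numpy as np
from sklearn.model_selection import KFold, ParameterGrid

X = np.arange(24).reshape(12, 2)
y = np.array([0, 1] * 6)

parameter_iterable = ParameterGrid({'C': [0.1, 1.0, 10.0]})
cv = KFold(n_splits=4)

# same double loop as in _do_fit above, minus the parallel dispatch
jobs = [(parameters, train, test)
        for parameters in parameter_iterable
        for train, test in cv.split(X, y)]

# 3 parameter settings x 4 folds = 12 fit-and-score jobs
assert len(jobs) == 12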
Example #2
Source File: optimize.py From SimulatedAnnealing with Apache License 2.0
def fit_score(self, X, Y):
    if isinstance(self.cv, int):
        n_folds = self.cv
        self.cv = KFold(n_splits=n_folds).split(X)

    # Formatting is kinda ugly but provides best debugging view
    out = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                   pre_dispatch=self.pre_dispatch)\
        (delayed(_fit_and_score)(clone(self.clf), X, Y, self.metric,
                                 train, test, self.verbose, {}, {},
                                 return_parameters=False,
                                 error_score='raise')
         for train, test in self.cv)

    # Out is a list of triplet: score, estimator, n_test_samples
    scores = list(zip(*out))[0]
    return np.mean(scores), np.std(scores)
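
One caveat worth noting in this example: KFold(n_splits=n_folds).split(X) returns a one-shot generator, so self.cv is exhausted after a single call to fit_score(). Materializing the splits into a list keeps them reusable, as this small demonstration shows:

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(20).reshape(10, 2)

splits = KFold(n_splits=5).split(X)
assert len(list(splits)) == 5  # first pass consumes the generator
assert len(list(splits)) == 0  # second pass finds it exhausted

# materializing the folds up front keeps them reusable across calls
splits = list(KFold(n_splits=5).split(X))
assert len(list(splits)) == 5
assert len(list(splits)) == 5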
Example #3
Source File: fixes.py From skutil with BSD 3-Clause "New" or "Revised" License
def _do_fit(n_jobs, verbose, pre_dispatch, base_estimator,
            X, y, scorer, parameter_iterable, fit_params,
            error_score, cv, **kwargs):
    # test_score, n_samples, score_time, parameters
    return Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)(
        delayed(_fit_and_score)(
            clone(base_estimator), X, y, scorer,
            train, test, verbose, parameters,
            fit_params, return_parameters=True,
            error_score=error_score)
        for parameters in parameter_iterable
        for train, test in cv)
Example #4
Source File: test_validation.py From Mastering-Elasticsearch-7.0 with MIT License
def test_fit_and_score_working():
    X, y = make_classification(n_samples=30, random_state=0)
    clf = SVC(kernel="linear", random_state=0)
    train, test = next(ShuffleSplit().split(X))

    # Test return_parameters option
    fit_and_score_args = [clf, X, y, dict(), train, test, 0]
    fit_and_score_kwargs = {'parameters': {'max_iter': 100, 'tol': 0.1},
                            'fit_params': None,
                            'return_parameters': True}
    result = _fit_and_score(*fit_and_score_args, **fit_and_score_kwargs)
    assert result[-1] == fit_and_score_kwargs['parameters']
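
For context, _fit_and_score() applies a non-None parameters dict to the estimator via set_params() before fitting, which is why result[-1] can be compared against that dict here. A simplified, self-contained sketch of that step (not the library's exact code):

from sklearn.base import clone
from sklearn.svm import SVC

clf = SVC(kernel="linear", random_state=0)
parameters = {'max_iter': 100, 'tol': 0.1}

# roughly what _fit_and_score does with a non-None `parameters` dict
estimator = clone(clf).set_params(**parameters)
assert estimator.max_iter == 100 and estimator.tol == 0.1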
Example #5
Source File: test_validation.py From Mastering-Elasticsearch-7.0 with MIT License
def test_fit_and_score_verbosity(capsys, return_train_score, scorer, expected):
    X, y = make_classification(n_samples=30, random_state=0)
    clf = SVC(kernel="linear", random_state=0)
    train, test = next(ShuffleSplit().split(X))

    # test print without train score
    fit_and_score_args = [clf, X, y, scorer, train, test, 10, None, None]
    fit_and_score_kwargs = {'return_train_score': return_train_score}
    _fit_and_score(*fit_and_score_args, **fit_and_score_kwargs)
    out, _ = capsys.readouterr()
    assert out.split('\n')[1] == expected
Example #6
Source File: test_validation.py From Mastering-Elasticsearch-7.0 with MIT License
def test_fit_and_score_failing():
    # Create a failing classifier to deliberately fail
    failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER)
    # dummy X data
    X = np.arange(1, 10)
    y = np.ones(9)
    fit_and_score_args = [failing_clf, X, None, dict(), None, None, 0,
                          None, None]
    # passing error score to trigger the warning message
    fit_and_score_kwargs = {'error_score': 0}
    # check if the warning message type is as expected
    assert_warns(FitFailedWarning, _fit_and_score, *fit_and_score_args,
                 **fit_and_score_kwargs)
    # since we're using FailingClassifier, our error will be the following
    error_message = "ValueError: Failing classifier failed as required"
    # the warning message we're expecting to see
    warning_message = ("Estimator fit failed. The score on this train-test "
                       "partition for these parameters will be set to %f. "
                       "Details: \n%s" % (fit_and_score_kwargs['error_score'],
                                          error_message))
    # check if the same warning is triggered
    assert_warns_message(FitFailedWarning, warning_message, _fit_and_score,
                         *fit_and_score_args, **fit_and_score_kwargs)

    # check if warning was raised, with default error_score argument
    warning_message = ("From version 0.22, errors during fit will result "
                       "in a cross validation score of NaN by default. Use "
                       "error_score='raise' if you want an exception "
                       "raised or error_score=np.nan to adopt the "
                       "behavior from version 0.22.")
    with pytest.raises(ValueError):
        assert_warns_message(FutureWarning, warning_message, _fit_and_score,
                             *fit_and_score_args)

    fit_and_score_kwargs = {'error_score': 'raise'}
    # check if exception was raised, with error_score='raise'
    assert_raise_message(ValueError, "Failing classifier failed as required",
                         _fit_and_score, *fit_and_score_args,
                         **fit_and_score_kwargs)

    # check that functions upstream pass error_score param to _fit_and_score
    error_message = ("error_score must be the string 'raise' or a"
                     " numeric value. (Hint: if using 'raise', please"
                     " make sure that it has been spelled correctly.)")
    assert_raise_message(ValueError, error_message, cross_validate,
                         failing_clf, X, cv=3, error_score='unvalid-string')
    assert_raise_message(ValueError, error_message, cross_val_score,
                         failing_clf, X, cv=3, error_score='unvalid-string')
    assert_raise_message(ValueError, error_message, learning_curve,
                         failing_clf, X, y, cv=3,
                         error_score='unvalid-string')
    assert_raise_message(ValueError, error_message, validation_curve,
                         failing_clf, X, y, 'parameter',
                         [FailingClassifier.FAILING_PARAMETER], cv=3,
                         error_score='unvalid-string')

    assert_equal(failing_clf.score(), 0.)  # FailingClassifier coverage
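
FailingClassifier is a helper defined elsewhere in scikit-learn's test suite rather than on this page. The sketch below approximates its shape from how the test uses it (fit() raises the asserted ValueError when constructed with the failing parameter, and score() returns 0.0); treat the details, such as the sentinel value, as assumptions rather than the exact upstream definition.

import numpy as np
from sklearn.base import BaseEstimator

class FailingClassifier(BaseEstimator):
    """Approximation of the scikit-learn test helper used above."""
    FAILING_PARAMETER = 2  # assumed sentinel value

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        # raise the exact message the test asserts on
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")
        return self

    def predict(self, X):
        return np.zeros(X.shape[0])

    def score(self, X=None, y=None):
        return 0.  # matches the final assert_equal in the test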