Python sklearn.utils.metaestimators._safe_split() Examples
The following are 5 code examples of sklearn.utils.metaestimators._safe_split().
You may also want to check out all available functions and classes of the module sklearn.utils.metaestimators.
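For context, _safe_split is a private scikit-learn helper that indexes X and y for a single cross-validation fold; for pairwise estimators (e.g. SVC(kernel="precomputed")), it slices the kernel matrix on both axes instead of indexing rows only. The following is a minimal sketch of direct usage, assuming a scikit-learn version where the import path used in the examples below is still valid (the function is private, so its location and signature may change between releases):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn.utils.metaestimators import _safe_split

X, y = load_iris(return_X_y=True)
K = np.dot(X, X.T)  # precomputed linear kernel

train, test = next(ShuffleSplit(test_size=0.25, random_state=0).split(X))

# Regular estimator: rows of X are indexed directly.
X_train, y_train = _safe_split(SVC(), X, y, train)

# Pairwise estimator: the kernel matrix is sliced on both axes.
# With train_indices given, the result is K[np.ix_(test, train)],
# i.e. test rows expressed against train columns.
clf_precomputed = SVC(kernel="precomputed")
K_train, _ = _safe_split(clf_precomputed, K, y, train)
K_test, _ = _safe_split(clf_precomputed, K, y, test, train)

print(X_train.shape, K_train.shape, K_test.shape)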
Example #1
Source File: test_multiclass.py From Mastering-Elasticsearch-7.0 with MIT License
def test_safe_split_with_precomputed_kernel():
    clf = SVC()
    clfp = SVC(kernel="precomputed")

    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)

    cv = ShuffleSplit(test_size=0.25, random_state=0)
    train, test = list(cv.split(X))[0]

    # For a regular estimator, _safe_split indexes rows of X; for a
    # pairwise (precomputed-kernel) estimator, it slices the kernel matrix.
    X_train, y_train = _safe_split(clf, X, y, train)
    K_train, y_train2 = _safe_split(clfp, K, y, train)
    assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
    assert_array_almost_equal(y_train, y_train2)

    # Passing the train indices as well yields the test-vs-train kernel block.
    X_test, y_test = _safe_split(clf, X, y, test, train)
    K_test, y_test2 = _safe_split(clfp, K, y, test, train)
    assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
    assert_array_almost_equal(y_test, y_test2)
Example #2
Source File: helpers.py From lale with Apache License 2.0
def split_with_schemas(estimator, all_X, all_y, indices, train_indices=None):
    subset_X, subset_y = _safe_split(
        estimator, all_X, all_y, indices, train_indices)
    if hasattr(all_X, 'json_schema'):
        n_rows = subset_X.shape[0]
        schema = {
            'type': 'array', 'minItems': n_rows, 'maxItems': n_rows,
            'items': all_X.json_schema['items']}
        lale.datasets.data_schemas.add_schema(subset_X, schema)
    if hasattr(all_y, 'json_schema'):
        n_rows = subset_y.shape[0]
        schema = {
            'type': 'array', 'minItems': n_rows, 'maxItems': n_rows,
            'items': all_y.json_schema['items']}
        lale.datasets.data_schemas.add_schema(subset_y, schema)
    return subset_X, subset_y
Example #3
Source File: test_multiclass.py From twitter-stock-recommendation with MIT License
def test_safe_split_with_precomputed_kernel():
    clf = SVC()
    clfp = SVC(kernel="precomputed")

    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)

    cv = ShuffleSplit(test_size=0.25, random_state=0)
    train, test = list(cv.split(X))[0]

    X_train, y_train = _safe_split(clf, X, y, train)
    K_train, y_train2 = _safe_split(clfp, K, y, train)
    assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
    assert_array_almost_equal(y_train, y_train2)

    X_test, y_test = _safe_split(clf, X, y, test, train)
    K_test, y_test2 = _safe_split(clfp, K, y, test, train)
    assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
    assert_array_almost_equal(y_test, y_test2)
Example #4
Source File: _validation.py From mriqc with BSD 3-Clause "New" or "Revised" License
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
    """Auxiliary function for permutation_test_score"""
    avg_score = []
    for train, test in cv.split(X, y, groups):
        # Fit on the training fold and score on the held-out fold.
        X_train, y_train = _safe_split(estimator, X, y, train)
        X_test, y_test = _safe_split(estimator, X, y, test, train)
        estimator.fit(X_train, y_train)
        avg_score.append(scorer(estimator, X_test, y_test))
    return np.mean(avg_score)
Example #5
Source File: sklearn.py From optuna with MIT License
def _partial_fit_and_score(
    self,
    estimator,  # type: BaseEstimator
    train,  # type: List[int]
    test,  # type: List[int]
    partial_fit_params,  # type: Dict[str, Any]
):
    # type: (...) -> List[Number]

    X_train, y_train = _safe_split(estimator, self.X, self.y, train)
    X_test, y_test = _safe_split(estimator, self.X, self.y, test, train_indices=train)

    start_time = time()

    try:
        estimator.partial_fit(X_train, y_train, **partial_fit_params)
    except Exception as e:
        if self.error_score == "raise":
            raise e
        elif isinstance(self.error_score, Number):
            fit_time = time() - start_time
            test_score = self.error_score
            score_time = 0.0
            if self.return_train_score:
                train_score = self.error_score
        else:
            raise ValueError("error_score must be 'raise' or numeric.")
    else:
        fit_time = time() - start_time
        test_score = self.scoring(estimator, X_test, y_test)
        score_time = time() - fit_time - start_time

        if self.return_train_score:
            train_score = self.scoring(estimator, X_train, y_train)

    # Required for type checking but is never expected to fail.
    assert isinstance(fit_time, Number)
    assert isinstance(score_time, Number)

    ret = [test_score, fit_time, score_time]

    if self.return_train_score:
        ret.insert(0, train_score)

    return ret