Python sklearn.svm.NuSVR() Examples
The following are 20 code examples of sklearn.svm.NuSVR(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module sklearn.svm, or try the search function.
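Before the project examples, here is a minimal usage sketch of NuSVR itself (not taken from any of the projects below; the synthetic data and parameter values are illustrative only). The nu parameter bounds the fraction of support vectors and of training errors, replacing SVR's epsilon, and must lie in (0, 1]:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.svm import NuSVR

X, y = make_regression(n_samples=100, n_features=4, random_state=0)
reg = NuSVR(nu=0.5, C=1.0, kernel='rbf')  # nu must lie in (0, 1]
reg.fit(X, y)
print(reg.predict(X[:5]))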
Example #1
Source File: test_gradient_boosting.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def test_gradient_boosting_with_init_pipeline():
    # Check that the init estimator can be a pipeline (see issue #13466)
    X, y = make_regression(random_state=0)
    init = make_pipeline(LinearRegression())
    gb = GradientBoostingRegressor(init=init)
    gb.fit(X, y)  # pipeline without sample_weight works fine

    with pytest.raises(
            ValueError,
            match='The initial estimator Pipeline does not support sample '
                  'weights'):
        gb.fit(X, y, sample_weight=np.ones(X.shape[0]))

    # Passing sample_weight to a pipeline raises a ValueError. This test
    # makes sure we make the distinction between ValueError raised by a
    # pipeline that was passed sample_weight, and a ValueError raised by a
    # regular estimator whose input checking failed.
    with pytest.raises(
            ValueError,
            match='nu <= 0 or nu > 1'):
        # Note that NuSVR properly supports sample_weight
        init = NuSVR(gamma='auto', nu=1.5)
        gb = GradientBoostingRegressor(init=init)
        gb.fit(X, y, sample_weight=np.ones(X.shape[0]))
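The ValueError this test matches on comes from the nu parameter check: nu must lie in (0, 1]. A minimal sketch of triggering it directly (the exact error message varies across scikit-learn versions, so treat the printed text as an assumption):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.svm import NuSVR

X, y = make_regression(random_state=0)
try:
    NuSVR(nu=1.5).fit(X, y)  # nu > 1 is invalid
except ValueError as exc:
    print(exc)  # e.g. "nu <= 0 or nu > 1" on older scikit-learn releases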
Example #2
Source File: test_svm.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def test_sample_weights():
    # Test weights on individual samples
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC(gamma="scale")
    clf.fit(X, Y)
    assert_array_equal(clf.predict([X[2]]), [1.])

    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([X[2]]), [2.])

    # test that rescaling all samples is the same as changing C
    clf = svm.SVC(gamma="scale")
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
Example #3
Source File: test_svm.py From twitter-stock-recommendation with MIT License | 6 votes |
def test_sample_weights():
    # Test weights on individual samples
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_equal(clf.predict([X[2]]), [1.])

    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([X[2]]), [2.])

    # test that rescaling all samples is the same as changing C
    clf = svm.SVC()
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
Example #4
Source File: _NuSVR.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def convert(model, feature_names, target):
    """Convert a Nu Support Vector Regression (NuSVR) model to the protobuf
    spec.

    Parameters
    ----------
    model: NuSVR
        A trained NuSVR encoder model.

    feature_names: [str]
        Name of the input columns.

    target: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model
    """
    if not (_HAS_SKLEARN):
        raise RuntimeError(
            "scikit-learn not found. scikit-learn conversion API is disabled."
        )
    _sklearn_util.check_expected_type(model, _NuSVR)
    return _SVR.convert(model, feature_names, target)
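For context, this converter is normally reached through coremltools' public API rather than called directly. A minimal sketch, assuming a trained two-feature model (the feature and target names are illustrative, matching the style of the tests later on this page):

import numpy as np
import coremltools
from sklearn.svm import NuSVR

X = np.random.RandomState(0).randn(50, 2)
y = X @ np.array([2.0, 3.0]) + 1.0

model = NuSVR(kernel='linear').fit(X, y)
# input feature names and output name are illustrative
mlmodel = coremltools.converters.sklearn.convert(model, ["x1", "x2"], "target")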
Example #5
Source File: test_svm.py From twitter-stock-recommendation with MIT License | 6 votes |
def test_svr():
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
Example #6
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License | 6 votes |
def test_convert_nusvr_bool(self):
    model, X = fit_regression_model(
        NuSVR(), is_bool=True)
    model_onnx = convert_sklearn(
        model, "NuSVR",
        [("input", BooleanTensorType([None, X.shape[1]]))],
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X, model, model_onnx,
        basename="SklearnNuSVRBool",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')"
    )
Example #7
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License | 6 votes |
def test_convert_nusvr(self):
    model, X = self._fit_binary_classification(NuSVR())
    model_onnx = convert_sklearn(
        model, "SVR",
        [("input", FloatTensorType([None, X.shape[1]]))])
    node = model_onnx.graph.node[0]
    self.assertIsNotNone(node)
    self._check_attributes(
        node,
        {
            "coefficients": None,
            "kernel_params": None,
            "kernel_type": "RBF",
            "post_transform": None,
            "rho": None,
            "support_vectors": None,
        },
    )
    dump_data_and_model(X, model, model_onnx, basename="SklearnRegNuSVR")
Example #8
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License | 6 votes |
def test_convert_nusvr_int(self):
    model, X = fit_regression_model(
        NuSVR(), is_int=True)
    model_onnx = convert_sklearn(
        model, "NuSVR",
        [("input", Int64TensorType([None, X.shape[1]]))],
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X, model, model_onnx,
        basename="SklearnNuSVRInt-Dec4",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')"
    )
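Outside the test harness, a converted NuSVR model can be executed with onnxruntime. A minimal sketch, assuming skl2onnx and onnxruntime are installed (the input name must match the one declared at conversion time; data and shapes are illustrative):

import numpy as np
import onnxruntime as rt
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from sklearn.svm import NuSVR

X = np.random.RandomState(0).randn(40, 3).astype(np.float32)
y = X.sum(axis=1)

model = NuSVR().fit(X, y)
onx = convert_sklearn(
    model, "NuSVR", [("input", FloatTensorType([None, X.shape[1]]))])
sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
pred = sess.run(None, {"input": X})[0]  # predictions as a numpy array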
Example #9
Source File: test_svm.py From twitter-stock-recommendation with MIT License | 5 votes |
def test_svr_coef_sign():
    # Test that SVR(kernel="linear") has coef_ with the right sign.
    # Non-regression test for #2933.
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)

    for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(
            svr.predict(X),
            np.dot(X, svr.coef_.ravel()) + svr.intercept_)
Example #10
Source File: test_svm.py From twitter-stock-recommendation with MIT License | 5 votes |
def test_unfitted():
    X = "foo!"  # input validation not required when SVM not fitted

    clf = svm.SVC()
    assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)

    clf = svm.NuSVR()
    assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)


# ignore convergence warnings from max_iter=1
Example #11
Source File: test_svm.py From twitter-stock-recommendation with MIT License | 5 votes |
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
    svms = [
        svm.SVC(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
        svm.SVR(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
        svm.OneClassSVM(kernel='linear').fit(iris.data),
    ]
    for clf in svms:
        assert_raises(AttributeError, clf.__setattr__, 'coef_',
                      np.arange(3))
        assert_raises((RuntimeError, ValueError),
                      clf.coef_.__setitem__, (0, 0), 0)
Example #12
Source File: test_svm.py From pandas-ml with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.svm.SVC, svm.SVC)
    self.assertIs(df.svm.LinearSVC, svm.LinearSVC)
    self.assertIs(df.svm.NuSVC, svm.NuSVC)
    self.assertIs(df.svm.SVR, svm.SVR)
    self.assertIs(df.svm.NuSVR, svm.NuSVR)
    self.assertIs(df.svm.OneClassSVM, svm.OneClassSVM)
Example #13
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License | 5 votes |
def test_convert_nusvr_default(self):
    model, X = self._fit_binary_classification(NuSVR())
    model_onnx = convert_sklearn(
        model, "SVR",
        [("input", FloatTensorType([None, X.shape[1]]))])
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(X, model, model_onnx, basename="SklearnRegNuSVR2")
Example #14
Source File: test_NuSVR.py From coremltools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_conversion_bad_inputs(self):
    # Error on converting an untrained model
    with self.assertRaises(TypeError):
        model = NuSVR()
        spec = scikit_converter.convert(model, "data", "out")

    # Check the expected class during conversion.
    with self.assertRaises(TypeError):
        model = OneHotEncoder()
        spec = scikit_converter.convert(model, "data", "out")
Example #15
Source File: test_NuSVR.py From coremltools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def setUpClass(self):
    """
    Set up the unit test by loading the dataset and training a model.
    """
    if not _HAS_SKLEARN:
        return

    self.scikit_model = NuSVR(kernel="linear")
    self.data = load_boston()
    self.scikit_model.fit(self.data["data"], self.data["target"])
Example #16
Source File: test_svm.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_unfitted():
    X = "foo!"  # input validation not required when SVM not fitted

    clf = svm.SVC(gamma="scale")
    assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)

    clf = svm.NuSVR(gamma='scale')
    assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)


# ignore convergence warnings from max_iter=1
Example #17
Source File: test_svm.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
    svms = [
        svm.SVC(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
        svm.SVR(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
        svm.OneClassSVM(kernel='linear').fit(iris.data),
    ]
    for clf in svms:
        assert_raises(AttributeError, clf.__setattr__, 'coef_',
                      np.arange(3))
        assert_raises((RuntimeError, ValueError),
                      clf.coef_.__setitem__, (0, 0), 0)
Example #18
Source File: test_svm.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_svr():
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
Example #19
Source File: test_NuSVR.py From coremltools with BSD 3-Clause "New" or "Revised" License | 4 votes |
def _test_evaluation(self, allow_slow):
    """
    Test that the same predictions are made
    """
    # Generate some smallish (some kernels take too long on anything else)
    # random data
    x, y = [], []
    for _ in range(50):
        cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
        x.append([cur_x1, cur_x2])
        y.append(1 + 2 * cur_x1 + 3 * cur_x2)

    input_names = ["x1", "x2"]
    df = pd.DataFrame(x, columns=input_names)

    # Parameters to test
    kernel_parameters = [
        {},
        {"kernel": "rbf", "gamma": 1.2},
        {"kernel": "linear"},
        {"kernel": "poly"},
        {"kernel": "poly", "degree": 2},
        {"kernel": "poly", "gamma": 0.75},
        {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
        {"kernel": "sigmoid"},
        {"kernel": "sigmoid", "gamma": 1.3},
        {"kernel": "sigmoid", "coef0": 0.8},
        {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
    ]
    non_kernel_parameters = [
        {},
        {"C": 1},
        {"C": 1.5, "shrinking": True},
        {"C": 0.5, "shrinking": False, "nu": 0.9},
    ]

    # Test
    for param1 in non_kernel_parameters:
        for param2 in kernel_parameters:
            cur_params = param1.copy()
            cur_params.update(param2)

            cur_model = NuSVR(**cur_params)
            cur_model.fit(x, y)
            df["prediction"] = cur_model.predict(x)

            spec = scikit_converter.convert(cur_model, input_names, "target")

            if _is_macos() and _macos_version() >= (10, 13):
                metrics = evaluate_regressor(spec, df)
                self.assertAlmostEquals(metrics["max_error"], 0)

            if not allow_slow:
                break

        if not allow_slow:
            break
Example #20
Source File: nusvm.py From driverlessai-recipes with Apache License 2.0 | 4 votes |
def fit(self, X, y, sample_weight=None, eval_set=None,
        sample_weight_eval_set=None, **kwargs):
    X = dt.Frame(X)
    orig_cols = list(X.names)
    if self.num_classes >= 2:
        feature_model = NuSVC(kernel='linear', nu=self.params['nu'])
        model = NuSVC(nu=self.params['nu'], kernel=self.params['kernel'],
                      degree=self.params['degree'],
                      probability=self.params['probability'])
        lb = LabelEncoder()
        lb.fit(self.labels)
        y = lb.transform(y)
    else:
        feature_model = NuSVR(kernel='linear', nu=self.params['nu'])
        model = NuSVR(nu=self.params['nu'], kernel=self.params['kernel'],
                      degree=self.params['degree'])

    # impute missing values with per-column means
    self.means = dict()
    for col in X.names:
        XX = X[:, col]
        self.means[col] = XX.mean1()
        if self.means[col] is None:
            self.means[col] = 0
        XX.replace(None, self.means[col])
        X[:, col] = XX
        assert X[dt.isna(dt.f[col]), col].nrows == 0
    X = X.to_numpy()

    # nu is infeasible sometimes
    # doing quaternary search on both sides of selected nu
    valid_nu = None
    while valid_nu is None:
        try:
            model.fit(X, y)
            valid_nu = self.params['nu']
        except Exception:
            if self.params['nu'] > 0.5:
                self.params['nu'] = 1.0 - self.params['nu']
            else:
                self.params['nu'] = (4.0 - 3.0 * self.params['nu']) / 4.0
            if self.num_classes >= 2:
                feature_model = NuSVC(kernel='linear', nu=self.params['nu'])
                model = NuSVC(nu=self.params['nu'],
                              kernel=self.params['kernel'],
                              degree=self.params['degree'],
                              probability=self.params['probability'])
            else:
                feature_model = NuSVR(kernel='linear', nu=self.params['nu'])
                model = NuSVR(nu=self.params['nu'],
                              kernel=self.params['kernel'],
                              degree=self.params['degree'])

    # linear-kernel twin model supplies coefficient-based importances
    feature_model.fit(X, y)
    importances = np.array(abs(feature_model.coef_)).ravel()

    self.set_model_properties(model=model,
                              features=orig_cols,
                              importances=importances.tolist(),
                              iterations=0)
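The retry loop above exists because libsvm can reject a requested nu as infeasible for a given dataset (in practice mostly for NuSVC with imbalanced classes, where scikit-learn raises a ValueError such as "specified nu is infeasible"). A stripped-down sketch of the same recovery idea, with the helper name, the estimator argument, and the bounded max_tries all being illustrative additions rather than part of the recipe:

from sklearn.svm import NuSVC

def fit_with_feasible_nu(estimator_cls, X, y, nu, max_tries=10):
    # Retry fitting, remapping nu when libsvm rejects it as infeasible.
    for _ in range(max_tries):
        try:
            return estimator_cls(nu=nu).fit(X, y), nu
        except ValueError:
            # same remapping as the recipe above: mirror high values,
            # pull low values toward the middle of (0, 1]
            nu = 1.0 - nu if nu > 0.5 else (4.0 - 3.0 * nu) / 4.0
    raise RuntimeError("no feasible nu found")

Bounding the number of attempts avoids the recipe's potentially unbounded while loop when no feasible nu exists.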