Python sklearn.linear_model.MultiTaskLasso() Examples
The following are 5 code examples of sklearn.linear_model.MultiTaskLasso(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out the other functions and classes available in the sklearn.linear_model module.
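Before the project-specific examples, here is a minimal, self-contained sketch of the basic MultiTaskLasso API. The data below is synthetic and chosen purely for illustration; MultiTaskLasso applies a mixed L1/L2 penalty so that each feature is either kept or dropped jointly across all targets.

import numpy as np
from sklearn.linear_model import MultiTaskLasso

# Synthetic data: 50 samples, 10 features, 3 regression targets (tasks).
rng = np.random.RandomState(0)
X = rng.randn(50, 10)
W = np.zeros((10, 3))
W[:4] = rng.randn(4, 3)                      # only the first 4 features matter
Y = X.dot(W) + 0.01 * rng.randn(50, 3)

clf = MultiTaskLasso(alpha=0.1).fit(X, Y)
print(clf.coef_.shape)                       # (n_targets, n_features) == (3, 10)
# The mixed-norm penalty zeroes whole feature columns across all tasks:
kept = np.sum(np.abs(clf.coef_), axis=0) != 0
print(int(kept.sum()), "features kept")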
Example #1
Source File: test_mtl.py From celer with BSD 3-Clause "New" or "Revised" License
def test_MultiTaskLasso(fit_intercept):
    """Test that our MultiTaskLasso behaves as sklearn's."""
    X, Y = build_dataset(n_samples=20, n_features=30, n_targets=10)
    alpha_max = np.max(norm(X.T.dot(Y), axis=1)) / X.shape[0]
    alpha = alpha_max / 2.
    params = dict(alpha=alpha, fit_intercept=fit_intercept, tol=1e-10,
                  normalize=True)
    clf = MultiTaskLasso(**params)
    clf.verbose = 2
    clf.fit(X, Y)

    clf2 = sklearn_MultiTaskLasso(**params)
    clf2.fit(X, Y)
    np.testing.assert_allclose(clf.coef_, clf2.coef_, rtol=1e-5)
    if fit_intercept:
        np.testing.assert_allclose(clf.intercept_, clf2.intercept_)

    clf.tol = 1e-7
    check_estimator(clf)
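A note on this test: alpha_max is the usual multi-task lasso critical penalty, the smallest alpha at which every coefficient is zero, and the test then fits at half that value. The sketch below illustrates that relationship with scikit-learn alone on made-up data, using fit_intercept=False so no centering is involved, since celer's build_dataset and check_estimator helpers are not shown here. (The normalize parameter used above has been removed from recent scikit-learn releases, so the original snippet targets an older scikit-learn.)

import numpy as np
from numpy.linalg import norm
from sklearn.linear_model import MultiTaskLasso

rng = np.random.RandomState(0)
X = rng.randn(20, 30)
Y = rng.randn(20, 10)

# Row-wise L2 norms of X^T Y, divided by n_samples: the critical penalty.
alpha_max = np.max(norm(X.T.dot(Y), axis=1)) / X.shape[0]

# At alpha_max the fitted coefficients are (numerically) all zero ...
coef_at_max = MultiTaskLasso(alpha=alpha_max, fit_intercept=False).fit(X, Y).coef_
assert np.allclose(coef_at_max, 0.0)

# ... while at alpha_max / 2, the value the test uses, some features stay active.
coef_at_half = MultiTaskLasso(alpha=alpha_max / 2, fit_intercept=False).fit(X, Y).coef_
assert np.abs(coef_at_half).max() > 1e-8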
Example #2
Source File: match_space.py From SparseSC with MIT License
def _MTLassoCV_MatchSpace(
    X, Y, v_pens=None, n_v_cv=5, sample_frac=1, Y_col_block_size=None,
    se_factor=None, normalize=True, **kwargs
):  # pylint: disable=missing-param-doc, unused-argument
    # A fake MT would do Lasso on y_mean = Y.mean(axis=1)
    if sample_frac < 1:
        N = X.shape[0]
        sample = np.random.choice(N, int(sample_frac * N), replace=False)
        X = X[sample, :]
        Y = Y[sample, :]
    if Y_col_block_size is not None:
        Y = _block_summ_cols(Y, Y_col_block_size)
    varselectorfit = MultiTaskLassoCV(normalize=normalize, cv=n_v_cv, alphas=v_pens).fit(
        X, Y
    )
    best_v_pen = varselectorfit.alpha_
    if se_factor is not None:
        best_v_pen = _neg_se_rule(varselectorfit, factor=se_factor)
        varselectorfit = MultiTaskLasso(alpha=best_v_pen, normalize=normalize).fit(X, Y)
    V = np.sqrt(
        np.sum(np.square(varselectorfit.coef_), axis=0)
    )  # n_tasks x n_features -> n_feature
    m_sel = V != 0
    transformer = SelMatchSpace(m_sel)
    return transformer, V[m_sel], best_v_pen, (V, varselectorfit)
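The selection step at the heart of this helper can be sketched with scikit-learn alone: cross-validate a penalty with MultiTaskLassoCV, refit MultiTaskLasso at that penalty, and collapse the (n_tasks x n_features) coefficient matrix into one non-negative weight per feature. The snippet below shows only that idea on made-up data; SparseSC's internals (SelMatchSpace, _block_summ_cols, _neg_se_rule) are not reproduced, and normalize is omitted because newer scikit-learn versions no longer accept it.

import numpy as np
from sklearn.linear_model import MultiTaskLasso, MultiTaskLassoCV

rng = np.random.RandomState(0)
X = rng.randn(100, 20)
W = np.zeros((20, 5))
W[:3] = rng.randn(3, 5)                      # 3 informative features, 5 targets
Y = X.dot(W) + 0.1 * rng.randn(100, 5)

# Cross-validated penalty choice, then a plain refit at that penalty.
cv_fit = MultiTaskLassoCV(cv=5).fit(X, Y)
best_v_pen = cv_fit.alpha_
refit = MultiTaskLasso(alpha=best_v_pen).fit(X, Y)

# Per-feature weight: L2 norm of each coefficient column across tasks.
V = np.sqrt(np.sum(np.square(refit.coef_), axis=0))   # coef_ is (n_targets, n_features)
m_sel = V != 0                                          # boolean feature mask
print(best_v_pen, int(m_sel.sum()), "features selected")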
Example #3
Source File: test_sklearn_glm_regressor_converter.py From sklearn-onnx with MIT License
def test_model_multi_task_lasso(self):
    model, X = fit_regression_model(linear_model.MultiTaskLasso(),
                                    n_targets=2)
    model_onnx = convert_sklearn(
        model, "multi-task lasso",
        [("input", FloatTensorType([None, X.shape[1]]))])
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X, model, model_onnx, verbose=False,
        basename="SklearnMultiTaskLasso-Dec4",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
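For context, a model converted this way can be scored with onnxruntime and checked against scikit-learn's own predictions. The following is only a rough sketch of that round trip on invented data; the test's fit_regression_model and dump_data_and_model helpers come from sklearn-onnx's test utilities and are not reproduced here.

import numpy as np
from sklearn.linear_model import MultiTaskLasso
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
import onnxruntime as rt

rng = np.random.RandomState(0)
X = rng.randn(40, 4).astype(np.float32)
Y = rng.randn(40, 2).astype(np.float32)      # two targets, as in the test above
model = MultiTaskLasso(alpha=0.1).fit(X, Y)

# Convert the fitted model to ONNX with a single float input of width n_features.
onx = convert_sklearn(
    model, "multi-task lasso",
    initial_types=[("input", FloatTensorType([None, X.shape[1]]))])

# Run the ONNX graph and compare its output with scikit-learn's predictions.
sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
onnx_pred = sess.run(None, {"input": X})[0]
np.testing.assert_allclose(model.predict(X), onnx_pred, rtol=1e-3, atol=1e-4)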
Example #4
Source File: match_space.py From SparseSC with MIT License
def _MTLassoMixed_MatchSpace(
    X, Y, fit_model_wrapper, v_pens=None, n_v_cv=5, **kwargs
):  # pylint: disable=missing-param-doc, unused-argument
    # Note that MultiTaskLasso(CV).path with the same alpha doesn't produce same results as MultiTaskLasso(CV)
    mtlasso_cv_fit = MultiTaskLassoCV(normalize=True, cv=n_v_cv, alphas=v_pens).fit(
        X, Y
    )
    # V_cv = np.sqrt(np.sum(np.square(mtlasso_cv_fit.coef_), axis=0))  # n_tasks x n_features -> n_feature
    # v_pen_cv = mtlasso_cv_fit.alpha_
    # m_sel_cv = (V_cv != 0)
    # sc_fit_cv = fit_model_wrapper(SelMatchSpace(m_sel_cv), V_cv[m_sel_cv])

    v_pens = mtlasso_cv_fit.alphas_
    # fits_single = {}
    Vs_single = {}
    scores = np.zeros((len(v_pens)))
    # R2s = np.zeros((len(v_pens)))
    for i, v_pen in enumerate(v_pens):
        mtlasso_i_fit = MultiTaskLasso(alpha=v_pen, normalize=True).fit(X, Y)
        V_i = np.sqrt(np.sum(np.square(mtlasso_i_fit.coef_), axis=0))
        m_sel_i = V_i != 0
        sc_fit_i = fit_model_wrapper(SelMatchSpace(m_sel_i), V_i[m_sel_i])
        # fits_single[i] = sc_fit_i
        Vs_single[i] = V_i
        scores[i] = sc_fit_i.score
        # R2s[i] = sc_fit_i.score_R2

    i_best = np.argmin(scores)
    # v_pen_best = v_pens[i_best]
    # i_cv = np.where(v_pens == v_pen_cv)[0][0]
    # print("CV alpha: " + str(v_pen_cv) + " (" + str(R2s[i_cv]) + ")." +
    #       " Best alpha: " + str(v_pen_best) + " (" + str(R2s[i_best]) + ") .")
    best_v_pen = v_pens[i_best]
    V_best = Vs_single[i_best]
    m_sel_best = V_best != 0
    return SelMatchSpace(m_sel_best), V_best[m_sel_best], best_v_pen, V_best
Example #5
Source File: test_linear_model.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.linear_model.ARDRegression, lm.ARDRegression)
    self.assertIs(df.linear_model.BayesianRidge, lm.BayesianRidge)
    self.assertIs(df.linear_model.ElasticNet, lm.ElasticNet)
    self.assertIs(df.linear_model.ElasticNetCV, lm.ElasticNetCV)
    self.assertIs(df.linear_model.HuberRegressor, lm.HuberRegressor)
    self.assertIs(df.linear_model.Lars, lm.Lars)
    self.assertIs(df.linear_model.LarsCV, lm.LarsCV)
    self.assertIs(df.linear_model.Lasso, lm.Lasso)
    self.assertIs(df.linear_model.LassoCV, lm.LassoCV)
    self.assertIs(df.linear_model.LassoLars, lm.LassoLars)
    self.assertIs(df.linear_model.LassoLarsCV, lm.LassoLarsCV)
    self.assertIs(df.linear_model.LassoLarsIC, lm.LassoLarsIC)
    self.assertIs(df.linear_model.LinearRegression, lm.LinearRegression)
    self.assertIs(df.linear_model.LogisticRegression, lm.LogisticRegression)
    self.assertIs(df.linear_model.LogisticRegressionCV, lm.LogisticRegressionCV)
    self.assertIs(df.linear_model.MultiTaskLasso, lm.MultiTaskLasso)
    self.assertIs(df.linear_model.MultiTaskElasticNet, lm.MultiTaskElasticNet)
    self.assertIs(df.linear_model.MultiTaskLassoCV, lm.MultiTaskLassoCV)
    self.assertIs(df.linear_model.MultiTaskElasticNetCV, lm.MultiTaskElasticNetCV)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuit, lm.OrthogonalMatchingPursuit)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuitCV, lm.OrthogonalMatchingPursuitCV)
    self.assertIs(df.linear_model.PassiveAggressiveClassifier, lm.PassiveAggressiveClassifier)
    self.assertIs(df.linear_model.PassiveAggressiveRegressor, lm.PassiveAggressiveRegressor)
    self.assertIs(df.linear_model.Perceptron, lm.Perceptron)
    self.assertIs(df.linear_model.RandomizedLasso, lm.RandomizedLasso)
    self.assertIs(df.linear_model.RandomizedLogisticRegression, lm.RandomizedLogisticRegression)
    self.assertIs(df.linear_model.RANSACRegressor, lm.RANSACRegressor)
    self.assertIs(df.linear_model.Ridge, lm.Ridge)
    self.assertIs(df.linear_model.RidgeClassifier, lm.RidgeClassifier)
    self.assertIs(df.linear_model.RidgeClassifierCV, lm.RidgeClassifierCV)
    self.assertIs(df.linear_model.RidgeCV, lm.RidgeCV)
    self.assertIs(df.linear_model.SGDClassifier, lm.SGDClassifier)
    self.assertIs(df.linear_model.SGDRegressor, lm.SGDRegressor)
    self.assertIs(df.linear_model.TheilSenRegressor, lm.TheilSenRegressor)