Python sklearn.linear_model Examples
The following are 16 code examples of the sklearn.linear_model module, collected from open-source projects.
You may also want to check out all available functions and classes of the sklearn module.
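Before diving into the examples, here is a minimal, self-contained sketch of the module's most common pattern: instantiate an estimator, fit it, and predict (the data here is made up for illustration):

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.arange(10).reshape(-1, 1)   # one feature, ten samples
y = 3.0 * X.ravel() + 2.0          # a perfectly linear target
reg = LinearRegression().fit(X, y)
print(reg.coef_, reg.intercept_)   # approximately [3.] and 2.0
print(reg.predict([[10.0]]))       # approximately [32.]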
Example #1
Source File: models.py From mlearn with BSD 3-Clause "New" or "Revised" License
def __init__(self, describer, regressor="LinearRegression", **kwargs):
    """
    Args:
        describer (Describer): Describer to convert structure objects
            to descriptors.
        regressor (str): Name of LinearModel from sklearn.linear_model.
            Default to "LinearRegression", i.e., ordinary least squares.
        kwargs: kwargs to be passed to regressor.
    """
    self.describer = describer
    self.regressor = regressor
    self.kwargs = kwargs
    import sklearn.linear_model as lm
    # Look up the estimator class by name, then instantiate it.
    lr = getattr(lm, regressor)
    self.model = lr(**kwargs)
    self._xtrain = None
    self._xtest = None
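The getattr lookup above works for any estimator class exported by sklearn.linear_model, which is what lets the regressor argument stay a plain string. A minimal sketch of the same pattern in isolation (the class name "Ridge" and its alpha value are arbitrary choices for illustration):

import sklearn.linear_model as lm

regressor_name = "Ridge"                   # any class name from sklearn.linear_model
regressor_cls = getattr(lm, regressor_name)
model = regressor_cls(alpha=0.5)           # kwargs forwarded to the constructor
print(type(model))                         # the Ridge estimator class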
Example #2
Source File: yatsm.py From yatsm with MIT License
def __init__(self, test_indices=None,
             estimator={'object': sklearn.linear_model.Lasso(alpha=20),
                        'fit': {}},
             **kwargs):
    self.test_indices = np.asarray(test_indices)
    # Clone so each instance gets a fresh, unfitted copy of the estimator.
    self.estimator = sklearn.clone(estimator['object'])
    self.estimator_fit = estimator.get('fit', {})
    self.models = []  # leave empty, fill in during `fit`

    self.n_record = 0
    self.record = []

    self.n_series, self.n_features = 0, 0

    self.px = kwargs.get('px', 0)
    self.py = kwargs.get('py', 0)
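The call to sklearn.clone deserves a note: it returns a fresh, unfitted estimator with the same hyperparameters, so each instance gets its own Lasso rather than sharing the mutable default argument. A small sketch of that behavior:

import sklearn
from sklearn.linear_model import Lasso

proto = Lasso(alpha=20)
copy_ = sklearn.clone(proto)   # new object, same parameters, no fitted state
assert copy_ is not proto
assert copy_.get_params() == proto.get_params()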
Example #3
Source File: test_base.py From Mastering-Elasticsearch-7.0 with MIT License
def test_regressormixin_score_multioutput():
    from sklearn.linear_model import LinearRegression
    # no warnings when y_type is continuous
    X = [[1], [2], [3]]
    y = [1, 2, 3]
    reg = LinearRegression().fit(X, y)
    assert_no_warnings(reg.score, X, y)
    # warn when y_type is continuous-multioutput
    y = [[1, 2], [2, 3], [3, 4]]
    reg = LinearRegression().fit(X, y)
    msg = ("The default value of multioutput (not exposed in "
           "score method) will change from 'variance_weighted' "
           "to 'uniform_average' in 0.23 to keep consistent "
           "with 'metrics.r2_score'. To specify the default "
           "value manually and avoid the warning, please "
           "either call 'metrics.r2_score' directly or make a "
           "custom scorer with 'metrics.make_scorer' (the "
           "built-in scorer 'r2' uses "
           "multioutput='uniform_average').")
    assert_warns_message(FutureWarning, msg, reg.score, X, y)
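As the warning text suggests, the way to sidestep the changing default is to call metrics.r2_score directly with an explicit multioutput argument. A short sketch (toy values for illustration):

from sklearn.metrics import r2_score

y_true = [[1, 2], [2, 3], [3, 4]]
y_pred = [[1, 2], [2, 3], [3, 5]]
print(r2_score(y_true, y_pred, multioutput='uniform_average'))
print(r2_score(y_true, y_pred, multioutput='variance_weighted'))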
Example #4
Source File: regression.py From Azimuth with BSD 3-Clause "New" or "Revised" License
def train_linreg_model(alpha, l1r, learn_options, fold, X, y, y_all):
    '''
    fold is something like train_inner (boolean array specifying what
    is in the fold)
    '''
    if learn_options["penalty"] == "L2":
        clf = sklearn.linear_model.Ridge(alpha=alpha,
                                         fit_intercept=learn_options["fit_intercept"],
                                         normalize=learn_options['normalize_features'],
                                         copy_X=True, max_iter=None, tol=0.001,
                                         solver='auto')
        weights = get_weights(learn_options, fold, y, y_all)
        clf.fit(X[fold], y[fold], sample_weight=weights)
    elif learn_options["penalty"] == 'EN' or learn_options["penalty"] == 'L1':
        if learn_options["loss"] == "squared":
            clf = sklearn.linear_model.ElasticNet(alpha=alpha, l1_ratio=l1r,
                                                  fit_intercept=learn_options["fit_intercept"],
                                                  normalize=learn_options['normalize_features'],
                                                  max_iter=3000)
        elif learn_options["loss"] == "huber":
            # positional 'huber' is the loss; n_iter is the pre-0.21
            # scikit-learn spelling of max_iter
            clf = sklearn.linear_model.SGDRegressor('huber', epsilon=0.7,
                                                    alpha=alpha, l1_ratio=l1r,
                                                    fit_intercept=learn_options["fit_intercept"],
                                                    n_iter=10, penalty='elasticnet',
                                                    shuffle=True,
                                                    normalize=learn_options['normalize_features'])
        clf.fit(X[fold], y[fold])
    return clf
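The L2 branch is the only one that passes sample_weight, which Ridge.fit accepts to up- or down-weight individual rows. A stripped-down sketch of that mechanism on random data (the normalize constructor argument used above was deprecated and later removed in modern scikit-learn, so it is omitted here):

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X, y = rng.rand(50, 3), rng.rand(50)
weights = np.ones(50)
weights[:10] = 5.0                         # emphasize the first ten samples
clf = Ridge(alpha=1.0).fit(X, y, sample_weight=weights)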
Example #5
Source File: test_scikit-learn.py From pyodide with Mozilla Public License 2.0
def test_scikit_learn(selenium_standalone, request):
    selenium = selenium_standalone
    if selenium.browser == "chrome":
        request.applymarker(pytest.mark.xfail(run=False, reason="chrome not supported"))
    selenium.load_package("scikit-learn")
    assert (
        selenium.run(
            """
            import numpy as np
            import sklearn
            from sklearn.linear_model import LogisticRegression

            rng = np.random.RandomState(42)
            X = rng.rand(100, 20)
            y = rng.randint(5, size=100)

            estimator = LogisticRegression(solver='liblinear')
            estimator.fit(X, y)
            print(estimator.predict(X))
            estimator.score(X, y)
            """
        )
        > 0
    )
Example #6
Source File: test_monkeypatch.py From daal4py with Apache License 2.0
def test_monkey_patching(self):
    _tokens = daal4py.sklearn.sklearn_patch_names()
    self.assertTrue(isinstance(_tokens, list) and len(_tokens) > 0)
    for t in _tokens:
        daal4py.sklearn.unpatch_sklearn(t)
    for t in _tokens:
        daal4py.sklearn.patch_sklearn(t)

    import sklearn
    for a in [(sklearn.decomposition, 'PCA'),
              (sklearn.linear_model, 'Ridge'),
              (sklearn.linear_model, 'LinearRegression'),
              (sklearn.cluster, 'KMeans'),
              (sklearn.svm, 'SVC')]:
        # After patching, the classes should resolve to daal4py implementations.
        class_module = getattr(a[0], a[1]).__module__
        self.assertTrue(class_module.startswith('daal4py'))
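Based on the same functions the test exercises, a minimal usage sketch of the patching round trip might look like this (assuming daal4py is installed):

import daal4py.sklearn

daal4py.sklearn.patch_sklearn()        # swap in the accelerated implementations

from sklearn.linear_model import Ridge
print(Ridge.__module__)                # starts with 'daal4py' while patched

daal4py.sklearn.unpatch_sklearn()      # restore stock scikit-learn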
Example #7
Source File: baselines.py From AirBnbPricePrediction with MIT License
def LinearModel(X_train, y_train, X_val, y_val):
    regr = linear_model.LinearRegression(n_jobs=int(0.8 * n_cores)).fit(X_train, y_train)
    y_pred = regr.predict(X_val)
    # print('--------- For Model: LinearRegression --------- \n')
    # print('Coefficients: \n', regr.coef_)
    print("Mean squared error: %.2f" % mean_squared_error(y_val, y_pred))
    print("R2: ", sklearn.metrics.r2_score(y_val, y_pred))
    # =========================================================================
    # plt.scatter(y_val, y_pred/y_val, color='black')
    # # plt.plot(x, y_pred, color='blue', linewidth=3)
    # plt.title('Linear Model Baseline')
    # plt.xlabel('$y_{test}$')
    # plt.ylabel('$y_{predicted}/y_{test}$')
    # plt.savefig('Linear Model Baseline.png', bbox_inches='tight')
    # =========================================================================
    return
Example #8
Source File: tensorsketch.py From fastchess with GNU General Public License v3.0
def __init__(self, to_path):
    self.to_path = to_path
    self.boards = []
    self.moves = []
    self.scores = []
    self.sketches = [FJL((6 * 2 * 64)**2, 10000)
                     # , FJL(6*2*64*1000, 1000)
                     ]
    self.move_model = sklearn.linear_model.SGDClassifier(loss='log', n_jobs=8)
    # , max_iter=100, tol=.01)
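loss='log' selects the logistic loss so the classifier can expose predict_proba; in scikit-learn 1.1 this spelling was renamed to 'log_loss', and the old name was removed in 1.3. A small sketch against the newer API, with made-up data:

import numpy as np
from sklearn.linear_model import SGDClassifier

X = np.random.rand(20, 4)
y = np.random.randint(2, size=20)
clf = SGDClassifier(loss='log_loss', n_jobs=8).fit(X, y)
proba = clf.predict_proba(X)           # available because the loss is logistic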
Example #9
Source File: tensorsketch.py From fastchess with GNU General Public License v3.0
def done(self):
    print('Caching data to games.cached')
    joblib.dump((self.boards, self.moves, self.scores), 'games.cached')
    n = len(self.boards)
    print(f'Got {n} examples')
    p = int(n * .8)  # 80/20 train/test split

    print('Training move model')
    # move_clf = self.move_model.partial_fit(self.boards[:p], self.moves[:p],
    #                                        classes=range(64**2))
    move_clf = self.move_model.fit(self.boards[:p], self.moves[:p]
                                   # , classes=range(64**2)
                                   )
    test = move_clf.score(self.boards[p:], self.moves[p:])
    # clf = sklearn.linear_model.LogisticRegression(
    #     solver='saga', multi_class='auto', verbose=1)
    print(f'Test score: {test}')

    print('Training score model.')
    model = sklearn.linear_model.LinearRegression(n_jobs=8)
    score_clf = model.fit(self.boards[:p], self.scores[:p])
    test = score_clf.score(self.boards[p:], self.scores[p:])
    print(f'Test score: {test}')

    joblib.dump(Model(move_clf, score_clf, self.sketches), self.to_path)
    print(f'Saved model as {self.to_path}')
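The commented-out partial_fit line hints at the incremental alternative: when the data does not fit in memory, SGDClassifier can be trained chunk by chunk, with the full label set declared on the first call. A hedged sketch of that pattern (first_X, first_y, and more_batches are hypothetical placeholders):

import numpy as np
from sklearn.linear_model import SGDClassifier

clf = SGDClassifier(loss='log_loss')
classes = np.arange(64**2)                          # every label the stream may contain
clf.partial_fit(first_X, first_y, classes=classes)  # classes required on the first call
for X_chunk, y_chunk in more_batches:
    clf.partial_fit(X_chunk, y_chunk)               # and may be omitted afterwards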
Example #10
Source File: field_based_ml_field_detection.py From lexpredict-contraxsuite with GNU Affero General Public License v3.0
def init_classifier_impl(field_code: str, init_script: str):
    if init_script is not None:
        init_script = init_script.strip()

    if not init_script:
        # Default to a decision tree when no init script is provided.
        from sklearn import tree as sklearn_tree
        return sklearn_tree.DecisionTreeClassifier()

    from sklearn import tree as sklearn_tree
    from sklearn import neural_network as sklearn_neural_network
    from sklearn import neighbors as sklearn_neighbors
    from sklearn import svm as sklearn_svm
    from sklearn import gaussian_process as sklearn_gaussian_process
    from sklearn.gaussian_process import kernels as sklearn_gaussian_process_kernels
    from sklearn import ensemble as sklearn_ensemble
    from sklearn import naive_bayes as sklearn_naive_bayes
    from sklearn import discriminant_analysis as sklearn_discriminant_analysis
    from sklearn import linear_model as sklearn_linear_model

    eval_locals = {
        'sklearn_linear_model': sklearn_linear_model,
        'sklearn_tree': sklearn_tree,
        'sklearn_neural_network': sklearn_neural_network,
        'sklearn_neighbors': sklearn_neighbors,
        'sklearn_svm': sklearn_svm,
        'sklearn_gaussian_process': sklearn_gaussian_process,
        'sklearn_gaussian_process_kernels': sklearn_gaussian_process_kernels,
        'sklearn_ensemble': sklearn_ensemble,
        'sklearn_naive_bayes': sklearn_naive_bayes,
        'sklearn_discriminant_analysis': sklearn_discriminant_analysis
    }
    return eval_script('classifier init script of field {0}'.format(field_code),
                       init_script, eval_locals)
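The eval_locals mapping means an init script is just a Python expression over those aliased modules. Assuming eval_script ultimately evaluates the string against eval_locals (a simplification of the project's helper), a script configuring a linear model could be as short as:

init_script = "sklearn_linear_model.LogisticRegression(C=10.0, max_iter=500)"  # hypothetical script text
classifier = eval(init_script, {}, eval_locals)    # yields an unfitted LogisticRegression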
Example #11
Source File: example.py From d6tflow with MIT License
def run(self):
    df_train = self.input().load()
    if self.model == 'ols':
        model = sklearn.linear_model.LogisticRegression()
    elif self.model == 'svm':
        model = sklearn.svm.SVC()
    else:
        raise ValueError('invalid model selection')
    model.fit(df_train.iloc[:, :-1], df_train['y'])
    self.save(model)

# Check task dependencies and their execution status
Example #12
Source File: main.py From DeeplearningAI_AndrewNg with MIT License
def simple_lr(self):
    clf = sklearn.linear_model.LogisticRegressionCV()
    clf.fit(self.X.T, np.squeeze(self.Y))
    print(self.X.T.shape)
    plot_decision_boundary(lambda x: clf.predict(x), self.X, np.squeeze(self.Y))
    plt.title("Logistic Regression")
    plt.show()
    LR_predictions = clf.predict(self.X.T)
    print('Accuracy of logistic regression: %d ' % float(
        (np.dot(self.Y, LR_predictions) + np.dot(1 - self.Y, 1 - LR_predictions))
        / float(self.Y.size) * 100)
        + '% ' + "(percentage of correctly labelled datapoints)")
    return self
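The dot-product expression computes plain binary accuracy: matches on the positive class plus matches on the negative class, divided by the sample count. sklearn.metrics.accuracy_score gives the same number more readably (toy labels for illustration):

import numpy as np
from sklearn.metrics import accuracy_score

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])
acc = accuracy_score(y_true, y_pred)       # equals the dot-product formula above
print('Accuracy: %d%%' % (acc * 100))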
Example #13
Source File: ensembles.py From Azimuth with BSD 3-Clause "New" or "Revised" License
def linear_stacking(y_train, X_train, X_test):
    clf = sklearn.linear_model.LinearRegression()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    return y_pred.flatten()
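This is the classic meta-learner step of stacked generalization: the columns of X_train and X_test are base-model predictions, and the linear model learns how to blend them. A hedged usage sketch (the base-prediction arrays are hypothetical placeholders):

import numpy as np

X_train_meta = np.column_stack([preds_model_a_train, preds_model_b_train])
X_test_meta = np.column_stack([preds_model_a_test, preds_model_b_test])
y_blend = linear_stacking(y_train, X_train_meta, X_test_meta)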
Example #14
Source File: run_models.py From AirBnbPricePrediction with MIT License
def LinearModel(X_train, y_train, X_val, y_val):
    regr = linear_model.LinearRegression(n_jobs=int(0.8 * n_cores)).fit(X_train, y_train)
    print_evaluation_metrics(regr, "linear model", X_val, y_val.values.ravel())
    print_evaluation_metrics2(regr, "linear model", X_train, y_train.values.ravel())
    return
Example #15
Source File: test_metrics.py From dask-ml with BSD 3-Clause "New" or "Revised" License
def test_log_loss_scoring(y):
    # a_scorer = sklearn.metrics.get_scorer('neg_log_loss')
    # b_scorer = dask_ml.metrics.get_scorer('neg_log_loss')
    X = da.random.uniform(size=(4, 2), chunks=2)
    labels = np.unique(y)
    y = da.from_array(np.array(y), chunks=2)

    a_scorer = sklearn.metrics.make_scorer(
        sklearn.metrics.log_loss,
        greater_is_better=False,
        needs_proba=True,
        labels=labels,
    )
    b_scorer = sklearn.metrics.make_scorer(
        dask_ml.metrics.log_loss,
        greater_is_better=False,
        needs_proba=True,
        labels=labels,
    )

    clf = dask_ml.wrappers.ParallelPostFit(
        sklearn.linear_model.LogisticRegression(
            n_jobs=1, solver="lbfgs", multi_class="auto"
        )
    )
    clf.fit(*dask.compute(X, y))

    result = b_scorer(clf, X, y)
    expected = a_scorer(clf, *dask.compute(X, y))
    assert_eq(result, expected)
Example #16
Source File: class1_presentation_predictor.py From mhcflurry with Apache License 2.0
def get_model(self, name=None):
    """
    Load or instantiate a new logistic regression model. Private helper
    method.

    Parameters
    ----------
    name : string
        If None (the default), an un-fit LR model is returned. Otherwise
        the weights are loaded for the specified model.

    Returns
    -------
    sklearn.linear_model.LogisticRegression
    """
    if name is None or name not in self._models_cache:
        model = sklearn.linear_model.LogisticRegression(solver="lbfgs")
        if name is not None:
            # Rehydrate a fitted model from stored weights instead of calling fit.
            row = self.weights_dataframe.loc[name]
            model.intercept_ = row.intercept
            model.coef_ = numpy.expand_dims(row[self.model_inputs].values, axis=0)
            model.classes_ = numpy.array([0, 1])
    else:
        model = self._models_cache[name]
    return model
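The interesting trick here is rehydrating a fitted LogisticRegression without ever calling fit: setting coef_, intercept_, and classes_ by hand is enough for predict and predict_proba to work. A self-contained sketch with made-up weights:

import numpy as np
from sklearn.linear_model import LogisticRegression

model = LogisticRegression(solver="lbfgs")
model.coef_ = np.array([[0.5, -1.2, 2.0]])   # shape (1, n_features) for binary
model.intercept_ = np.array([0.1])
model.classes_ = np.array([0, 1])

X = np.random.rand(4, 3)
print(model.predict_proba(X))                # usable without calling fit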