Python sklearn.neural_network.MLPRegressor() Examples
The following are 30 code examples of sklearn.neural_network.MLPRegressor(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module sklearn.neural_network.
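Before diving into the project examples, here is a minimal, self-contained sketch of basic MLPRegressor usage. It uses synthetic data and illustrative parameter choices, and is not taken from any of the projects below:

from sklearn.datasets import make_regression
from sklearn.neural_network import MLPRegressor

# synthetic regression data; parameter choices are illustrative only
X, y = make_regression(n_samples=200, n_features=10, noise=0.1, random_state=0)
reg = MLPRegressor(hidden_layer_sizes=(50,), max_iter=2000, random_state=0)
reg.fit(X, y)
print(reg.score(X, y))  # coefficient of determination (R^2) on the training data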
Example #1
Source File: testScoreWithAdapaSklearn.py From nyoka with Apache License 2.0
def test_37_mlp_regressor(self):
    print("\ntest 37 (mlp regressor without preprocessing)\n")
    X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
    model = MLPRegressor()
    pipeline_obj = Pipeline([
        ("model", model)
    ])
    pipeline_obj.fit(X, y)
    file_name = 'test37sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
Example #2
Source File: test_sklearn_mlp_converter.py From sklearn-onnx with MIT License
def test_model_mlp_regressor_logistic(self):
    model, X_test = fit_regression_model(
        MLPRegressor(random_state=42, activation="logistic"))
    model_onnx = convert_sklearn(
        model,
        "scikit-learn MLPRegressor",
        [("input", FloatTensorType([None, X_test.shape[1]]))],
    )
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X_test,
        model,
        model_onnx,
        basename="SklearnMLPRegressorLogisticActivation-Dec4",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
Example #3
Source File: test_sklearn_mlp_converter.py From sklearn-onnx with MIT License
def test_model_mlp_regressor_identity(self):
    model, X_test = fit_regression_model(
        MLPRegressor(random_state=42, activation="identity"), is_int=True)
    model_onnx = convert_sklearn(
        model,
        "scikit-learn MLPRegressor",
        [("input", Int64TensorType([None, X_test.shape[1]]))],
    )
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X_test,
        model,
        model_onnx,
        basename="SklearnMLPRegressorIdentityActivation-Dec4",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
Example #4
Source File: test_sklearn_mlp_converter.py From sklearn-onnx with MIT License
def test_model_mlp_regressor_default(self):
    model, X_test = fit_regression_model(
        MLPRegressor(random_state=42))
    model_onnx = convert_sklearn(
        model,
        "scikit-learn MLPRegressor",
        [("input", FloatTensorType([None, X_test.shape[1]]))],
    )
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X_test,
        model,
        model_onnx,
        basename="SklearnMLPRegressor-Dec4",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
Example #5
Source File: test_sklearn_one_vs_rest_classifier_converter.py From sklearn-onnx with MIT License
def test_ovr_regression_float_mlp(self):
    model, X = fit_classification_model(
        OneVsRestClassifier(MLPRegressor()), 5)
    model_onnx = convert_sklearn(
        model,
        "ovr regression",
        [("input", FloatTensorType([None, X.shape[1]]))],
        target_opset=TARGET_OPSET
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X,
        model,
        model_onnx,
        basename="SklearnOVRRegressionFloatMLP-Out0",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
Example #6
Source File: test_standardization.py From causallib with Apache License 2.0
def ensure_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR

    import warnings
    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                    ElasticNet, RANSACRegressor, HuberRegressor,
                    PassiveAggressiveRegressor, KNeighborsRegressor, SVR, LinearSVR]:
        learner = learner()
        learner_name = str(learner).split("(", maxsplit=1)[0]
        with self.subTest("Test fit using {learner}".format(learner=learner_name)):
            model = self.estimator.__class__(learner)
            model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
            self.assertTrue(True)  # Fit did not crash
Example #7
Source File: test_sklearn_mlp_converter.py From sklearn-onnx with MIT License
def test_model_mlp_regressor_bool(self):
    model, X_test = fit_regression_model(
        MLPRegressor(random_state=42), is_bool=True)
    model_onnx = convert_sklearn(
        model,
        "scikit-learn MLPRegressor",
        [("input", BooleanTensorType([None, X_test.shape[1]]))],
    )
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X_test,
        model,
        model_onnx,
        basename="SklearnMLPRegressorBool",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
Example #8
Source File: test_abstract_models.py From delira with GNU Affero General Public License v3.0
def _setup_sklearn(*args):
    from delira.models import SklearnEstimator
    from sklearn.neural_network import MLPRegressor

    class Model(SklearnEstimator):
        def __init__(self):
            # prefit to enable prediction mode afterwards
            module = MLPRegressor()
            module.fit(*args)
            super().__init__(module)

        @staticmethod
        def prepare_batch(batch: dict, input_device, output_device):
            return batch

    return Model()
Example #9
Source File: test_sklearn_glm_regressor_converter.py From sklearn-onnx with MIT License
def test_model_ransac_regressor_mlp(self):
    model, X = fit_regression_model(
        linear_model.RANSACRegressor(
            base_estimator=MLPRegressor(solver='lbfgs')))
    model_onnx = convert_sklearn(
        model, "ransac regressor",
        [("input", FloatTensorType([None, X.shape[1]]))])
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X,
        model,
        model_onnx,
        verbose=False,
        basename="SklearnRANSACRegressorMLP-Dec3",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
Example #10
Source File: dummy_outcome_refuter.py From dowhy with MIT License
def _get_regressor_object(self, action, **func_args):
    """Return a sklearn estimator object based on the estimator and corresponding parameters

    - 'action': str
        The sklearn estimator used.
    - 'func_args': variable length keyworded argument
        The parameters passed to the sklearn estimator.
    """
    if action == "linear_regression":
        return LinearRegression(**func_args)
    elif action == "knn":
        return KNeighborsRegressor(**func_args)
    elif action == "svm":
        return SVR(**func_args)
    elif action == "random_forest":
        return RandomForestRegressor(**func_args)
    elif action == "neural_network":
        return MLPRegressor(**func_args)
    else:
        raise ValueError("The function: {} is not supported by dowhy at the moment.".format(action))
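A hedged usage sketch of the factory above; the refuter instance and parameter values are assumptions for illustration, not part of dowhy's documented API:

# Hypothetical call: the action string selects the estimator class and
# the keyword arguments are forwarded to its constructor.
estimator = refuter._get_regressor_object("neural_network",
                                          hidden_layer_sizes=(10,), max_iter=500)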
Example #11
Source File: test_mlp.py From twitter-stock-recommendation with MIT License
def test_partial_fit_regression():
    # Test partial_fit on regression.
    # `partial_fit` should yield the same results as 'fit' for regression.
    X = Xboston
    y = yboston

    for momentum in [0, .9]:
        mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
                           random_state=1, learning_rate_init=0.01,
                           batch_size=X.shape[0], momentum=momentum)
        with warnings.catch_warnings(record=True):
            # catch convergence warning
            mlp.fit(X, y)
            pred1 = mlp.predict(X)
        mlp = MLPRegressor(solver='sgd', activation='relu',
                           learning_rate_init=0.01, random_state=1,
                           batch_size=X.shape[0], momentum=momentum)
        for i in range(100):
            mlp.partial_fit(X, y)

        pred2 = mlp.predict(X)
        assert_almost_equal(pred1, pred2, decimal=2)
        score = mlp.score(X, y)
        assert_greater(score, 0.75)
Example #12
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_partial_fit_regression():
    # Test partial_fit on regression.
    # `partial_fit` should yield the same results as 'fit' for regression.
    X = Xboston
    y = yboston

    for momentum in [0, .9]:
        mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
                           random_state=1, learning_rate_init=0.01,
                           batch_size=X.shape[0], momentum=momentum)
        with warnings.catch_warnings(record=True):
            # catch convergence warning
            mlp.fit(X, y)
            pred1 = mlp.predict(X)
        mlp = MLPRegressor(solver='sgd', activation='relu',
                           learning_rate_init=0.01, random_state=1,
                           batch_size=X.shape[0], momentum=momentum)
        for i in range(100):
            mlp.partial_fit(X, y)

        pred2 = mlp.predict(X)
        assert_almost_equal(pred1, pred2, decimal=2)
        score = mlp.score(X, y)
        assert_greater(score, 0.75)
Example #13
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_shuffle():
    # Test that the shuffle parameter affects the training process (it should)
    X, y = make_regression(n_samples=50, n_features=5, n_targets=1,
                           random_state=0)

    # The coefficients will be identical if both do or do not shuffle
    for shuffle in [True, False]:
        mlp1 = MLPRegressor(hidden_layer_sizes=1, max_iter=1, batch_size=1,
                            random_state=0, shuffle=shuffle)
        mlp2 = MLPRegressor(hidden_layer_sizes=1, max_iter=1, batch_size=1,
                            random_state=0, shuffle=shuffle)
        mlp1.fit(X, y)
        mlp2.fit(X, y)

        assert np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])

    # The coefficients will be slightly different if shuffle=True
    mlp1 = MLPRegressor(hidden_layer_sizes=1, max_iter=1, batch_size=1,
                        random_state=0, shuffle=True)
    mlp2 = MLPRegressor(hidden_layer_sizes=1, max_iter=1, batch_size=1,
                        random_state=0, shuffle=False)
    mlp1.fit(X, y)
    mlp2.fit(X, y)

    assert not np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])
Example #14
Source File: PLECscore.py From oddt with BSD 3-Clause "New" or "Revised" License
def gen_json(self, home_dir=None, pdbbind_version=2016):
    if not home_dir:
        home_dir = path_join(dirname(__file__), 'PLECscore')

    if isinstance(self.model, SGDRegressor):
        attributes = ['coef_', 'intercept_', 't_']
    elif isinstance(self.model, MLPRegressor):
        attributes = ['loss_', 'coefs_', 'intercepts_', 'n_iter_',
                      'n_layers_', 'n_outputs_', 'out_activation_']

    out = {}
    for attr_name in attributes:
        attr = getattr(self.model, attr_name)
        # convert numpy arrays to list for json
        if isinstance(attr, np.ndarray):
            attr = attr.tolist()
        elif (isinstance(attr, (list, tuple)) and
              isinstance(attr[0], np.ndarray)):
            attr = [x.tolist() for x in attr]
        out[attr_name] = attr

    json_path = path_join(home_dir, 'plecscore_%s_p%i_l%i_s%i_pdbbind%i.json' %
                          (self.version, self.depth_protein,
                           self.depth_ligand, self.size, pdbbind_version))

    with open(json_path, 'w') as json_f:
        json.dump(out, json_f, indent=2)
    return json_path
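As a complement, a minimal sketch of how such a JSON dump could be restored onto a fresh MLPRegressor. The attribute names mirror the list used above; the loader itself is hypothetical and not part of oddt. Whether predict() works on an estimator patched this way depends on the scikit-learn version, which is why some projects (see the cart_pole example below) first fit for one iteration:

import json
import numpy as np
from sklearn.neural_network import MLPRegressor

def load_mlp_from_json(json_path):
    # hypothetical inverse of gen_json for the MLPRegressor case
    with open(json_path) as json_f:
        out = json.load(json_f)
    model = MLPRegressor()
    model.coefs_ = [np.asarray(w) for w in out['coefs_']]
    model.intercepts_ = [np.asarray(b) for b in out['intercepts_']]
    for attr in ('loss_', 'n_iter_', 'n_layers_', 'n_outputs_', 'out_activation_'):
        setattr(model, attr, out[attr])
    return model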
Example #15
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
    X = Xboston
    y = yboston
    for activation in ACTIVATION_TYPES:
        mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        mlp.fit(X, y)
        if activation == 'identity':
            assert_greater(mlp.score(X, y), 0.84)
        else:
            # Non linear models perform much better than linear bottleneck:
            assert_greater(mlp.score(X, y), 0.95)
Example #16
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_multioutput_regression():
    # Test that multi-output regression works as expected
    X, y = make_regression(n_samples=200, n_targets=5)
    mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
                       random_state=1)
    mlp.fit(X, y)
    assert_greater(mlp.score(X, y), 0.9)
Example #17
Source File: test_algebra_cascade.py From sklearn-onnx with MIT License
def test_model_mlp_regressor_default(self):
    model, X_test = fit_regression_model(
        MLPRegressor(random_state=42))
    exp = model.predict(X_test)
    for opv in (1, 2, 7, 8, 9, 10, 11, 12, onnx_opset_version()):
        if opv is not None and opv > get_latest_tested_opset_version():
            continue
        try:
            onx = convert_sklearn(
                model, "scikit-learn MLPRegressor",
                [("input", FloatTensorType([None, X_test.shape[1]]))],
                target_opset=opv)
        except RuntimeError as e:
            if ("is higher than the number of the "
                    "installed onnx package") in str(e):
                continue
            raise e
        as_string = onx.SerializeToString()
        try:
            ort = InferenceSession(as_string)
        except (RuntimeError, InvalidGraph, Fail) as e:
            if opv in (None, 1, 2):
                continue
            if opv >= onnx_opset_version():
                continue
            if ("No suitable kernel definition found for "
                    "op Cast(9)") in str(e):
                # too old onnxruntime
                continue
            raise AssertionError(
                "Unable to load opv={}\n---\n{}\n---".format(
                    opv, onx)) from e
        res_out = ort.run(None, {'input': X_test})
        assert len(res_out) == 1
        res = res_out[0]
        assert_almost_equal(exp.ravel(), res.ravel(), decimal=4)
Example #18
Source File: _model.py From scitime with BSD 3-Clause "New" or "Revised" License
def _random_search(self, inputs, outputs, iterations, save_model=False):
    """
    performs a random search on the NN meta algo to find the best params

    :param inputs: pd.DataFrame chosen as input
    :param outputs: pd.DataFrame chosen as output
    :param iterations: Number of parameter settings that are sampled
    :param save_model: boolean set to True if the model needs to be saved
    :return: best meta_algo with parameters
    :rtype: scikit learn RandomizedSearchCV object
    """
    X, y, cols, original_cols = self._transform_data(inputs, outputs)

    if self.meta_algo != 'NN':
        raise KeyError(f'meta algo {self.meta_algo} not supported for random search')

    parameter_space = config("random_search_params")
    meta_algo = MLPRegressor(max_iter=200)

    X_train, X_test, y_train, y_test \
        = train_test_split(X, y, test_size=0.20, random_state=42)

    X_train, X_test = self._scale_data(X_train, X_test, save_model)

    meta_algo = RandomizedSearchCV(meta_algo, parameter_space,
                                   n_iter=iterations, n_jobs=2)
    meta_algo.fit(X_train, y_train)

    if self.verbose >= 2:
        self.logger.info(f'Best parameters found: {meta_algo.best_estimator_}')

    return meta_algo
Example #19
Source File: model.py From numerox with GNU General Public License v3.0
def fit_predict(self, dfit, dpre, tournament):
    clf = MLPC(hidden_layer_sizes=self.p['layers'],
               alpha=self.p['alpha'],
               activation=self.p['activation'],
               learning_rate_init=self.p['learn'],
               random_state=self.p['seed'],
               max_iter=200)
    clf.fit(dfit.x, dfit.y[tournament])
    yhat = clf.predict(dpre.x)
    return dpre.ids, yhat


# model used by numerai to generate example_predictions.csv
Example #20
Source File: run_models.py From AirBnbPricePrediction with MIT License
def get_mlp_regressor(num_hidden_units=51):
    mlp = MLPRegressor(hidden_layer_sizes=num_hidden_units)
    return [mlp], ['Multi-Layer Perceptron']
Example #21
Source File: baselines.py From AirBnbPricePrediction with MIT License
def get_mlp_regressor(num_hidden_units=51):
    mlp = MLPRegressor(hidden_layer_sizes=num_hidden_units)
    return [mlp], ['Multi-Layer Perceptron']
Example #22
Source File: cart_pole.py From Hands-On-Genetic-Algorithms-with-Python with MIT License
def initMlp(self, netParams):
    """
    initializes a MultiLayer Perceptron (MLP) Regressor with the desired network architecture (layers)
    and network parameters (weights and biases).
    :param netParams: a list of floats representing the network parameters (weights and biases) of the MLP
    :return: initialized MLP Regressor
    """

    # create the initial MLP:
    mlp = MLPRegressor(hidden_layer_sizes=(HIDDEN_LAYER,), max_iter=1)

    # This will initialize input and output layers, and nodes weights and biases:
    # we are not otherwise interested in training the MLP here, hence the settings max_iter=1 above
    mlp.fit(np.random.uniform(low=-1, high=1, size=INPUTS).reshape(1, -1), np.ones(OUTPUTS))

    # weights are represented as a list of 2 ndarrays:
    # - hidden layer weights: INPUTS x HIDDEN_LAYER
    # - output layer weights: HIDDEN_LAYER x OUTPUTS
    numWeights = INPUTS * HIDDEN_LAYER + HIDDEN_LAYER * OUTPUTS
    weights = np.array(netParams[:numWeights])
    mlp.coefs_ = [
        weights[0:INPUTS * HIDDEN_LAYER].reshape((INPUTS, HIDDEN_LAYER)),
        weights[INPUTS * HIDDEN_LAYER:].reshape((HIDDEN_LAYER, OUTPUTS))
    ]

    # biases are represented as a list of 2 ndarrays:
    # - hidden layer biases: HIDDEN_LAYER x 1
    # - output layer biases: OUTPUTS x 1
    biases = np.array(netParams[numWeights:])
    mlp.intercepts_ = [biases[:HIDDEN_LAYER], biases[HIDDEN_LAYER:]]

    return mlp
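For illustration, a small hypothetical helper (not part of the book's code) that inverts initMlp, flattening a network's weights and biases back into the flat parameter vector a genetic algorithm would evolve. The ordering matches the slicing above: all weights first, then all biases.

import numpy as np

def mlpToParams(mlp):
    # ravel() is the row-major inverse of the reshape() calls in initMlp
    weights = np.concatenate([w.ravel() for w in mlp.coefs_])
    biases = np.concatenate([b.ravel() for b in mlp.intercepts_])
    return np.concatenate([weights, biases]).tolist()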
Example #23
Source File: test_mlp.py From twitter-stock-recommendation with MIT License
def test_multioutput_regression():
    # Test that multi-output regression works as expected
    X, y = make_regression(n_samples=200, n_targets=5)
    mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
                       random_state=1)
    mlp.fit(X, y)
    assert_greater(mlp.score(X, y), 0.9)
Example #24
Source File: test_doublyrobust.py From causallib with Apache License 2.0
def ensure_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR

    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.neighbors import KNeighborsClassifier

    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    data = self.create_uninformative_ox_dataset()
    for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                               RandomForestClassifier(n_estimators=100),
                               MLPClassifier(hidden_layer_sizes=(5,)),
                               KNeighborsClassifier(n_neighbors=20)]:
        weight_model = IPW(propensity_learner)
        propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
        for outcome_learner in [GradientBoostingRegressor(n_estimators=10),
                                RandomForestRegressor(n_estimators=10),
                                MLPRegressor(hidden_layer_sizes=(5,)),
                                ElasticNet(), RANSACRegressor(), HuberRegressor(),
                                PassiveAggressiveRegressor(), KNeighborsRegressor(),
                                SVR(), LinearSVR()]:
            outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
            outcome_model = Standardization(outcome_learner)
            with self.subTest("Test fit & predict using {} & {}".format(propensity_learner_name,
                                                                        outcome_learner_name)):
                model = self.estimator.__class__(outcome_model, weight_model)
                model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                model.estimate_individual_outcome(data["X"], data["a"])
                self.assertTrue(True)  # Fit did not crash
Example #25
Source File: SpectraLearnPredict.py From SpectralMachine with GNU General Public License v3.0
def predNN(clf, A, Cl, R):
    if nnDef.MLPRegressor is False:
        prob = clf.predict_proba(R)[0].tolist()
        rosterPred = np.where(clf.predict_proba(R)[0] > nnDef.thresholdProbabilityPred/100)[0]
        print('\n ==============================')
        print(' \033[1mNN\033[0m - Probability >', str(nnDef.thresholdProbabilityPred), '%')
        print(' ==============================')
        print(' Prediction\tProbability [%]')
        for i in range(rosterPred.shape[0]):
            print(' ', str(np.unique(Cl)[rosterPred][i]), '\t\t',
                  str('{:.4f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
        print(' ==============================')

        predValue = clf.predict(R)[0]
        predProb = round(100*max(prob), 4)
        print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - sklearn) = ' +
              str(predValue) + ' (probability = ' + str(predProb) + '%)\033[0m\n')
    else:
        Cl = np.array(Cl, dtype=float)
        predValue = clf.predict(R)[0]
        predProb = clf.score(A, Cl)
        print('\033[1m' + '\n Predicted regressor value (Deep Neural Networks - sklearn) = ' +
              str('{:.3f}'.format(predValue)) + ' (R^2 = ' + str('{:.5f}'.format(predProb)) + ')\033[0m\n')

    #**************************************
    ''' Neural Networks Classification Report '''
    #**************************************
    if nnDef.nnClassReport == True:
        print(' Neural Networks Classification Report\n')
        runClassReport(clf, A, Cl)

    #*************************
    ''' Plot probabilities '''
    #*************************
    if plotDef.showProbPlot == True:
        if nnDef.MLPRegressor is False:
            plotProb(clf, R)

    return predValue, predProb

#********************************************************************************
Example #26
Source File: common.py From typhon with MIT License
def _iwp_model(self, processes, cv_folds):
    """Return the default model for the IWP regressor
    """
    # Estimators are normally objects that have a fit and predict method
    # (e.g. MLPRegressor from sklearn). To make their training easier we
    # scale the input data in advance. With Pipeline objects from sklearn
    # we can combine such steps easily since they behave like an
    # estimator object as well.
    estimator = Pipeline([
        # SVM or NN work better if we have scaled the data in the first
        # place. MinMaxScaler is the simplest one. RobustScaler or
        # StandardScaler could be an alternative.
        ("scaler", RobustScaler(quantile_range=(15, 85))),
        # The "real" estimator:
        ("estimator", MLPRegressor(max_iter=6000, early_stopping=True)),
    ])

    # To optimize the results, we try different hyper parameters by
    # using a grid search
    hidden_layer_sizes = [
        (15, 10, 3),
        #(50, 20),
    ]
    hyper_parameter = [
        {   # Hyper parameter for lbfgs solver
            'estimator__solver': ['lbfgs'],
            'estimator__activation': ['tanh'],
            'estimator__hidden_layer_sizes': hidden_layer_sizes,
            'estimator__random_state': [0, 42, 100, 3452],
            'estimator__alpha': [0.1, 0.001, 0.0001],
        },
    ]

    return GridSearchCV(
        estimator, hyper_parameter, refit=True,
        n_jobs=processes, cv=cv_folds, verbose=self.verbose,
    )
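Since GridSearchCV behaves like an estimator, the object returned by _iwp_model is fitted and queried like any other model. A hedged usage sketch (the variable names are assumptions):

search = retriever._iwp_model(processes=4, cv_folds=5)
search.fit(X_train, y_train)     # runs the grid search over hyper_parameter
print(search.best_params_)       # e.g. the best solver/activation/layer sizes
y_pred = search.predict(X_test)  # predicts with the refitted best pipeline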
Example #27
Source File: SpectraLearnPredict.py From SpectralMachine with GNU General Public License v3.0
def predNN(clf, A, Cl, R):
    if nnDef.MLPRegressor is False:
        prob = clf.predict_proba(R)[0].tolist()
        rosterPred = np.where(clf.predict_proba(R)[0] > nnDef.thresholdProbabilityPred/100)[0]
        print('\n ==============================')
        print(' \033[1mNN\033[0m - Probability >', str(nnDef.thresholdProbabilityPred), '%')
        print(' ==============================')
        print(' Prediction\tProbability [%]')
        for i in range(rosterPred.shape[0]):
            print(' ', str(np.unique(Cl)[rosterPred][i]), '\t\t',
                  str('{:.4f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
        print(' ==============================')

        predValue = clf.predict(R)[0]
        predProb = round(100*max(prob), 4)
        print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - sklearn) = ' +
              str(predValue) + ' (probability = ' + str(predProb) + '%)\033[0m\n')
    else:
        Cl = np.array(Cl, dtype=float)
        predValue = clf.predict(R)[0]
        predProb = clf.score(A, Cl)
        print('\033[1m' + '\n Predicted regressor value (Deep Neural Networks - sklearn) = ' +
              str('{:.3f}'.format(predValue)) + ' (R^2 = ' + str('{:.5f}'.format(predProb)) + ')\033[0m\n')

    #**************************************
    ''' Neural Networks Classification Report '''
    #**************************************
    if nnDef.nnClassReport == True:
        print(' Neural Networks Classification Report\n')
        runClassReport(clf, A, Cl)

    #*************************
    ''' Plot probabilities '''
    #*************************
    if plotDef.showProbPlot == True:
        if nnDef.MLPRegressor is False:
            plotProb(clf, R)

    return predValue, predProb

#********************************************************************************
Example #28
Source File: neural_net_trainer.py From relaxed_ik with MIT License
def __init__(self, collision_graph, num_samples=300000):
    self.num_samples = num_samples
    self.cg = collision_graph
    self.inputs = []
    self.outputs = []
    self.robot = self.cg.robot
    self.bounds = self.cg.robot.bounds

    for i in range(num_samples):
        rvec = rand_vec(self.bounds)
        frames = self.robot.getFrames(rvec)
        score = self.cg.get_collision_score(frames)
        input = frames_to_jt_pt_vec(frames)
        self.inputs.append(input)
        self.outputs.append(score)
        print(str(i) + ' of ' + str(num_samples) + ' samples' + ': ' + str(score))

    self.clf = MLPRegressor(solver='adam', alpha=1,
                            hidden_layer_sizes=(70, 70, 70, 70, 70, 70),
                            max_iter=300000, verbose=True,
                            learning_rate='adaptive')
    self.clf.fit(self.inputs, self.outputs)

    self.output_comparisons()
Example #29
Source File: test_skl_to_pmml_UnitTest.py From nyoka with Apache License 2.0
def test_sklearn_57(self):
    df = pd.read_csv('nyoka/tests/auto-mpg.csv')
    X = df.drop(['mpg'], axis=1)
    y = df['mpg']
    features = [name for name in df.columns if name not in ('mpg')]
    target = 'mpg'
    f_name = "mlpr_pmml.pmml"
    model = MLPRegressor()
    pipeline_obj = Pipeline([
        ('mapper', DataFrameMapper([
            ('car name', TfidfVectorizer())
        ])),
        ('model', model)
    ])
    pipeline_obj.fit(X, y)
    skl_to_pmml(pipeline_obj, features, target, f_name)
    pmml_obj = pml.parse(f_name, True)
    # 1
    self.assertEqual(os.path.isfile(f_name), True)
    # 2
    self.assertEqual(2, pmml_obj.NeuralNetwork[0].NeuralLayer.__len__())
    # 3
    self.assertEqual(NN_ACTIVATION_FUNCTION.RECTIFIER.value,
                     pmml_obj.NeuralNetwork[0].activationFunction)
    # 4
    self.assertEqual(300, pmml_obj.NeuralNetwork[0].NeuralInputs.numberOfInputs)
    for model_val, pmml_val in zip(model.intercepts_[0],
                                   pmml_obj.NeuralNetwork[0].NeuralLayer[0].Neuron):
        self.assertEqual("{:.16f}".format(model_val), "{:.16f}".format(pmml_val.bias))
Example #30
Source File: ml_regressor.py From rampy with GNU General Public License v2.0
def fit(self):
    """Scale data and train the model with the indicated algorithm.

    Do not forget to tune the hyperparameters.

    Parameters
    ----------
    algorithm : String,
        "KernelRidge", "SVM", "LinearRegression", "Lasso", "ElasticNet",
        "NeuralNet", "BaggingNeuralNet", default = "SVM"
    """
    self.X_scaler.fit(self.X_train)
    self.Y_scaler.fit(self.y_train)

    # scaling the data in all cases, it may not be used during the fit later
    self.X_train_sc = self.X_scaler.transform(self.X_train)
    self.y_train_sc = self.Y_scaler.transform(self.y_train)
    self.X_test_sc = self.X_scaler.transform(self.X_test)
    self.y_test_sc = self.Y_scaler.transform(self.y_test)

    if self.algorithm == "KernelRidge":
        clf_kr = KernelRidge(kernel=self.user_kernel)
        self.model = sklearn.model_selection.GridSearchCV(
            clf_kr, cv=5, param_grid=self.param_kr)

    elif self.algorithm == "SVM":
        clf_svm = SVR(kernel=self.user_kernel)
        self.model = sklearn.model_selection.GridSearchCV(
            clf_svm, cv=5, param_grid=self.param_svm)

    elif self.algorithm == "Lasso":
        clf_lasso = sklearn.linear_model.Lasso(alpha=0.1, random_state=self.rand_state)
        self.model = sklearn.model_selection.GridSearchCV(
            clf_lasso, cv=5, param_grid=dict(alpha=np.logspace(-5, 5, 30)))

    elif self.algorithm == "ElasticNet":
        clf_ElasticNet = sklearn.linear_model.ElasticNet(
            alpha=0.1, l1_ratio=0.5, random_state=self.rand_state)
        self.model = sklearn.model_selection.GridSearchCV(
            clf_ElasticNet, cv=5, param_grid=dict(alpha=np.logspace(-5, 5, 30)))

    elif self.algorithm == "LinearRegression":
        self.model = sklearn.linear_model.LinearRegression()

    elif self.algorithm == "NeuralNet":
        self.model = MLPRegressor(**self.param_neurons)

    elif self.algorithm == "BaggingNeuralNet":
        nn_m = MLPRegressor(**self.param_neurons)
        self.model = BaggingRegressor(base_estimator=nn_m, **self.param_bag)

    if self.scaling == True:
        self.model.fit(self.X_train_sc, self.y_train_sc.reshape(-1,))
        predict_train_sc = self.model.predict(self.X_train_sc)
        self.prediction_train = self.Y_scaler.inverse_transform(
            predict_train_sc.reshape(-1, 1))
        predict_test_sc = self.model.predict(self.X_test_sc)
        self.prediction_test = self.Y_scaler.inverse_transform(
            predict_test_sc.reshape(-1, 1))
    else:
        self.model.fit(self.X_train, self.y_train.reshape(-1,))
        self.prediction_train = self.model.predict(self.X_train)
        self.prediction_test = self.model.predict(self.X_test)