Python sklearn.neural_network.MLPClassifier() Examples
The following are 30 code examples of sklearn.neural_network.MLPClassifier(). You can go to the original project or source file by following the links above each example, or browse all the available functions and classes of the module sklearn.neural_network.
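Before the project-specific examples, here is a minimal, self-contained usage sketch (not taken from any project below; the dataset, scaling step, and hyperparameters are illustrative assumptions):

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# MLPs are sensitive to feature scale, so standardize first.
scaler = StandardScaler().fit(X_train)
clf = MLPClassifier(hidden_layer_sizes=(64,), max_iter=300, random_state=0)
clf.fit(scaler.transform(X_train), y_train)
print(clf.score(scaler.transform(X_test), y_test))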
Example #1
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_alpha():
    # Test that larger alpha yields weights closer to zero
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]

    alpha_vectors = []
    alpha_values = np.arange(2)
    absolute_sum = lambda x: np.sum(np.abs(x))

    for alpha in alpha_values:
        mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha,
                            random_state=1)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
                                       absolute_sum(mlp.coefs_[1])]))

    for i in range(len(alpha_values) - 1):
        assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
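The test above depends on the suite's X_digits_binary fixture. A self-contained sketch of the same idea on synthetic data (the dataset and alpha grid are assumptions): a larger alpha applies a stronger L2 penalty, so the summed weight magnitudes should shrink.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=100, random_state=0)
for alpha in (1e-4, 1.0, 100.0):
    mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha,
                        max_iter=300, random_state=1)
    mlp.fit(X, y)  # may emit a ConvergenceWarning on this small budget
    print(alpha, sum(np.abs(c).sum() for c in mlp.coefs_))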
Example #2
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_lbfgs_classification():
    # Test lbfgs on classification.
    # It should achieve a score higher than 0.95 for the binary and multi-class
    # versions of the digits dataset.
    for X, y in classification_datasets:
        X_train = X[:150]
        y_train = y[:150]
        X_test = X[150:]
        expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)

        for activation in ACTIVATION_TYPES:
            mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
                                max_iter=150, shuffle=True, random_state=1,
                                activation=activation)
            mlp.fit(X_train, y_train)
            y_predict = mlp.predict(X_test)
            assert_greater(mlp.score(X_train, y_train), 0.95)
            assert_equal((y_predict.shape[0], y_predict.dtype.kind),
                         expected_shape_dtype)
Example #3
Source File: TMDetection.py From US-TransportationMode with MIT License
def neural_network(self, sensors_set):
    features = list(self.dataset.get_sensors_set_features(sensors_set))
    print("NEURAL NETWORK.....")
    print("CLASSIFICATION BASED ON THESE SENSORS: ", self.dataset.get_remained_sensors(sensors_set))
    print("NUMBER OF FEATURES: ", len(features))
    train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
        self.dataset.get_train, self.dataset.get_test, features)
    train_features_scaled, test_features_scaled = util.scale_features(train_features, test_features)

    classifier_nn = MLPClassifier(hidden_layer_sizes=(const.PAR_NN_NEURONS[sensors_set],),
                                  alpha=const.PAR_NN_ALPHA[sensors_set],
                                  max_iter=const.PAR_NN_MAX_ITER,
                                  tol=const.PAR_NN_TOL)
    classifier_nn.fit(train_features_scaled, train_classes)
    test_prediction = classifier_nn.predict(test_features_scaled)
    acc = accuracy_score(test_classes, test_prediction)
    print("ACCURACY : " + str(acc))
    print("END NEURAL NETWORK")

    if not os.path.exists(const.DIR_RESULTS):
        os.makedirs(const.DIR_RESULTS)
    file_content = "acc\n" + str(acc)
    with open(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_NEURAL_NETWORK_RESULTS, 'w') as f:
        f.write(file_content)

# support vector machine algorithm: train on all of the train set and test on all of the test set
Example #4
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
    X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
    y = [1, 1, 1, 0]
    for learning_rate in ["invscaling", "constant"]:
        mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
                            learning_rate=learning_rate, max_iter=1,
                            power_t=0.25, warm_start=True)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
            prev_eta = mlp._optimizer.learning_rate
            mlp.fit(X, y)
            post_eta = mlp._optimizer.learning_rate

        if learning_rate == 'constant':
            assert_equal(prev_eta, post_eta)
        elif learning_rate == 'invscaling':
            assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
                         post_eta)
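The test reads the effective learning rate through the private _optimizer attribute. Here is a sketch of warm starting using only public API, on the same toy data (an illustrative assumption): with warm_start=True, each call to fit() continues from the previous weights instead of reinitializing them.

from sklearn.neural_network import MLPClassifier

X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]

mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4, max_iter=20,
                    warm_start=True, random_state=1)
mlp.fit(X, y)            # first call initializes the weights
loss_after_first = mlp.loss_   # a ConvergenceWarning is expected here
mlp.fit(X, y)            # second call resumes from the stored solution
print(loss_after_first, mlp.loss_)  # the loss typically keeps decreasing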
Example #5
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_multilabel_classification():
    # Test that multi-label classification works as expected.
    # test fit method
    X, y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
                        max_iter=150, random_state=0, activation='logistic',
                        learning_rate_init=0.2)
    mlp.fit(X, y)
    assert_greater(mlp.score(X, y), 0.97)

    # test partial fit method
    mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
                        random_state=0, activation='logistic', alpha=1e-5,
                        learning_rate_init=0.2)
    for i in range(100):
        mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
    assert_greater(mlp.score(X, y), 0.9)

    # Make sure early stopping still works now that splitting is stratified by
    # default (it is disabled for multilabel classification)
    mlp = MLPClassifier(early_stopping=True)
    mlp.fit(X, y).predict(X)
Example #6
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_partial_fit_classification():
    # Test partial_fit on classification.
    # `partial_fit` should yield the same results as 'fit' for binary and
    # multi-class classification.
    for X, y in classification_datasets:
        mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
                            tol=0, alpha=1e-5, learning_rate_init=0.2)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        pred1 = mlp.predict(X)

        mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
                            learning_rate_init=0.2)
        for i in range(100):
            mlp.partial_fit(X, y, classes=np.unique(y))
        pred2 = mlp.predict(X)
        assert_array_equal(pred1, pred2)
        assert_greater(mlp.score(X, y), 0.95)
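A self-contained sketch of the incremental API exercised above (the toy data is an assumption): partial_fit must receive the complete set of classes on the first call, because later batches may not contain every label.

import numpy as np
from sklearn.neural_network import MLPClassifier

X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
y = np.array([0, 0, 1, 1])

mlp = MLPClassifier(solver='sgd', random_state=1, learning_rate_init=0.1)
for _ in range(50):                        # one pass over the data per call
    mlp.partial_fit(X, y, classes=np.unique(y))
print(mlp.predict(X))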
Example #7
Source File: test_uncertainty.py From cxplain with MIT License
def test_mnist_confidence_levels_valid(self):
    num_subsamples = 100
    (x_train, y_train), (x_test, y_test) = TestUtil.get_mnist(flattened=False,
                                                              num_subsamples=num_subsamples)

    explained_model = MLPClassifier(solver='lbfgs', alpha=1e-5,
                                    hidden_layer_sizes=(64, 32), random_state=1)
    explained_model.fit(x_train.reshape((len(x_train), -1)), y_train)

    model_builder = MLPModelBuilder(num_layers=2, num_units=64, activation="relu",
                                    p_dropout=0.2, verbose=0, batch_size=256,
                                    learning_rate=0.001, num_epochs=3,
                                    early_stopping_patience=128)
    masking_operation = ZeroMasking()
    loss = categorical_crossentropy

    confidence_levels = [0.0, 1.0, 1.01, -0.01]
    for confidence_level in confidence_levels:
        downsample_factor = (2, 2)
        explainer = CXPlain(explained_model, model_builder, masking_operation,
                            loss, num_models=2,
                            downsample_factors=downsample_factor,
                            flatten_for_explained_model=True)
        explainer.fit(x_train, y_train)
        with self.assertRaises(ValueError):
            _ = explainer.predict(x_test, confidence_level=confidence_level)
Example #8
Source File: net.py From color_recognizer with MIT License
def learn():
    print('Loading previous dataset to learn')
    n_files = 0
    training_set = list()
    training_labels = list()
    for file in os.listdir(data_dir):
        if file.endswith(".jpg"):
            img_file = os.path.join(data_dir, file)
            label_name = str(file).split('_')
            # flatten each image into a 1-D feature vector of 6912 values
            training_set.append(cv2.imread(img_file, 1).reshape(6912))
            training_labels.append(label_name[0])
            n_files += 1

    x = training_set
    y = tools.integerize(training_labels)

    net = MLPClassifier()
    print('\nLearning...\n')
    net.fit(x, y)
    print('MLP has already learned previous instances')
    return net
Example #9
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_predict_proba_binary():
    # Test that predict_proba works as expected for binary class.
    X = X_digits_binary[:50]
    y = y_digits_binary[:50]

    clf = MLPClassifier(hidden_layer_sizes=5, activation='logistic',
                        random_state=1)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    y_proba = clf.predict_proba(X)
    y_log_proba = clf.predict_log_proba(X)

    (n_samples, n_classes) = y.shape[0], 2

    proba_max = y_proba.argmax(axis=1)
    proba_log_max = y_log_proba.argmax(axis=1)

    assert_equal(y_proba.shape, (n_samples, n_classes))
    assert_array_equal(proba_max, proba_log_max)
    assert_array_equal(y_log_proba, np.log(y_proba))
    assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
Example #10
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_predict_proba_multilabel():
    # Test that predict_proba works as expected for multilabel.
    # Multilabel should not use softmax, which makes probabilities sum to 1.
    X, Y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    n_samples, n_classes = Y.shape

    clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
                        random_state=0)
    clf.fit(X, Y)
    y_proba = clf.predict_proba(X)

    assert_equal(y_proba.shape, (n_samples, n_classes))
    assert_array_equal(y_proba > 0.5, Y)

    y_log_proba = clf.predict_log_proba(X)
    proba_max = y_proba.argmax(axis=1)
    proba_log_max = y_log_proba.argmax(axis=1)

    assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
    assert_array_equal(proba_max, proba_log_max)
    assert_array_equal(y_log_proba, np.log(y_proba))
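A self-contained sketch of the property asserted above (the synthetic data is an assumption): in the multilabel case each label has an independent logistic output, so the per-row probabilities generally do not sum to 1.

from sklearn.datasets import make_multilabel_classification
from sklearn.neural_network import MLPClassifier

X, Y = make_multilabel_classification(n_samples=50, random_state=0)
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30, random_state=0)
clf.fit(X, Y)
proba = clf.predict_proba(X)
print(proba[:3].sum(axis=1))  # rows need not sum to 1.0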
Example #11
Source File: test_explanation_model.py From cxplain with MIT License
def test_time_series_valid(self):
    num_samples = 1024
    fixed_length = 99
    (x_train, y_train), (x_test, y_test) = \
        TestUtil.get_random_fixed_length_dataset(num_samples=num_samples,
                                                 fixed_length=fixed_length)

    model_builder = RNNModelBuilder(with_embedding=False, num_layers=2, num_units=32,
                                    activation="relu", p_dropout=0.2, verbose=0,
                                    batch_size=32, learning_rate=0.001, num_epochs=2,
                                    early_stopping_patience=128)

    explained_model = MLPClassifier()
    explained_model.fit(x_train.reshape((-1, np.prod(x_train.shape[1:]))), y_train)

    masking_operation = ZeroMasking()
    loss = binary_crossentropy
    explainer = CXPlain(explained_model, model_builder, masking_operation, loss,
                        flatten_for_explained_model=True)

    explainer.fit(x_train, y_train)
    eval_score = explainer.score(x_test, y_test)
    train_score = explainer.get_last_fit_score()
    median = explainer.predict(x_test)
    self.assertTrue(median.shape == x_test.shape)
Example #12
Source File: test_explanation_model.py From cxplain with MIT License
def test_mnist_unet_valid(self):
    num_subsamples = 100
    (x_train, y_train), (x_test, y_test) = TestUtil.get_mnist(flattened=False,
                                                              num_subsamples=num_subsamples)

    explained_model = MLPClassifier(solver='lbfgs', alpha=1e-5,
                                    hidden_layer_sizes=(64, 32), random_state=1)
    explained_model.fit(x_train.reshape((len(x_train), -1)), y_train)

    masking_operation = ZeroMasking()
    loss = categorical_crossentropy

    downsample_factors = [(2, 2), (4, 4), (4, 7), (7, 4), (7, 7)]
    with_bns = [i % 2 == 0 for i in range(len(downsample_factors))]
    for downsample_factor, with_bn in zip(downsample_factors, with_bns):
        model_builder = UNetModelBuilder(downsample_factor, num_layers=2, num_units=64,
                                         activation="relu", p_dropout=0.2, verbose=0,
                                         batch_size=256, learning_rate=0.001,
                                         num_epochs=2, early_stopping_patience=128,
                                         with_bn=with_bn)

        explainer = CXPlain(explained_model, model_builder, masking_operation, loss,
                            downsample_factors=downsample_factor,
                            flatten_for_explained_model=True)

        explainer.fit(x_train, y_train)
        eval_score = explainer.score(x_test, y_test)
        train_score = explainer.get_last_fit_score()
        median = explainer.predict(x_test)
        self.assertTrue(median.shape == x_test.shape)
Example #13
Source File: MalGAN__v3.py From Malware-GAN with GNU General Public License v3.0
def build_blackbox_detector(self):
    if self.blackbox == 'RF':
        blackbox_detector = RandomForestClassifier(n_estimators=100, max_depth=3,
                                                   random_state=1)
    elif self.blackbox == 'SVM':
        blackbox_detector = svm.SVC()
    elif self.blackbox == 'LR':
        blackbox_detector = linear_model.LogisticRegression()
    elif self.blackbox == 'DT':
        blackbox_detector = tree.DecisionTreeRegressor()
    elif self.blackbox == 'MLP':
        blackbox_detector = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10,
                                          alpha=1e-4, solver='sgd', verbose=0,
                                          tol=1e-4, random_state=1,
                                          learning_rate_init=.1)
    elif self.blackbox == 'VOTE':
        blackbox_detector = VOTEClassifier()

    return blackbox_detector
Example #14
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_warm_start():
    X = X_iris
    y = y_iris

    y_2classes = np.array([0] * 75 + [1] * 75)
    y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
    y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
    y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
    y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)

    # No error raised
    clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
                        warm_start=True).fit(X, y)
    clf.fit(X, y)
    clf.fit(X, y_3classes)

    for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
        clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
                            warm_start=True).fit(X, y)
        message = ('warm_start can only be used where `y` has the same '
                   'classes as in the previous call to fit.'
                   ' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
        assert_raise_message(ValueError, message, clf.fit, X, y_i)
Example #15
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_n_iter_no_change():
    # test n_iter_no_change using a binary data set
    # the classifier's fitting process is not prone to loss-curve fluctuations
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]
    tol = 0.01
    max_iter = 3000

    # test multiple n_iter_no_change values
    for n_iter_no_change in [2, 5, 10, 50, 100]:
        clf = MLPClassifier(tol=tol, max_iter=max_iter, solver='sgd',
                            n_iter_no_change=n_iter_no_change)
        clf.fit(X, y)

        # validate n_iter_no_change
        assert_equal(clf._no_improvement_count, n_iter_no_change + 1)
        assert_greater(max_iter, clf.n_iter_)
Example #16
Source File: test_mlp.py From Mastering-Elasticsearch-7.0 with MIT License
def test_n_iter_no_change_inf():
    # test n_iter_no_change using a binary data set
    # the fitting process should go to max_iter iterations
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]

    # set a ridiculous tolerance
    # this should always trigger _update_no_improvement_count()
    tol = 1e9

    # fit
    n_iter_no_change = np.inf
    max_iter = 3000
    clf = MLPClassifier(tol=tol, max_iter=max_iter, solver='sgd',
                        n_iter_no_change=n_iter_no_change)
    clf.fit(X, y)

    # validate n_iter_no_change doesn't cause early stopping
    assert_equal(clf.n_iter_, max_iter)

    # validate _update_no_improvement_count() was always triggered
    assert_equal(clf._no_improvement_count, clf.n_iter_ - 1)
Example #17
Source File: classifier.py From libfaceid with MIT License
def __init__(self, classifier=FaceClassifierModels.DEFAULT):
    self._clf = None
    if classifier == FaceClassifierModels.LINEAR_SVM:
        self._clf = SVC(C=1.0, kernel="linear", probability=True)
    elif classifier == FaceClassifierModels.NAIVE_BAYES:
        self._clf = GaussianNB()
    elif classifier == FaceClassifierModels.RBF_SVM:
        self._clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
    elif classifier == FaceClassifierModels.NEAREST_NEIGHBORS:
        self._clf = KNeighborsClassifier(1)
    elif classifier == FaceClassifierModels.DECISION_TREE:
        self._clf = DecisionTreeClassifier(max_depth=5)
    elif classifier == FaceClassifierModels.RANDOM_FOREST:
        self._clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)
    elif classifier == FaceClassifierModels.NEURAL_NET:
        self._clf = MLPClassifier(alpha=1)
    elif classifier == FaceClassifierModels.ADABOOST:
        self._clf = AdaBoostClassifier()
    elif classifier == FaceClassifierModels.QDA:
        self._clf = QuadraticDiscriminantAnalysis()
    print("classifier={}".format(FaceClassifierModels(classifier)))
Example #18
Source File: test_pipeline.py From lale with Apache License 2.0
def test_fit2(self):
    import warnings
    warnings.filterwarnings(action="ignore")
    from lale.lib.sklearn import MinMaxScaler, MLPClassifier

    pipeline = Batching(operator=MinMaxScaler() >> MinMaxScaler(),
                        batch_size=112)
    trained = pipeline.fit(self.X_train, self.y_train)
    lale_transforms = trained.transform(self.X_test)

    from sklearn.preprocessing import MinMaxScaler
    prep = MinMaxScaler()
    trained_prep = prep.partial_fit(self.X_train, self.y_train)
    X_transformed = trained_prep.transform(self.X_train)

    clf = MinMaxScaler()
    import numpy as np
    trained_clf = clf.partial_fit(X_transformed, self.y_train)
    sklearn_transforms = trained_clf.transform(trained_prep.transform(self.X_test))

    for i in range(5):
        for j in range(2):
            self.assertAlmostEqual(lale_transforms[i, j],
                                   sklearn_transforms[i, j])
Example #19
Source File: test_pipeline.py From lale with Apache License 2.0
def test_fit1(self):
    import warnings
    warnings.filterwarnings(action="ignore")
    from lale.lib.sklearn import MinMaxScaler, MLPClassifier

    pipeline = Batching(operator=MinMaxScaler() >> MLPClassifier(random_state=42),
                        batch_size=112)
    trained = pipeline.fit(self.X_train, self.y_train)
    predictions = trained.predict(self.X_test)
    lale_accuracy = accuracy_score(self.y_test, predictions)

    from sklearn.preprocessing import MinMaxScaler
    from sklearn.neural_network import MLPClassifier
    prep = MinMaxScaler()
    trained_prep = prep.partial_fit(self.X_train, self.y_train)
    X_transformed = trained_prep.transform(self.X_train)

    clf = MLPClassifier(random_state=42)
    import numpy as np
    trained_clf = clf.partial_fit(X_transformed, self.y_train,
                                  classes=np.unique(self.y_train))
    predictions = trained_clf.predict(trained_prep.transform(self.X_test))
    sklearn_accuracy = accuracy_score(self.y_test, predictions)

    self.assertEqual(lale_accuracy, sklearn_accuracy)
Example #20
Source File: test_pipeline.py From lale with Apache License 2.0
def test_fit(self):
    import warnings
    warnings.filterwarnings(action="ignore")
    from lale.lib.sklearn import MinMaxScaler, MLPClassifier

    pipeline = NoOp() >> Batching(operator=MinMaxScaler() >> MLPClassifier(random_state=42),
                                  batch_size=112)
    trained = pipeline.fit(self.X_train, self.y_train)
    predictions = trained.predict(self.X_test)
    lale_accuracy = accuracy_score(self.y_test, predictions)

    from sklearn.preprocessing import MinMaxScaler
    from sklearn.neural_network import MLPClassifier
    prep = MinMaxScaler()
    trained_prep = prep.partial_fit(self.X_train, self.y_train)
    X_transformed = trained_prep.transform(self.X_train)

    clf = MLPClassifier(random_state=42)
    import numpy as np
    trained_clf = clf.partial_fit(X_transformed, self.y_train,
                                  classes=np.unique(self.y_train))
    predictions = trained_clf.predict(trained_prep.transform(self.X_test))
    sklearn_accuracy = accuracy_score(self.y_test, predictions)

    self.assertEqual(lale_accuracy, sklearn_accuracy)
Example #21
Source File: MLP_nets.py From DeepLearning_IDS with MIT License
def __init__(self):
    # Default parameters for reference:
    # MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
    #               beta_1=0.9, beta_2=0.999, early_stopping=False,
    #               epsilon=1e-08, hidden_layer_sizes=(5, 2),
    #               learning_rate='constant', learning_rate_init=0.001,
    #               max_iter=200, momentum=0.9, nesterovs_momentum=True,
    #               power_t=0.5, random_state=1, shuffle=True, solver='lbfgs',
    #               tol=0.0001, validation_fraction=0.1, verbose=False,
    #               warm_start=False)
    #
    # hidden_layer_sizes has one entry per hidden layer (total layers minus
    # the input and output layers), giving the number of units in that layer.
    #
    # Available solvers:
    # - 'sgd': stochastic gradient descent.
    # - 'adam': a stochastic gradient-based optimizer proposed by Kingma,
    #   Diederik, and Jimmy Ba; works well on relatively large datasets
    #   (thousands of training samples or more) in terms of both training
    #   time and validation score.
    # - 'lbfgs': an optimizer in the family of quasi-Newton methods; for
    #   small datasets it can converge faster and perform better.
    self.classifier = MLPClassifier(solver='adam', alpha=1e-5,
                                    hidden_layer_sizes=(64,), random_state=1,
                                    max_iter=1500, verbose=True)
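A sketch of the solver trade-off described in the comments above (the datasets and sizes are illustrative assumptions, not from the original project): 'lbfgs' often converges faster on small datasets, while 'adam' tends to scale better.

from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

# Small dataset: a quasi-Newton full-batch optimizer is a reasonable default.
X_small, y_small = make_classification(n_samples=200, random_state=0)
MLPClassifier(solver='lbfgs', hidden_layer_sizes=(64,), alpha=1e-5,
              max_iter=500, random_state=1).fit(X_small, y_small)

# Larger dataset: a stochastic optimizer trains in mini-batches.
X_big, y_big = make_classification(n_samples=5000, random_state=0)
MLPClassifier(solver='adam', hidden_layer_sizes=(64,), alpha=1e-5,
              max_iter=200, random_state=1).fit(X_big, y_big)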
Example #22
Source File: testScoreWithAdapaSklearn.py From nyoka with Apache License 2.0
def test_38_mlp_classifier(self):
    print("\ntest 38 (mlp classifier without preprocessing) [multi-class]\n")
    X, X_test, y, features, target, test_file = \
        self.data_utility.get_data_for_multi_class_classification()
    model = MLPClassifier()
    pipeline_obj = Pipeline([
        ("model", model)
    ])
    pipeline_obj.fit(X, y)

    file_name = 'test38sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    model_prob = pipeline_obj.predict_proba(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
    self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
Example #23
Source File: testScoreWithAdapaSklearn.py From nyoka with Apache License 2.0
def test_39_mlp_classifier(self):
    print("\ntest 39 (mlp classifier without preprocessing) [binary-class]\n")
    X, X_test, y, features, target, test_file = \
        self.data_utility.get_data_for_binary_classification()
    model = MLPClassifier()
    pipeline_obj = Pipeline([
        ("model", model)
    ])
    pipeline_obj.fit(X, y)

    file_name = 'test39sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    model_prob = pipeline_obj.predict_proba(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
    self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
Example #24
Source File: exp.py From Malware-GAN with GNU General Public License v3.0
def build_blackbox_detector(self):
    if self.blackbox == 'MLP':
        blackbox_detector = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10,
                                          alpha=1e-4, solver='sgd', verbose=0,
                                          tol=1e-4, random_state=1,
                                          learning_rate_init=.1)
    return blackbox_detector
Example #25
Source File: test_pipeline.py From lale with Apache License 2.0
def test_fit3(self):
    from lale.lib.sklearn import MinMaxScaler, MLPClassifier, PCA
    pipeline = PCA() >> Batching(operator=MinMaxScaler() >> MLPClassifier(random_state=42),
                                 batch_size=10)
    trained = pipeline.fit(self.X_train, self.y_train)
    predictions = trained.predict(self.X_test)
Example #26
Source File: VOTEClassifier.py From Malware-GAN with GNU General Public License v3.0
def __init__(self):
    self.RF = RandomForestClassifier(n_estimators=50, max_depth=5, random_state=1)
    self.SVM = svm.SVC()
    self.LR = linear_model.LogisticRegression()
    self.DT = tree.DecisionTreeRegressor()
    self.MLP = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
                             solver='sgd', verbose=0, tol=1e-4, random_state=1,
                             learning_rate_init=.1)
    self.alpha = 0.2 * np.ones((5,))
Example #27
Source File: trained_attack_models.py From privacy with Apache License 2.0
def mlp(verbose: int = 0, n_jobs: int = 1):
    """Set up an MLP pipeline with cross-validation."""
    mlpmodel = neural_network.MLPClassifier()

    param_grid = {
        'hidden_layer_sizes': [(64,), (32, 32)],
        'solver': ['adam'],
        'alpha': [0.0001, 0.001, 0.01],
    }
    # Note: the `iid` parameter was deprecated in scikit-learn 0.22 and
    # removed in 0.24; drop it when running on newer versions.
    pipe = model_selection.GridSearchCV(
        mlpmodel, param_grid=param_grid, cv=3, n_jobs=n_jobs,
        iid=False, verbose=verbose)
    return pipe
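A usage sketch for this helper (the synthetic dataset is an assumption): GridSearchCV fits one MLP per hyperparameter combination and keeps the best one according to 3-fold cross-validation.

from sklearn.datasets import make_classification

X_train, y_train = make_classification(n_samples=300, n_features=20, random_state=0)
pipe = mlp(verbose=0, n_jobs=2)
pipe.fit(X_train, y_train)
print(pipe.best_params_)          # best combination found by the search
y_pred = pipe.predict(X_train)    # GridSearchCV delegates to the best estimator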
Example #28
Source File: test_event_classifier.py From ctapipe with BSD 3-Clause "New" or "Revised" License
def test_prepare_model_MLP():
    cam_id_list = ["FlashCam", "ASTRICam"]
    feature_list = {
        "FlashCam": [
            [1, 10], [2, 20], [3, 30], [0.9, 9],
            [10, 1], [20, 2], [30, 3], [9, 0.9],
        ],
        "ASTRICam": [
            [10, 1], [20, 2], [30, 3], [9, 0.9],
            [1, 10], [2, 20], [3, 30], [0.9, 9],
        ],
    }
    target_list = {
        "FlashCam": ["a", "a", "a", "a", "b", "b", "b", "b"],
        "ASTRICam": ["a", "a", "a", "a", "b", "b", "b", "b"],
    }

    clf = EventClassifier(
        classifier=MLPClassifier, cam_id_list=cam_id_list, max_iter=400
    )
    scaled_features, scaler = EventClassifier.scale_features(cam_id_list, feature_list)

    # clf.fit(feature_list, target_list)
    clf.fit(scaled_features, target_list)
    return clf, cam_id_list, scaler
Example #29
Source File: MLP_nets.py From DeepLearning_IDS with MIT License
def __init__(self, *layers, a=1e-5, max_i=1500):
    self.classifier = MLPClassifier(*layers, solver='adam', alpha=a,
                                    random_state=1, max_iter=max_i,
                                    verbose=True)
Example #30
Source File: mp_train.py From atap with Apache License 2.0
def fit_multilayer_perceptron(path, saveto=None, cv=12):
    model = Pipeline([
        ('norm', TextNormalizer()),
        ('tfidf', TfidfVectorizer(tokenizer=identity, lowercase=False)),
        ('clf', MLPClassifier(hidden_layer_sizes=(10, 10), early_stopping=True))
    ])

    if saveto is None:
        saveto = "multilayer_perceptron_{}.pkl".format(time.time())

    scores, delta = train_model(path, model, saveto, cv)
    logger.info((
        "multilayer perceptron training took {:0.2f} seconds "
        "with an average score of {:0.3f}"
    ).format(delta, scores.mean()))