Python sklearn.exceptions.ConvergenceWarning() Examples

The following are 30 code examples of sklearn.exceptions.ConvergenceWarning(), collected from open-source projects. The original project and source file are noted above each example. You may also want to check out the other functions and classes available in the sklearn.exceptions module.
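ConvergenceWarning is issued by many scikit-learn estimators when fitting stops before the convergence criterion is met, typically because max_iter is exhausted. The examples below mostly do one of two things: assert that the warning is raised, or silence it while fitting. The following minimal sketch shows both patterns; the toy dataset and MLPClassifier settings are illustrative assumptions, not taken from any of the projects below, and whether the warning actually fires depends on the data and estimator settings.

import warnings

import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.neural_network import MLPClassifier

X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = np.array([0, 1, 1, 0])

# Pattern 1: check that the warning fires when the iteration budget is far too small.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always", ConvergenceWarning)
    MLPClassifier(hidden_layer_sizes=(5,), max_iter=1, random_state=0).fit(X, y)
assert any(issubclass(w.category, ConvergenceWarning) for w in caught)

# Pattern 2: silence the warning during fitting, as many of the tests below do.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", ConvergenceWarning)
    MLPClassifier(hidden_layer_sizes=(5,), max_iter=1, random_state=0).fit(X, y)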
Example #1
Source File: test_mlp.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_alpha():
    # Test that larger alpha yields weights closer to zero
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]

    alpha_vectors = []
    alpha_values = np.arange(2)
    absolute_sum = lambda x: np.sum(np.abs(x))

    for alpha in alpha_values:
        mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
                                       absolute_sum(mlp.coefs_[1])]))

    for i in range(len(alpha_values) - 1):
        assert (alpha_vectors[i] > alpha_vectors[i + 1]).all() 
Example #2
Source File: test_ransac.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_ransac_warn_exceed_max_skips():
    global cause_skip
    cause_skip = False

    def is_data_valid(X, y):
        global cause_skip
        if not cause_skip:
            cause_skip = True
            return True
        else:
            return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_skips=3,
                                       max_trials=5)

    assert_warns(ConvergenceWarning, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0) 
Example #3
Source File: test_k_means.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_less_centers_than_unique_points():
    X = np.asarray([[0, 0],
                    [0, 1],
                    [1, 0],
                    [1, 0]])  # last point is duplicated

    km = KMeans(n_clusters=4).fit(X)

    # only three distinct points, so only three clusters
    # can have points assigned to them
    assert_equal(set(km.labels_), set(range(3)))

    # k_means should warn that fewer labels than cluster
    # centers have been used
    msg = ("Number of distinct clusters (3) found smaller than "
           "n_clusters (4). Possibly due to duplicate points in X.")
    assert_warns_message(ConvergenceWarning, msg, k_means, X,
                         sample_weight=None, n_clusters=4) 
Example #4
Source File: test_fastica.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_fastica_convergence_fail():
    # Test the FastICA algorithm on very simple data
    # (see test_non_square_fastica).
    # Ensure a ConvergenceWarning is raised if the tolerance is sufficiently low.
    rng = np.random.RandomState(0)

    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)

    # Do fastICA with tolerance 0. to ensure failing convergence
    ica = FastICA(algorithm="parallel", n_components=2, random_state=rng,
                  max_iter=2, tol=0.)
    assert_warns(ConvergenceWarning, ica.fit, m.T) 
Example #5
Source File: test_least_angle.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_lars_cv_max_iter(recwarn):
    warnings.simplefilter('always')
    with np.errstate(divide='raise', invalid='raise'):
        X = diabetes.data
        y = diabetes.target
        rng = np.random.RandomState(42)
        x = rng.randn(len(y))
        X = diabetes.data
        X = np.c_[X, x, x]  # add correlated features
        lars_cv = linear_model.LassoLarsCV(max_iter=5, cv=5)
        lars_cv.fit(X, y)
    # Check that there is no warning in general and no ConvergenceWarning
    # in particular.
    # Materialize the string representation of the warning to get a more
    # informative error message in case of AssertionError.
    recorded_warnings = [str(w) for w in recwarn]
    assert recorded_warnings == [] 
Example #6
Source File: test_mlp.py    From twitter-stock-recommendation with MIT License
def test_predict_proba_binary():
    # Test that predict_proba works as expected for binary class.
    X = X_digits_binary[:50]
    y = y_digits_binary[:50]

    clf = MLPClassifier(hidden_layer_sizes=5)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    y_proba = clf.predict_proba(X)
    y_log_proba = clf.predict_log_proba(X)

    (n_samples, n_classes) = y.shape[0], 2

    proba_max = y_proba.argmax(axis=1)
    proba_log_max = y_log_proba.argmax(axis=1)

    assert_equal(y_proba.shape, (n_samples, n_classes))
    assert_array_equal(proba_max, proba_log_max)
    assert_array_equal(y_log_proba, np.log(y_proba))

    assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0) 
Example #7
Source File: test_mlp.py    From twitter-stock-recommendation with MIT License
def test_partial_fit_classification():
    # Test partial_fit on classification.
    # `partial_fit` should yield the same results as 'fit' for binary and
    # multi-class classification.
    for X, y in classification_datasets:
        X = X
        y = y
        mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
                            tol=0, alpha=1e-5, learning_rate_init=0.2)

        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
                            learning_rate_init=0.2)
        for i in range(100):
            mlp.partial_fit(X, y, classes=np.unique(y))
        pred2 = mlp.predict(X)
        assert_array_equal(pred1, pred2)
        assert_greater(mlp.score(X, y), 0.95) 
Example #8
Source File: test_gaussian_mixture.py    From twitter-stock-recommendation with MIT License
def test_gaussian_mixture_fit_convergence_warning():
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=1)
    n_components = rand_data.n_components
    max_iter = 1
    for covar_type in COVARIANCE_TYPE:
        X = rand_data.X[covar_type]
        g = GaussianMixture(n_components=n_components, n_init=1,
                            max_iter=max_iter, reg_covar=0, random_state=rng,
                            covariance_type=covar_type)
        assert_warns_message(ConvergenceWarning,
                             'Initialization %d did not converge. '
                             'Try different init parameters, '
                             'or increase max_iter, tol '
                             'or check for degenerate data.'
                             % max_iter, g.fit, X) 
Example #9
Source File: test_mlp.py    From twitter-stock-recommendation with MIT License
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
    X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
    y = [1, 1, 1, 0]
    for learning_rate in ["invscaling", "constant"]:
        mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
                            learning_rate=learning_rate, max_iter=1,
                            power_t=0.25, warm_start=True)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
            prev_eta = mlp._optimizer.learning_rate
            mlp.fit(X, y)
            post_eta = mlp._optimizer.learning_rate

        if learning_rate == 'constant':
            assert_equal(prev_eta, post_eta)
        elif learning_rate == 'invscaling':
            assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
                         post_eta) 
Example #10
Source File: test_gaussian_mixture.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_gaussian_mixture_fit_convergence_warning():
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=1)
    n_components = rand_data.n_components
    max_iter = 1
    for covar_type in COVARIANCE_TYPE:
        X = rand_data.X[covar_type]
        g = GaussianMixture(n_components=n_components, n_init=1,
                            max_iter=max_iter, reg_covar=0, random_state=rng,
                            covariance_type=covar_type)
        assert_warns_message(ConvergenceWarning,
                             'Initialization %d did not converge. '
                             'Try different init parameters, '
                             'or increase max_iter, tol '
                             'or check for degenerate data.'
                             % max_iter, g.fit, X) 
Example #11
Source File: test_mlp.py    From twitter-stock-recommendation with MIT License
def test_alpha():
    # Test that larger alpha yields weights closer to zero
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]

    alpha_vectors = []
    alpha_values = np.arange(2)
    absolute_sum = lambda x: np.sum(np.abs(x))

    for alpha in alpha_values:
        mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
                                       absolute_sum(mlp.coefs_[1])]))

    for i in range(len(alpha_values) - 1):
        assert (alpha_vectors[i] > alpha_vectors[i + 1]).all() 
Example #12
Source File: test_label_propagation.py    From twitter-stock-recommendation with MIT License
def test_convergence_warning():
    # This is a non-regression test for #5774
    X = np.array([[1., 0.], [0., 1.], [1., 2.5]])
    y = np.array([0, 1, -1])
    mdl = label_propagation.LabelSpreading(kernel='rbf', max_iter=1)
    assert_warns(ConvergenceWarning, mdl.fit, X, y)
    assert_equal(mdl.n_iter_, mdl.max_iter)

    mdl = label_propagation.LabelPropagation(kernel='rbf', max_iter=1)
    assert_warns(ConvergenceWarning, mdl.fit, X, y)
    assert_equal(mdl.n_iter_, mdl.max_iter)

    mdl = label_propagation.LabelSpreading(kernel='rbf', max_iter=500)
    assert_no_warnings(mdl.fit, X, y)

    mdl = label_propagation.LabelPropagation(kernel='rbf', max_iter=500)
    assert_no_warnings(mdl.fit, X, y) 
Example #13
Source File: test_birch.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_n_clusters():
    # Test that n_clusters param works properly
    X, y = make_blobs(n_samples=100, centers=10)
    brc1 = Birch(n_clusters=10)
    brc1.fit(X)
    assert_greater(len(brc1.subcluster_centers_), 10)
    assert_equal(len(np.unique(brc1.labels_)), 10)

    # Test that n_clusters = Agglomerative Clustering gives
    # the same results.
    gc = AgglomerativeClustering(n_clusters=10)
    brc2 = Birch(n_clusters=gc)
    brc2.fit(X)
    assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
    assert_array_equal(brc1.labels_, brc2.labels_)

    # Test that the wrong global clustering step raises an Error.
    clf = ElasticNet()
    brc3 = Birch(n_clusters=clf)
    assert_raises(ValueError, brc3.fit, X)

    # Test that a small number of clusters raises a warning.
    brc4 = Birch(threshold=10000.)
    assert_warns(ConvergenceWarning, brc4.fit, X) 
Example #14
Source File: test_mlp.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_predict_proba_multiclass():
    # Test that predict_proba works as expected for multi class.
    X = X_digits_multi[:10]
    y = y_digits_multi[:10]

    clf = MLPClassifier(hidden_layer_sizes=5)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    y_proba = clf.predict_proba(X)
    y_log_proba = clf.predict_log_proba(X)

    (n_samples, n_classes) = y.shape[0], np.unique(y).size

    proba_max = y_proba.argmax(axis=1)
    proba_log_max = y_log_proba.argmax(axis=1)

    assert_equal(y_proba.shape, (n_samples, n_classes))
    assert_array_equal(proba_max, proba_log_max)
    assert_array_equal(y_log_proba, np.log(y_proba)) 
Example #15
Source File: test_mlp.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_partial_fit_classification():
    # Test partial_fit on classification.
    # `partial_fit` should yield the same results as 'fit' for binary and
    # multi-class classification.
    for X, y in classification_datasets:
        X = X
        y = y
        mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
                            tol=0, alpha=1e-5, learning_rate_init=0.2)

        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
                            learning_rate_init=0.2)
        for i in range(100):
            mlp.partial_fit(X, y, classes=np.unique(y))
        pred2 = mlp.predict(X)
        assert_array_equal(pred1, pred2)
        assert_greater(mlp.score(X, y), 0.95) 
Example #16
Source File: test_mlp.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
    X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
    y = [1, 1, 1, 0]
    for learning_rate in ["invscaling", "constant"]:
        mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
                            learning_rate=learning_rate, max_iter=1,
                            power_t=0.25, warm_start=True)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
            prev_eta = mlp._optimizer.learning_rate
            mlp.fit(X, y)
            post_eta = mlp._optimizer.learning_rate

        if learning_rate == 'constant':
            assert_equal(prev_eta, post_eta)
        elif learning_rate == 'invscaling':
            assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
                         post_eta) 
Example #17
Source File: test_standardization.py    From causallib with Apache License 2.0
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        import warnings
        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                        ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor,
                        KNeighborsRegressor, SVR, LinearSVR]:
            learner = learner()
            learner_name = str(learner).split("(", maxsplit=1)[0]
            with self.subTest("Test fit using {learner}".format(learner=learner_name)):
                model = self.estimator.__class__(learner)
                model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
                self.assertTrue(True)  # Fit did not crash 
Example #18
Source File: test_label_propagation.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_convergence_warning():
    # This is a non-regression test for #5774
    X = np.array([[1., 0.], [0., 1.], [1., 2.5]])
    y = np.array([0, 1, -1])
    mdl = label_propagation.LabelSpreading(kernel='rbf', max_iter=1)
    assert_warns(ConvergenceWarning, mdl.fit, X, y)
    assert_equal(mdl.n_iter_, mdl.max_iter)

    mdl = label_propagation.LabelPropagation(kernel='rbf', max_iter=1)
    assert_warns(ConvergenceWarning, mdl.fit, X, y)
    assert_equal(mdl.n_iter_, mdl.max_iter)

    mdl = label_propagation.LabelSpreading(kernel='rbf', max_iter=500)
    assert_no_warnings(mdl.fit, X, y)

    mdl = label_propagation.LabelPropagation(kernel='rbf', max_iter=500)
    assert_no_warnings(mdl.fit, X, y) 
Example #19
Source File: test_gaussian_mixture.py    From twitter-stock-recommendation with MIT License
def test_monotonic_likelihood():
    # We check that each step of the EM algorithm without regularization
    # monotonically improves the training set likelihood.
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=7)
    n_components = rand_data.n_components

    for covar_type in COVARIANCE_TYPE:
        X = rand_data.X[covar_type]
        gmm = GaussianMixture(n_components=n_components,
                              covariance_type=covar_type, reg_covar=0,
                              warm_start=True, max_iter=1, random_state=rng,
                              tol=1e-7)
        current_log_likelihood = -np.infty
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ConvergenceWarning)
            # Do one training iteration at a time so we can make sure that the
            # training log likelihood increases after each iteration.
            for _ in range(600):
                prev_log_likelihood = current_log_likelihood
                try:
                    current_log_likelihood = gmm.fit(X).score(X)
                except ConvergenceWarning:
                    pass
                assert_greater_equal(current_log_likelihood,
                                     prev_log_likelihood)

                if gmm.converged_:
                    break

            assert_true(gmm.converged_) 
Example #20
Source File: test_spherical_kmeans.py    From mvlearn with Apache License 2.0
def test_predict_no_centroids2():
    kmeans = MultiviewSphericalKMeans()
    
    with pytest.raises(ConvergenceWarning):
        view1 = np.array([[0, 1], [1, 0]])
        view2 = np.array([[1, 0], [0, 1]])
        v1_centroids = np.array([[0, 1],[1, 0]])
        v2_centroids = np.array([[0, 1],[1, 0]])
        centroids = [v1_centroids, v2_centroids]
        kmeans._final_centroids([view1, view2], centroids)

    with pytest.raises(AttributeError):
        kmeans.predict([view1, view2]) 
Example #21
Source File: LinearRegression.py    From AVEC2018 with MIT License
def linRegMult(datas, func, c, part, cMode, cSize):
	res = [func,c,[],[],{}, cMode, cSize]
	warnings.filterwarnings('ignore', category=ConvergenceWarning)
	#Getting the coefficient for each modality on Dev
	if (c != 0):
		reg = func[0](alpha=c)
	else :
		reg = func[0]()
	for nDim in range(len(v.eName)):
		for nMod in range(len(datas['dev'][nDim])):
			if (nMod == 0):
				preds = datas['dev'][nDim][nMod]
			else :
				preds = np.concatenate((preds,datas['dev'][nDim][nMod]),axis=1)
	reg.fit(preds,np.transpose(datas['gsdev']))
	res[2] = reg.coef_
	#Doing the new prediction
	cccs = []
	for nDim in range(len(v.eName)):
		cccs = []
		for s in part :
			if (res[4].get(s,None) == None):
				res[4][s] = []
			res[4][s].append(predMulti(reg.coef_,datas[s],nDim,1, cSize))
			cccs.append(round(cccCalc(res[4][s][nDim],datas['gs'+s][nDim]),3))
		res[3].append(cccs)
	return res
#End linRegMult 
Example #22
Source File: test_spherical_kmeans.py    From mvlearn with Apache License 2.0
def test_final_centroids_less_than_n_clusters():
    with pytest.raises(ConvergenceWarning):
        kmeans = MultiviewSphericalKMeans(n_clusters=3, random_state=RANDOM_SEED)
        view1 = np.random.random((2,11))
        view2 = np.random.random((2,10))
        kmeans.fit([view1, view2]) 
Example #23
Source File: test_spherical_kmeans.py    From mvlearn with Apache License 2.0
def test_final_centroids_less_than_n_clusters():
    with pytest.raises(ConvergenceWarning):
        kmeans = MultiviewSphericalKMeans(n_clusters=3, random_state=RANDOM_SEED)
        view1 = np.random.random((2,5))
        view2 = np.random.random((2,6))
        v1_centroids = np.random.random((3, 5))
        v2_centroids = np.random.random((3, 6))
        centroids = [v1_centroids, v2_centroids]
        kmeans._final_centroids([view1, view2], centroids) 
Example #24
Source File: test_spherical_kmeans.py    From mvlearn with Apache License 2.0
def test_final_centroids_no_consensus():
    with pytest.raises(ConvergenceWarning):
        kmeans = MultiviewSphericalKMeans(random_state=RANDOM_SEED)
        view1 = np.array([[0, 1], [1, 0]])
        view2 = np.array([[1, 0], [0, 1]])
        v1_centroids = np.array([[0, 1],[1, 0]])
        v2_centroids = np.array([[0, 1],[1, 0]])
        centroids = [v1_centroids, v2_centroids]
        kmeans._final_centroids([view1, view2], centroids) 
Example #25
Source File: test_learning_shapelets.py    From pyts with BSD 3-Clause "New" or "Revised" License
def test_convergence_warning(X, y, max_iter):
    """Test that the ConvergenceWarning is raised."""
    clf = CrossEntropyLearningShapelets(learning_rate=1e-3, tol=1e-9,
                                        random_state=42, max_iter=max_iter)
    msg = ('Maximum number of iterations reached without converging. Increase '
           'the maximum number of iterations.')
    with pytest.warns(ConvergenceWarning, match=msg):
        clf.fit(X, y) 
Example #26
Source File: test_nca.py    From scikit-hubness with BSD 3-Clause "New" or "Revised" License
def test_convergence_warning():
    nca = NeighborhoodComponentsAnalysis(max_iter=2, verbose=1)
    cls_name = nca.__class__.__name__
    assert_warns_message(ConvergenceWarning,
                         '[{}] NCA did not converge'.format(cls_name),
                         nca.fit, iris_data, iris_target) 
Example #27
Source File: test_gm.py    From dislib with Apache License 2.0
def test_fit_predict_vs_fit_and_predict(self):
        """Tests GaussianMixture fit_predict() eq. fit() and predict() for both
        converged and not converged runs (and a fixed random_state)."""
        x0 = np.random.normal(size=(1000, 2))
        x1 = np.random.normal(size=(2000, 2))
        x0 = np.dot(x0, [[1.2, 1], [0, 0.5]]) + [0, 3]
        x1 = np.dot(x1, [[0.4, 0], [1, 2.5]]) + [1, 0]
        x = np.concatenate((x0, x1))
        x_ds = ds.array(x, (1500, 2))

        # We check the cases with and without convergence
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ConvergenceWarning)
            for max_iter, converges in ((5, False), (100, True)):
                gm1 = GaussianMixture(n_components=2, max_iter=max_iter,
                                      random_state=0)
                gm1.fit(x_ds)
                labels1 = gm1.predict(x_ds)

                gm2 = GaussianMixture(n_components=2, max_iter=max_iter,
                                      random_state=0)
                labels2 = gm2.fit_predict(x_ds)

                self.assertTrue(np.all(labels1.collect() == labels2.collect()))
                self.assertEqual(gm1.n_iter, gm2.n_iter)
                self.assertEqual(converges, gm1.converged_)
                self.assertEqual(gm1.converged_, gm2.converged_)
                self.assertEqual(gm1.lower_bound_, gm2.lower_bound_)

                gm1.weights_ = compss_wait_on(gm1.weights_)
                gm1.means_ = compss_wait_on(gm1.means_)
                gm1.covariances_ = compss_wait_on(gm1.covariances_)
                gm2.weights_ = compss_wait_on(gm2.weights_)
                gm2.means_ = compss_wait_on(gm2.means_)
                gm2.covariances_ = compss_wait_on(gm2.covariances_)

                self.assertTrue(np.all(gm1.weights_ == gm2.weights_))
                self.assertTrue(np.all(gm1.means_ == gm2.means_))
                self.assertTrue(np.all(gm1.covariances_ == gm2.covariances_)) 
Example #28
Source File: test_gm.py    From dislib with Apache License 2.0
def test_not_converged_warning(self):
        """ Tests GaussianMixture warns when not converged """
        with self.assertWarns(ConvergenceWarning):
            x, _ = load_iris(return_X_y=True)
            x_ds = ds.array(x, (75, 4))
            gm = GaussianMixture(max_iter=1)
            gm.fit(x_ds) 
Example #29
Source File: test_doublyrobust.py    From causallib with Apache License 2.0
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
        from sklearn.neural_network import MLPClassifier
        from sklearn.neighbors import KNeighborsClassifier

        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        data = self.create_uninformative_ox_dataset()
        for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                                   RandomForestClassifier(n_estimators=100),
                                   MLPClassifier(hidden_layer_sizes=(5,)),
                                   KNeighborsClassifier(n_neighbors=20)]:
            weight_model = IPW(propensity_learner)
            propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
            for outcome_learner in [GradientBoostingRegressor(n_estimators=10), RandomForestRegressor(n_estimators=10),
                                    MLPRegressor(hidden_layer_sizes=(5,)),
                                    ElasticNet(), RANSACRegressor(), HuberRegressor(), PassiveAggressiveRegressor(),
                                    KNeighborsRegressor(), SVR(), LinearSVR()]:
                outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
                outcome_model = Standardization(outcome_learner)

                with self.subTest("Test fit & predict using {} & {}".format(propensity_learner_name,
                                                                            outcome_learner_name)):
                    model = self.estimator.__class__(outcome_model, weight_model)
                    model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                    model.estimate_individual_outcome(data["X"], data["a"])
                    self.assertTrue(True)  # Fit did not crash 
Example #30
Source File: base.py    From sparsereg with MIT License
def _reduce(self, x, y):
        """Iterates the thresholding. Assumes an initial guess is saved in self.coef_ and self.ind_"""
        ind = self.ind_
        n_samples, n_features = x.shape
        n_features_selected = sum(ind)

        for _ in range(self.iters, self.max_iter):
            if np.count_nonzero(ind) == 0:
                warnings.warn(
                    "Sparsity parameter is too big ({}) and eliminated all coeficients".format(self.threshold)
                )
                coef = np.zeros_like(ind, dtype=float)
                break

            coef = self._regress(x[:, ind], y, self.alpha)
            coef, ind = self._sparse_coefficients(n_features, ind, coef, self.threshold)

            if sum(ind) == n_features_selected or self._no_change():
                # could not (further) select important features
                break
        else:
            warnings.warn(
                "STRidge._reduce did not converge after {} iterations.".format(self.max_iter),
                ConvergenceWarning,
            )
            try:
                coef
            except NameError:
                coef = self.coef_
                warnings.warn("STRidge._reduce has no iterations left to determine coef", ConvergenceWarning)
        self.coef_ = coef
        self.ind_ = ind