Python sklearn.decomposition.fastica() Examples
The following are 5 code examples of sklearn.decomposition.fastica(). Each example is labeled with the project and source file it was taken from.
You may also want to check out all available functions and classes of the sklearn.decomposition module.
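Before the examples, here is a minimal, self-contained sketch of the function's calling convention. The toy signals and variable names are illustrative and are not taken from any of the projects below.

import numpy as np
from sklearn.decomposition import fastica

# Two toy sources mixed into three observed channels.
rng = np.random.RandomState(0)
t = np.linspace(0, 8, 500)
S = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]   # (n_samples, 2) sources
A = rng.randn(2, 3)                                # mixing matrix
X = S.dot(A)                                       # (n_samples, 3) observations

# fastica returns the whitening matrix K, the unmixing matrix W, and the
# estimated sources (plus the feature means if return_X_mean=True is passed).
K, W, S_ = fastica(X, n_components=2, random_state=0)
print(K.shape, W.shape, S_.shape)  # (2, 3) (2, 2) (500, 2)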
Example #1
Source File: test_fastica.py From Mastering-Elasticsearch-7.0 with MIT License
import numpy as np

from sklearn.decomposition import FastICA, fastica
# assert_raises_regex comes from the test utilities of this scikit-learn
# era (sklearn.utils.testing); it was removed in later releases.
from sklearn.utils.testing import assert_raises_regex


def test_fastica_errors():
    n_features = 3
    n_samples = 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    w_init = rng.randn(n_features + 1, n_features + 1)
    # Each call below passes invalid arguments and checks the error message.
    assert_raises_regex(ValueError, 'max_iter should be greater than 1',
                        FastICA, max_iter=0)
    assert_raises_regex(ValueError, r'alpha must be in \[1,2\]',
                        fastica, X, fun_args={'alpha': 0})
    assert_raises_regex(ValueError, 'w_init has invalid shape.+'
                        r'should be \(3L?, 3L?\)',
                        fastica, X, w_init=w_init)
    assert_raises_regex(ValueError,
                        'Invalid algorithm.+must be.+parallel.+or.+deflation',
                        fastica, X, algorithm='pizza')
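Since assert_raises_regex no longer exists in current scikit-learn, an equivalent check ported to a modern environment might look like the following sketch. The use of pytest and of fit-time validation is an assumption on my part, not part of the original example.

import numpy as np
import pytest
from sklearn.decomposition import FastICA


def test_fastica_max_iter_rejected():
    X = np.random.RandomState(0).random_sample((10, 3))
    # Recent scikit-learn validates parameters when fit() is called.
    with pytest.raises(ValueError, match="max_iter"):
        FastICA(max_iter=0).fit(X)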
Example #2
Source File: test_fastica.py From Mastering-Elasticsearch-7.0 with MIT License
import numpy as np
from numpy.testing import assert_almost_equal

from sklearn.decomposition import fastica
# center_and_norm is a helper defined earlier in test_fastica.py: it centers
# each signal and scales it to unit standard deviation, in place.


def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)

    center_and_norm(m)

    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T

    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

    center_and_norm(s_)
    s1_, s2_ = s_

    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_

    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
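A note on the naming in this test: fastica() returns the triple (K, W, S), where K is the pre-whitening matrix and W unmixes the whitened data, so the variable called mixing_ actually holds the unmixing matrix W. For centered data, the mixing-model assertion reads:

# recovered sources = unmixing-of-whitened-data @ whitening @ mixed signals:
#     s_ == np.dot(np.dot(mixing_, k_), m)
# The estimated mixing matrix is the pseudo-inverse of np.dot(mixing_, k_).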
Example #3
Source File: test_decomposition.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
import pandas.util.testing as tm
import pandas_ml as pdml
from sklearn import datasets, decomposition
# This method belongs to a pandas-ml test class that provides
# self.random_state and the assert_numpy_array_almost_equal helper.


def test_fastica(self):
    iris = datasets.load_iris()
    df = pdml.ModelFrame(iris)

    # Default call: both the ModelFrame wrapper and the raw function
    # return the (K, W, S) triple.
    result = df.decomposition.fastica(random_state=self.random_state)
    expected = decomposition.fastica(iris.data,
                                     random_state=self.random_state)

    self.assertEqual(len(result), 3)

    self.assertIsInstance(result[0], pdml.ModelFrame)
    tm.assert_index_equal(result[0].index, df.data.columns)
    self.assert_numpy_array_almost_equal(result[0].values, expected[0])

    self.assertIsInstance(result[1], pdml.ModelFrame)
    self.assert_numpy_array_almost_equal(result[1].values, expected[1])

    self.assertIsInstance(result[2], pdml.ModelFrame)
    tm.assert_index_equal(result[2].index, df.index)
    self.assert_numpy_array_almost_equal(result[2].values, expected[2])

    # With return_X_mean=True, a fourth element (the feature means) is added.
    result = df.decomposition.fastica(return_X_mean=True,
                                      random_state=self.random_state)
    expected = decomposition.fastica(iris.data, return_X_mean=True,
                                     random_state=self.random_state)

    self.assertEqual(len(result), 4)

    self.assertIsInstance(result[0], pdml.ModelFrame)
    tm.assert_index_equal(result[0].index, df.data.columns)
    self.assert_numpy_array_almost_equal(result[0].values, expected[0])

    self.assertIsInstance(result[1], pdml.ModelFrame)
    self.assert_numpy_array_almost_equal(result[1].values, expected[1])

    self.assertIsInstance(result[2], pdml.ModelFrame)
    tm.assert_index_equal(result[2].index, df.index)
    self.assert_numpy_array_almost_equal(result[2].values, expected[2])

    self.assert_numpy_array_almost_equal(result[3], expected[3])
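Condensed, the two call forms exercised by this test differ only in return_X_mean; a sketch of the resulting shapes on the iris data (variable names mine):

from sklearn import datasets, decomposition

X = datasets.load_iris().data                     # (150, 4)
K, W, S = decomposition.fastica(X, random_state=0)
K, W, S, X_mean = decomposition.fastica(X, return_X_mean=True, random_state=0)
print(K.shape, W.shape, S.shape, X_mean.shape)    # (4, 4) (4, 4) (150, 4) (4,)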
Example #4
Source File: test_fastica.py From twitter-stock-recommendation with MIT License
The code of this function is identical, byte for byte, to the test_non_square_fastica listing in Example #2 above (this project vendors the same scikit-learn test file); see that example for the formatted source.
Example #5
Source File: lingam.py From monasca-analytics with Apache License 2.0
import logging

import numpy as np
from sklearn import decomposition

# Module-level logger, as assumed from the surrounding lingam.py.
logger = logging.getLogger(__name__)


# Static method of the LiNGAM class; LiNGAM._perms,
# LiNGAM._cost_non_zero_diag and LiNGAM._cost_strictly_lower_triangular
# are helpers defined elsewhere in lingam.py.
def _discover_structure(data):
    # Add a random noise uniformly distributed to avoid singularity
    # when performing the ICA
    data += np.random.random_sample(data.shape)

    # Create the ICA node to get the inverse of the mixing matrix
    k, w, _ = decomposition.fastica(data)
    w = np.dot(w, k)  # full unmixing matrix: sources = w @ data.T

    n = w.shape[0]
    best_nzd = float("inf")
    best_slt = float("inf")
    best_w_permuted = w
    causality_matrix = None
    causal_perm = None

    if n < 9:
        perm = LiNGAM._perms(n)

        # First pass: permute the rows of w so that its diagonal is as
        # far from zero as possible, then normalize the diagonal to 1.
        for i in range(perm.shape[1]):
            perm_matrix = np.eye(n)
            perm_matrix = perm_matrix[:, perm[:, i]]
            w_permuted = perm_matrix.dot(w)
            cost = LiNGAM._cost_non_zero_diag(w_permuted)
            if cost < best_nzd:
                best_nzd = cost
                best_w_permuted = w_permuted

        w_opt = best_w_permuted
        w_opt = w_opt / np.diag(w_opt).reshape((n, 1))
        b_matrix = np.eye(n) - w_opt

        # Second pass: find the simultaneous row/column permutation that
        # brings b_matrix closest to strictly lower triangular.
        best_b_permuted = b_matrix
        best_i = 0
        for i in range(perm.shape[1]):
            b_permuted = b_matrix[:, perm[:, i]][perm[:, i], :]
            cost = LiNGAM._cost_strictly_lower_triangular(b_permuted)
            if cost < best_slt:
                best_slt = cost
                best_i = i
                best_b_permuted = b_permuted

        causal_perm = perm[:, best_i]
        causality_matrix = b_matrix

        percent_upper = best_slt / np.sum(best_b_permuted ** 2)
        if percent_upper > 0.2:
            # TODO(David): Change that code to raise an exception instead
            logger.error("LiNGAM failed to run on the data set")
            logger.error(
                "--> B permuted matrix is at best {}% lower triangular"
                .format(percent_upper))

    return causality_matrix, causal_perm
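For context (this reasoning is not spelled out in the source): LiNGAM assumes a linear model x = Bx + e with non-Gaussian noise, in which B becomes strictly lower triangular once the variables are sorted in causal order. Because ICA recovers the unmixing matrix only up to row permutation and scaling, the code first permutes the rows of the combined unmixing matrix np.dot(w, k) to push its diagonal away from zero, normalizes the diagonal to one, and forms B = I - w. It then searches for the simultaneous row/column permutation that makes B as close to strictly lower triangular as possible; the remaining upper-triangular mass (percent_upper) is the failure diagnostic logged at the end.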