Python scipy.linalg.hadamard() Examples
The following are 10 code examples of scipy.linalg.hadamard(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.linalg, or try the search function.
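scipy.linalg.hadamard(n, dtype=int) builds the n x n Hadamard matrix via the Sylvester construction, so n must be a power of 2 and all entries are +1 or -1. A minimal sketch of a call:

import numpy as np
from scipy.linalg import hadamard

H = hadamard(4)
# [[ 1  1  1  1]
#  [ 1 -1  1 -1]
#  [ 1  1 -1 -1]
#  [ 1 -1 -1  1]]
assert np.array_equal(H @ H.T, 4 * np.eye(4))  # rows are mutually orthogonal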
Example #1
Source File: hadamard.py From landmark-recognition-challenge with GNU General Public License v3.0 | 6 votes
def build(self, input_shape):
    hadamard_size = 2 ** int(math.ceil(math.log(max(input_shape[1], self.output_dim), 2)))
    self.hadamard = K.constant(
        value=hadamard(hadamard_size, dtype=np.int8)[:input_shape[1], :self.output_dim])
    init_scale = 1. / math.sqrt(self.output_dim)
    self.scale = self.add_weight(name='scale',
                                 shape=(1,),
                                 initializer=Constant(init_scale),
                                 trainable=True)
    if self.use_bias:
        self.bias = self.add_weight(name='bias',
                                    shape=(self.output_dim,),
                                    initializer=RandomUniform(-init_scale, init_scale),
                                    trainable=True)
    super(HadamardClassifier, self).build(input_shape)
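Since hadamard() only accepts power-of-two orders, the build() above rounds the larger of the two layer dimensions up to the next power of two and then slices the matrix back down. That sizing step in isolation (the dimensions here are hypothetical):

import math
import numpy as np
from scipy.linalg import hadamard

n_in, n_out = 100, 37  # hypothetical layer dimensions
size = 2 ** int(math.ceil(math.log(max(n_in, n_out), 2)))  # next power of two: 128
proj = hadamard(size, dtype=np.int8)[:n_in, :n_out]
assert proj.shape == (100, 37)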
Example #2
Source File: learning_hadamard.py From learning-circuits with Apache License 2.0 | 6 votes
def hadamard_test():
    # Hadamard matrix for n = 4
    size = 4
    M0 = Butterfly(size,
                   diagonal=2,
                   diag=torch.tensor([1.0, 1.0, -1.0, -1.0], requires_grad=True),
                   subdiag=torch.ones(2, requires_grad=True),
                   superdiag=torch.ones(2, requires_grad=True))
    M1 = Butterfly(size,
                   diagonal=1,
                   diag=torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True),
                   subdiag=torch.tensor([1.0, 0.0, 1.0], requires_grad=True),
                   superdiag=torch.tensor([1.0, 0.0, 1.0], requires_grad=True))
    H = M0.matrix() @ M1.matrix()
    assert torch.allclose(H, torch.tensor(hadamard(4), dtype=torch.float))
    M = ButterflyProduct(size, fixed_order=True)
    M.factors[0] = M0
    M.factors[1] = M1
    assert torch.allclose(M.matrix(), H)
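Written out from the diag/subdiag/superdiag values above, the two butterfly factors correspond to the explicit 4 x 4 matrices below; a plain NumPy check, independent of the Butterfly class, confirms their product is hadamard(4):

import numpy as np
from scipy.linalg import hadamard

M0 = np.array([[1, 0,  1,  0],   # diag [1, 1, -1, -1], ones on the +/-2 off-diagonals
               [0, 1,  0,  1],
               [1, 0, -1,  0],
               [0, 1,  0, -1]])
M1 = np.array([[1,  1, 0,  0],   # diag [1, -1, 1, -1], [1, 0, 1] on the +/-1 off-diagonals
               [1, -1, 0,  0],
               [0,  0, 1,  1],
               [0,  0, 1, -1]])
assert np.array_equal(M0 @ M1, hadamard(4))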
Example #3
Source File: test_fht.py From scikit-learn-extra with BSD 3-Clause "New" or "Revised" License | 5 votes
def test_wikipedia_example():
    input_ = np.array([1, 0, 1, 0, 0, 1, 1, 0], dtype=np.float64)
    copy = input_.copy()
    H = hadamard(8)
    cyfht(input_)
    npt.assert_array_equal(np.dot(copy, H), input_)
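cyfht applies the transform in place in O(n log n) without materializing H. A rough pure-Python equivalent, offered as an illustrative sketch rather than the scikit-learn-extra implementation:

import numpy as np
from scipy.linalg import hadamard

def fwht(a):
    # iterative fast Walsh-Hadamard transform; len(a) must be a power of two
    a = a.copy()
    h = 1
    while h < len(a):
        for i in range(0, len(a), 2 * h):
            for j in range(i, i + h):
                x, y = a[j], a[j + h]
                a[j], a[j + h] = x + y, x - y
        h *= 2
    return a

x = np.array([1, 0, 1, 0, 0, 1, 1, 0], dtype=np.float64)
assert np.array_equal(fwht(x), x @ hadamard(8))  # H is symmetric, so x @ H == H @ x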
Example #4
Source File: test_fht.py From scikit-learn-extra with BSD 3-Clause "New" or "Revised" License | 5 votes
def test_numerical_fuzzing_fht():
    for length in [2, 4, 8, 16, 32, 64]:
        input_ = np.random.normal(size=length)
        copy = input_.copy()
        H = hadamard(length)
        cyfht(input_)
        npt.assert_array_almost_equal(np.dot(copy, H), input_)
Example #5
Source File: test_fht.py From scikit-learn-extra with BSD 3-Clause "New" or "Revised" License | 5 votes
def test_numerical_fuzzing_fht2():
    for length in [2, 4, 8, 16, 32, 64]:
        for rows in [1, 2, 3, 4, 5]:
            input_ = np.random.normal(size=(rows, length))
            copy = input_.copy()
            H = hadamard(length)
            cyfht2(input_)
            npt.assert_array_almost_equal(np.dot(copy, H), input_)
Example #6
Source File: fixed_proj.py From convNet.pytorch with MIT License | 5 votes
def __init__(self, input_size, output_size, bias=True, fixed_weights=True, fixed_scale=None):
    super(HadamardProj, self).__init__()
    self.output_size = output_size
    self.input_size = input_size
    sz = 2 ** int(math.ceil(math.log(max(input_size, output_size), 2)))
    mat = torch.from_numpy(hadamard(sz))
    if fixed_weights:
        self.proj = Variable(mat, requires_grad=False)
    else:
        self.proj = nn.Parameter(mat)
    init_scale = 1. / math.sqrt(self.output_size)
    if fixed_scale is not None:
        self.scale = Variable(torch.Tensor([fixed_scale]), requires_grad=False)
    else:
        self.scale = nn.Parameter(torch.Tensor([init_scale]))
    if bias:
        self.bias = nn.Parameter(torch.Tensor(output_size).uniform_(-init_scale, init_scale))
    else:
        self.register_parameter('bias', None)
    self.eps = 1e-8
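One detail worth noting: hadamard() returns an integer array by default, so torch.from_numpy(mat) yields an integer tensor that must be cast to a floating dtype before it can enter float matrix products (presumably handled in the module's forward pass, which is not shown here). An illustrative cast:

import torch
from scipy.linalg import hadamard

mat = torch.from_numpy(hadamard(8)).float()  # hadamard() gives ints; cast before float ops
x = torch.randn(3, 8)
out = x @ mat[:, :5]  # hypothetical 8 -> 5 projection, just to exercise the cast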
Example #7
Source File: hadamard.py From landmark-recognition-challenge with GNU General Public License v3.0 | 5 votes
def call(self, x, training=None):
    is_training = training not in {0, False}
    output = K.l2_normalize(x, axis=-1) if self.l2_normalize else x
    # pity .dot requires both tensors to be same type, the last one could be int8
    output = -self.scale * K.dot(output, self.hadamard)
    if self.output_raw_logits:
        output_logits = -self.scale * K.dot(x, self.hadamard)  # probably better to reuse output * l2norm
    if self.use_bias:
        output = K.bias_add(output, self.bias)
        if self.output_raw_logits:
            output_logits = K.bias_add(output_logits, self.bias)
    if self.activation is not None:
        output = self.activation(output)
    if self.output_raw_logits:
        return [output, output_logits]
    return output
Example #8
Source File: learning_hadamard.py From learning-circuits with Apache License 2.0 | 5 votes
def _setup(self, config):
    torch.manual_seed(config['seed'])
    self.model = ButterflyProduct(size=config['size'],
                                  fixed_order=config['fixed_order'],
                                  softmax_fn=config['softmax_fn'])
    if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
        self.semantic_loss_weight = config['semantic_loss_weight']
    self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
    self.n_steps_per_epoch = config['n_steps_per_epoch']
    self.target_matrix = torch.tensor(hadamard(config['size']), dtype=torch.float)
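The unnormalized hadamard(n) used as the training target satisfies H @ H.T == n * I, so dividing by sqrt(n) yields an orthogonal matrix. A quick check:

import numpy as np
from scipy.linalg import hadamard

n = 16
H = hadamard(n) / np.sqrt(n)
assert np.allclose(H @ H.T, np.eye(n))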
Example #9
Source File: fixed_proj.py From fix_your_classifier with MIT License | 5 votes
def __init__(self, input_size, output_size, bias=True, fixed_weights=True, fixed_scale=None):
    super(HadamardProj, self).__init__()
    self.output_size = output_size
    self.input_size = input_size
    sz = 2 ** int(math.ceil(math.log(max(input_size, output_size), 2)))
    mat = torch.from_numpy(hadamard(sz))
    if fixed_weights:
        self.proj = Variable(mat, requires_grad=False)
    else:
        self.proj = nn.Parameter(mat)
    init_scale = 1. / math.sqrt(self.output_size)
    if fixed_scale is not None:
        self.scale = Variable(torch.Tensor([fixed_scale]), requires_grad=False)
    else:
        self.scale = nn.Parameter(torch.Tensor([init_scale]))
    if bias:
        self.bias = nn.Parameter(torch.Tensor(output_size).uniform_(-init_scale, init_scale))
    else:
        self.register_parameter('bias', None)
    self.eps = 1e-8
Example #10
Source File: target_matrix.py From learning-circuits with Apache License 2.0 | 4 votes
def named_target_matrix(name, size):
    """
    Parameter:
        name: name of the target matrix
    Return:
        target_matrix: (n, n) numpy array for real matrices or (n, n, 2) for complex matrices.
    """
    if name == 'dft':
        return LA.dft(size, scale='sqrtn')[:, :, None].view('float64')
    elif name == 'idft':
        return np.ascontiguousarray(LA.dft(size, scale='sqrtn').conj().T)[:, :, None].view('float64')
    elif name == 'dft2':
        size_sr = int(math.sqrt(size))
        matrix = np.fft.fft2(np.eye(size_sr**2).reshape(-1, size_sr, size_sr), norm='ortho').reshape(-1, size_sr**2)
        # matrix1d = LA.dft(size_sr, scale='sqrtn')
        # assert np.allclose(np.kron(m1d, m1d), matrix)
        # return matrix[:, :, None].view('float64')
        from butterfly.utils import bitreversal_permutation
        br_perm = bitreversal_permutation(size_sr)
        br_perm2 = np.arange(size_sr**2).reshape(size_sr, size_sr)[br_perm][:, br_perm].reshape(-1)
        matrix = np.ascontiguousarray(matrix[:, br_perm2])
        return matrix[:, :, None].view('float64')
    elif name == 'dct':
        # Need to transpose as dct acts on rows of matrix np.eye, not columns
        # return dct(np.eye(size), norm='ortho').T
        return dct(np.eye(size)).T / math.sqrt(size)
    elif name == 'dst':
        return dst(np.eye(size)).T / math.sqrt(size)
    elif name == 'hadamard':
        return LA.hadamard(size) / math.sqrt(size)
    elif name == 'hadamard2':
        size_sr = int(math.sqrt(size))
        matrix1d = LA.hadamard(size_sr) / math.sqrt(size_sr)
        return np.kron(matrix1d, matrix1d)
    elif name == 'b2':
        size_sr = int(math.sqrt(size))
        import torch
        from butterfly import Block2x2DiagProduct
        b = Block2x2DiagProduct(size_sr)
        matrix1d = b(torch.eye(size_sr)).t().detach().numpy()
        return np.kron(matrix1d, matrix1d)
    elif name == 'convolution':
        np.random.seed(0)
        x = np.random.randn(size)
        return LA.circulant(x) / math.sqrt(size)
    elif name == 'hartley':
        return hartley_matrix(size) / math.sqrt(size)
    elif name == 'haar':
        return haar_matrix(size, normalized=True) / math.sqrt(size)
    elif name == 'legendre':
        grid = np.linspace(-1, 1, size + 2)[1:-1]
        return legendre.legvander(grid, size - 1).T / math.sqrt(size)
    elif name == 'hilbert':
        H = hilbert_matrix(size)
        return H / np.linalg.norm(H, 2)
    elif name == 'randn':
        np.random.seed(0)
        return np.random.randn(size, size) / math.sqrt(size)
    else:
        assert False, 'Target matrix name not recognized or implemented'
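The 'hadamard2' branch works because Sylvester Hadamard matrices have Kronecker-product structure: for power-of-two orders, hadamard(m * n) equals the Kronecker product of hadamard(m) and hadamard(n), so the 2-D transform factors into two 1-D transforms. A quick check of that identity:

import numpy as np
from scipy.linalg import hadamard

assert np.array_equal(np.kron(hadamard(4), hadamard(4)), hadamard(16))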