Python numpy.sqrt() Examples
The following are 30 code examples of numpy.sqrt(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
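Before the project examples, a minimal refresher on what numpy.sqrt itself does: it returns the element-wise non-negative square root, works on scalars and arrays alike, and yields nan (with a RuntimeWarning) for negative real input unless you pass complex values.

import numpy as np

a = np.array([1.0, 4.0, 9.0])
print(np.sqrt(a))        # [1. 2. 3.]
print(np.sqrt(2.0))      # 1.4142135623730951 (scalars work too)
print(np.sqrt(-1.0))     # nan, with a RuntimeWarning
print(np.sqrt(-1 + 0j))  # 1j (complex input gives complex roots)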
Example #1
Source File: spectrum_painter.py From spectrum_painter with MIT License | 7 votes |
def convert_image(self, filename):
    pic = img.imread(filename)
    # Set FFT size to be double the image size so that the edge of the spectrum
    # stays clear, preventing some bandfilter artifacts
    self.NFFT = 2*pic.shape[1]
    # Repeat image lines until each one occurs often enough to reach the desired line time
    ffts = (np.flipud(np.repeat(pic[:, :, 0], self.repetitions, axis=0) / 16.)**2.) / 256.
    # Embed image in center bins of the FFT
    fftall = np.zeros((ffts.shape[0], self.NFFT))
    startbin = int(self.NFFT/4)
    fftall[:, startbin:(startbin+pic.shape[1])] = ffts
    # Generate random phase vectors for the FFT bins; this is important to prevent
    # high peaks in the output. The phases won't be visible in the spectrum.
    phases = 2*np.pi*np.random.rand(*fftall.shape)
    rffts = fftall * np.exp(1j*phases)
    # Perform the FFT per image line, then concatenate them to form the final signal
    timedata = np.fft.ifft(np.fft.ifftshift(rffts, axes=1), axis=1) / np.sqrt(float(self.NFFT))
    linear = timedata.flatten()
    linear = linear / np.max(np.abs(linear))
    return linear
Example #2
Source File: simulate_sin.py From deep-learning-note with MIT License | 6 votes |
def run_eval(sess, test_X, test_y):
    ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))
    ds = ds.batch(1)
    X, y = ds.make_one_shot_iterator().get_next()
    with tf.variable_scope("model", reuse=True):
        prediction, _, _ = lstm_model(X, [0.0], False)
    predictions = []
    labels = []
    for i in range(TESTING_EXAMPLES):
        p, l = sess.run([prediction, y])
        predictions.append(p)
        labels.append(l)
    predictions = np.array(predictions).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((predictions - labels) ** 2).mean(axis=0))
    print("Root Mean Square Error is: %f" % rmse)  # np.sqrt turns the MSE into an RMSE
    plt.figure()
    plt.plot(predictions, label='predictions')
    plt.plot(labels, label='real_sin')
    plt.legend()
    plt.show()
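The RMSE computation above is self-contained; a minimal NumPy-only sketch of the same metric, using made-up values:

import numpy as np

predictions = np.array([0.9, 2.1, 2.9])
labels = np.array([1.0, 2.0, 3.0])
rmse = np.sqrt(np.mean((predictions - labels) ** 2))
print(rmse)  # ~0.1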
Example #3
Source File: point_cloud.py From FRIDA with MIT License | 6 votes |
def classical_mds(self, D):
    '''
    Classical multidimensional scaling

    Parameters
    ----------
    D : square 2D ndarray
        Euclidean Distance Matrix (matrix containing squared distances between points)
    '''
    # Apply MDS algorithm for denoising
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n))/float(n)
    G = -0.5*np.dot(J, np.dot(D, J))

    s, U = np.linalg.eig(G)

    # we need to sort the eigenvalues in decreasing order
    s = np.real(s)
    o = np.argsort(s)
    s = s[o[::-1]]
    U = U[:, o[::-1]]

    S = np.diag(s)[0:self.dim, :]
    self.X = np.dot(np.sqrt(S), U.T)
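The double-centering step G = -1/2 · J D J recovers a Gram matrix from squared distances, and np.sqrt of its top eigenvalues rebuilds coordinates. A standalone sketch of the same recipe on synthetic points (all names here are illustrative, not from the FRIDA source):

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0], [1.5, 1.5]])  # 4 points in 2D
D = np.sum((pts[:, None, :] - pts[None, :, :]) ** 2, axis=-1)     # squared distances
n = D.shape[0]
J = np.eye(n) - np.ones((n, n)) / n
G = -0.5 * J @ D @ J
s, U = np.linalg.eigh(G)                       # eigh: G is symmetric
s, U = s[::-1], U[:, ::-1]                     # sort eigenvalues in decreasing order
X = np.sqrt(np.maximum(s[:2], 0)) * U[:, :2]   # embed in 2D (up to rotation/reflection)
D2 = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
print(np.allclose(D, D2))  # True: pairwise distances are recovered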
Example #4
Source File: point_cloud.py From FRIDA with MIT License | 6 votes |
def trilateration(self, D):
    '''
    Find the location of points based on their distance matrix using trilateration

    Parameters
    ----------
    D : square 2D ndarray
        Euclidean Distance Matrix (matrix containing squared distances between points)
    '''
    dist = np.sqrt(D)

    # Simpler algorithm (no denoising)
    self.X = np.zeros((self.dim, self.m))

    self.X[:, 1] = np.array([0, dist[0, 1]])
    for i in range(2, self.m):  # originally `xrange(2, m)`; fixed for Python 3 and the missing `self.`
        self.X[:, i] = self.trilateration_single_point(self.X[1, 1],
                                                       dist[0, i], dist[1, i])
Example #5
Source File: layers.py From deep-learning-note with MIT License | 6 votes |
def __forward(self, x, train_flg):
    if self.running_mean is None:
        N, D = x.shape
        self.running_mean = np.zeros(D)
        self.running_var = np.zeros(D)

    if train_flg:
        mu = x.mean(axis=0)
        xc = x - mu
        var = np.mean(xc ** 2, axis=0)
        std = np.sqrt(var + 10e-7)
        xn = xc / std

        self.batch_size = x.shape[0]
        self.xc = xc
        self.xn = xn
        self.std = std
        self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mu
        self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var
    else:
        xc = x - self.running_mean
        xn = xc / np.sqrt(self.running_var + 10e-7)

    out = self.gamma * xn + self.beta
    return out
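The core of batch normalization is the standardization x̂ = (x − μ) / sqrt(σ² + ε). A standalone sketch of the training-time path, with arbitrary shapes:

import numpy as np

x = np.random.randn(32, 4) * 3.0 + 5.0  # batch of 32 samples, 4 features
mu = x.mean(axis=0)
var = x.var(axis=0)
xn = (x - mu) / np.sqrt(var + 1e-7)
print(xn.mean(axis=0))  # ~0 per feature
print(xn.std(axis=0))   # ~1 per feature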
Example #6
Source File: xrft.py From xrft with MIT License | 6 votes |
def _radial_wvnum(k, l, N, nfactor):
    """ Creates a radial wavenumber based on two horizontal wavenumbers
    along with the appropriate index map
    """
    # compute target wavenumbers
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis, :]**2 + l[:, np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    # compute bin index
    kidx = np.digitize(np.ravel(K), ki)
    # compute number of points for each wavenumber
    area = np.bincount(kidx)
    # compute the average radial wavenumber for each bin
    kr = (np.bincount(kidx, weights=K.ravel())
          / np.ma.masked_where(area == 0, area))

    return ki, kr[1:-1]
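The broadcasting expression k[np.newaxis, :]**2 + l[:, np.newaxis]**2 builds the full 2-D grid of radial magnitudes without a loop; a minimal sketch:

import numpy as np

k = np.array([0.0, 1.0, 2.0])   # wavenumbers along x
l = np.array([0.0, 1.0])        # wavenumbers along y
K = np.sqrt(k[np.newaxis, :]**2 + l[:, np.newaxis]**2)
print(K.shape)  # (2, 3): one radial magnitude per (l, k) pair
print(K)        # [[0. 1. 2.], [1. 1.41421356 2.23606798]]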
Example #7
Source File: optimizer.py From deep-learning-note with MIT License | 6 votes |
def update(self, params, grads):
    if self.m is None:
        self.m, self.v = {}, {}
        for key, val in params.items():
            self.m[key] = np.zeros_like(val)
            self.v[key] = np.zeros_like(val)

    self.iter += 1
    lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) / (1.0 - self.beta1 ** self.iter)

    for key in params.keys():
        # self.m[key] = self.beta1*self.m[key] + (1-self.beta1)*grads[key]
        # self.v[key] = self.beta2*self.v[key] + (1-self.beta2)*(grads[key]**2)
        self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
        self.v[key] += (1 - self.beta2) * (grads[key] ** 2 - self.v[key])

        params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)

        # unbias_m += (1 - self.beta1) * (grads[key] - self.m[key])  # correct bias
        # unbias_b += (1 - self.beta2) * (grads[key]*grads[key] - self.v[key])  # correct bias
        # params[key] += self.lr * unbias_m / (np.sqrt(unbias_b) + 1e-7)
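The lr_t line folds Adam's bias correction into the learning rate: lr_t = lr · sqrt(1 − β₂ᵗ) / (1 − β₁ᵗ). A quick numeric look at the factor (the correction converges, so lr_t approaches lr for large t):

import numpy as np

lr, beta1, beta2 = 0.001, 0.9, 0.999
for t in (1, 10, 100, 1000):
    lr_t = lr * np.sqrt(1.0 - beta2 ** t) / (1.0 - beta1 ** t)
    print(t, lr_t)  # lr_t -> lr as t grows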
Example #8
Source File: multi_layer_net_extend.py From deep-learning-note with MIT License | 6 votes |
def __init_weight(self, weight_init_std):
    """Set the initial values of the weights.

    Parameters
    ----------
    weight_init_std : standard deviation of the weights (e.g. 0.01)
        If 'relu' or 'he' is given, the "He initialization" is used.
        If 'sigmoid' or 'xavier' is given, the "Xavier initialization" is used.
    """
    all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
    for idx in range(1, len(all_size_list)):
        scale = weight_init_std
        if str(weight_init_std).lower() in ('relu', 'he'):
            scale = np.sqrt(2.0 / all_size_list[idx - 1])  # recommended scale when using ReLU
        elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
            scale = np.sqrt(1.0 / all_size_list[idx - 1])  # recommended scale when using sigmoid
        self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx - 1], all_size_list[idx])
        self.params['b' + str(idx)] = np.zeros(all_size_list[idx])
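He initialization scales by sqrt(2/fan_in) and Xavier by sqrt(1/fan_in) so that signal magnitude stays stable from layer to layer; a quick sanity check of both scales:

import numpy as np

fan_in, fan_out = 512, 256
W_xavier = np.sqrt(1.0 / fan_in) * np.random.randn(fan_in, fan_out)
W_he = np.sqrt(2.0 / fan_in) * np.random.randn(fan_in, fan_out)
x = np.random.randn(1000, fan_in)
print((x @ W_xavier).var())                # ~1: unit variance preserved for linear units
h = x @ W_he                               # pre-activation variance ~2
print(np.mean(np.maximum(h, 0) ** 2))      # ~1: ReLU halves the second moment, back to ~1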
Example #9
Source File: initializations.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def get_fans(shape, dim_ordering='th'):
    if len(shape) == 2:
        fan_in = shape[0]
        fan_out = shape[1]
    elif len(shape) == 4 or len(shape) == 5:
        # assuming convolution kernels (2D or 3D).
        # TH kernel shape: (depth, input_depth, ...)
        # TF kernel shape: (..., input_depth, depth)
        if dim_ordering == 'th':
            receptive_field_size = np.prod(shape[2:])
            fan_in = shape[1] * receptive_field_size
            fan_out = shape[0] * receptive_field_size
        elif dim_ordering == 'tf':
            receptive_field_size = np.prod(shape[:2])
            fan_in = shape[-2] * receptive_field_size
            fan_out = shape[-1] * receptive_field_size
        else:
            raise ValueError('Invalid dim_ordering: ' + dim_ordering)
    else:
        # no specific assumptions
        fan_in = np.sqrt(np.prod(shape))
        fan_out = np.sqrt(np.prod(shape))
    return fan_in, fan_out
Example #10
Source File: tools_fri_doa_plane.py From FRIDA with MIT License | 6 votes |
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    build the matrix that maps the Fourier series to the visibility
    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that contains the microphones' x coordinates
    :param p_mic_y: a vector that contains the microphones' y coordinates
    :return:
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    count_G = 0
    for q in range(num_mic):
        p_x_outer = p_mic_x[q]
        p_y_outer = p_mic_y[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - p_mic_x[qp]
                p_y_qqp = p_y_outer - p_mic_y[qp]
                norm_p_qqp = np.sqrt(p_x_qqp ** 2 + p_y_qqp ** 2)
                phi_qqp = np.arctan2(p_y_qqp, p_x_qqp)
                G[count_G, :] = (-1j) ** ms * sp.special.jv(ms, norm_p_qqp) * \
                    np.exp(1j * ms * phi_qqp)
                count_G += 1
    return G
Example #11
Source File: picklable_model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def set_input_shape(self, input_shape):
    batch_size, dim = input_shape
    self.input_shape = [batch_size, dim]
    self.output_shape = [batch_size, self.num_hid]
    if self.init_mode == "norm":
        init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                   keep_dims=True))
        init = init * self.init_scale
    elif self.init_mode == "uniform_unit_scaling":
        scale = np.sqrt(3. / dim)
        init = tf.random_uniform([dim, self.num_hid], dtype=tf.float32,
                                 minval=-scale, maxval=scale)
    else:
        raise ValueError(self.init_mode)
    self.W = PV(init)
    if self.use_bias:
        self.b = PV((np.zeros((self.num_hid,)) + self.init_b).astype('float32'))
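The uniform_unit_scaling branch draws from [-sqrt(3/dim), sqrt(3/dim)]; a uniform on [-a, a] has variance a²/3, so this yields weight variance 1/dim. A NumPy check of that identity:

import numpy as np

dim = 784
scale = np.sqrt(3. / dim)
w = np.random.uniform(-scale, scale, size=100000)
print(w.var(), 1. / dim)  # both ~0.00128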
Example #12
Source File: von_mises_stress.py From fenics-topopt with MIT License | 6 votes |
def calculate_diff_stress(self, x, u, nu, side=1):
    """
    Calculate the derivative of the Von Mises stress given the densities x,
    displacements u, and young modulus nu. Optionally, provide the side
    length (default: 1).
    """
    rho = self.penalized_densities(x)
    EB = self.E(nu).dot(self.B(side))
    EBu = sum([EB.dot(u[:, i][self.edofMat]) for i in range(u.shape[1])])
    s11, s22, s12 = numpy.hsplit((EBu * rho / float(u.shape[1])).T, 3)
    drho = self.diff_penalized_densities(x)
    ds11, ds22, ds12 = numpy.hsplit(
        ((1 - rho) * drho * EBu / float(u.shape[1])).T, 3)
    vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
    if abs(vm_stress).sum() > 1e-8:
        dvm_stress = (0.5 * (1. / vm_stress) *
                      (2 * s11 * ds11 - ds11 * s22 - s11 * ds22 +
                       2 * s22 * ds22 + 6 * s12 * ds12))
        return dvm_stress
    return 0
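For plane stress, the von Mises stress is σ_vm = sqrt(σ₁₁² − σ₁₁σ₂₂ + σ₂₂² + 3σ₁₂²), which is the expression under numpy.sqrt above. A standalone evaluation with illustrative values:

import numpy as np

s11, s22, s12 = 120.0, 40.0, 25.0  # plane-stress components, e.g. in MPa
vm = np.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
print(vm)  # ~114.3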
Example #13
Source File: dynamic.py From StructEngPy with MIT License | 6 votes |
def solve_modal(model, k: int):
    """
    Solve the eigen modes of the MDOF system

    params:
        model: FEModel.
        k: number of modes to extract.
    """
    K_, M_ = model.K_, model.M_
    if k > model.DOF:
        logger.info('Warning: the modal number to extract is larger than the system DOFs, only %d modes are available' % model.DOF)
        k = model.DOF
    omega2s, modes = sl.eigsh(K_, k, M_, sigma=0, which='LM')
    delta = modes / np.sum(modes, axis=0)
    model.is_solved = True
    model.mode_ = delta
    model.omega_ = np.sqrt(omega2s).reshape((k, 1))
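The generalized eigenproblem K·φ = ω²·M·φ yields squared natural frequencies, hence the final np.sqrt. A dense-matrix sketch of the same idea with an illustrative 2-DOF system (using scipy.linalg.eigh instead of the sparse eigsh call):

import numpy as np
from scipy.linalg import eigh

K = np.array([[2.0, -1.0], [-1.0, 1.0]])  # stiffness matrix (illustrative)
M = np.eye(2)                             # unit masses
omega2s, modes = eigh(K, M)               # generalized symmetric eigenproblem
print(np.sqrt(omega2s))                   # natural frequencies in rad/s: ~[0.618 1.618]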
Example #14
Source File: util.py From neuropythy with GNU Affero General Public License v3.0 | 6 votes |
def point_on_segment(ac, b, atol=1e-8):
    '''
    point_on_segment((a,c), b) yields True if point b is on segment (a,c) and
    False otherwise. Note that this differs from point_in_segment in that a
    point equal to a or c is considered 'on' but not 'in' the segment. The
    option atol can be given and is used only to test for difference from 0;
    by default it is 1e-8.
    '''
    (a, c) = ac
    abc = [np.asarray(u) for u in (a, b, c)]
    if any(len(u.shape) > 1 for u in abc):
        (a, b, c) = [np.reshape(u, (len(u), -1)) for u in abc]
    else:
        (a, b, c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return np.isclose(dab + dbc - dac, 0, atol=atol)
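The test relies on the triangle inequality: |a−b| + |b−c| equals |a−c| exactly when b lies between a and c. A quick standalone check:

import numpy as np

a, c = np.array([0.0, 0.0]), np.array([4.0, 0.0])
on = np.array([1.5, 0.0])    # between a and c
off = np.array([1.5, 0.1])   # slightly off the segment
for b in (on, off):
    d = (np.sqrt(((b - a)**2).sum()) + np.sqrt(((c - b)**2).sum())
         - np.sqrt(((c - a)**2).sum()))
    print(np.isclose(d, 0, atol=1e-8))  # True, then False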
Example #15
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 5 votes |
def white_to_pial_vectors(white_surface, pial_surface):
    '''
    cortex.white_to_pial_vectors is a (3 x n) matrix of the unit direction
    vectors that point from the n vertices in the cortex's white surface to
    their equivalent positions in the pial surface.
    '''
    u = pial_surface.coordinates - white_surface.coordinates
    d = np.sqrt(np.sum(u**2, axis=0))
    z = np.isclose(d, 0)
    # normalize each column of u by its length so the result is (3 x n), as the
    # docstring requires; where d is 0, the vector stays 0
    return pimms.imm_array(u * (np.logical_not(z) / (d + z)))
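The z mask guards the division: where a length is zero, the denominator becomes 1 and the logical_not factor zeroes the result. A minimal sketch of this zero-safe normalization, independent of neuropythy:

import numpy as np

u = np.array([[3.0, 0.0], [4.0, 0.0], [0.0, 0.0]])  # two 3-D vectors as columns
d = np.sqrt(np.sum(u**2, axis=0))                   # lengths: [5., 0.]
z = np.isclose(d, 0)
unit = u * (np.logical_not(z) / (d + z))
print(unit)  # first column becomes [0.6, 0.8, 0.0]; degenerate column stays all zeros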
Example #16
Source File: initialize.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 5 votes |
def init_lstm_weight(lstm, num_layer=1, seed=1337):
    """Initialize LSTM weights.

    Args:
        lstm: torch.nn.LSTM
        num_layer: int, number of LSTM layers
        seed: int
    """
    for i in range(num_layer):
        weight_h = getattr(lstm, 'weight_hh_l{0}'.format(i))
        scope = np.sqrt(6.0 / (weight_h.size(0) / 4. + weight_h.size(1)))
        torch.manual_seed(seed)
        nn.init.uniform_(getattr(lstm, 'weight_hh_l{0}'.format(i)), -scope, scope)

        weight_i = getattr(lstm, 'weight_ih_l{0}'.format(i))
        scope = np.sqrt(6.0 / (weight_i.size(0) / 4. + weight_i.size(1)))
        torch.manual_seed(seed)
        nn.init.uniform_(getattr(lstm, 'weight_ih_l{0}'.format(i)), -scope, scope)

    if lstm.bias:
        for i in range(num_layer):
            weight_h = getattr(lstm, 'bias_hh_l{0}'.format(i))
            weight_h.data.zero_()
            weight_h.data[lstm.hidden_size: 2 * lstm.hidden_size] = 1
            weight_i = getattr(lstm, 'bias_ih_l{0}'.format(i))
            weight_i.data.zero_()
            weight_i.data[lstm.hidden_size: 2 * lstm.hidden_size] = 1
Example #17
Source File: initialize.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 5 votes |
def init_cnn(cnn_layer, seed=1337):
    """Initialize CNN layer weights.

    Args:
        cnn_layer: weight.size() == [nb_filter, in_channels, [kernel_size]]
        seed: int
    """
    filter_nums = cnn_layer.weight.size(0)
    kernel_size = cnn_layer.weight.size()[2:]
    scope = np.sqrt(2. / (filter_nums * np.prod(kernel_size)))
    torch.manual_seed(seed)
    nn.init.xavier_normal_(cnn_layer.weight)
    cnn_layer.bias.data.uniform_(-scope, scope)
Example #18
Source File: initialize.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 5 votes |
def init_cnn_weight(cnn_layer, seed=1337):
    """Initialize CNN layer weights.

    Args:
        cnn_layer: weight.size() == [nb_filter, in_channels, [kernel_size]]
        seed: int
    """
    filter_nums = cnn_layer.weight.size(0)
    kernel_size = cnn_layer.weight.size()[2:]
    scope = np.sqrt(2. / (filter_nums * np.prod(kernel_size)))
    torch.manual_seed(seed)
    nn.init.normal_(cnn_layer.weight, -scope, scope)
    cnn_layer.bias.data.zero_()
Example #19
Source File: nistats.py From NiBetaSeries with MIT License | 5 votes |
def _calc_beta_map(model, trial_type, hrf_model, tstat):
    """
    Calculates the beta estimates for every voxel from a nistats model

    Parameters
    ----------
    model : nistats.first_level_model.FirstLevelModel
        a fit model of the first level results
    trial_type : str
        the trial to create the beta estimate
    hrf_model : str
        the hemodynamic response function used to fit the model
    tstat : bool
        return the t-statistic for the betas instead of the raw estimates

    Returns
    -------
    beta_map : nibabel.nifti2.Nifti2Image
        nifti image containing voxelwise beta estimates
    """
    import numpy as np
    import nibabel as nib  # needed for Nifti2Image below

    # make it so we do not divide by zero
    TINY = 1e-50
    raw_beta_map = _estimate_map(model, trial_type, hrf_model, 'effect_size')
    if tstat:
        var_map = _estimate_map(model, trial_type, hrf_model, 'effect_variance')
        tstat_array = raw_beta_map.get_fdata() / np.sqrt(np.maximum(var_map.get_fdata(), TINY))
        return nib.Nifti2Image(tstat_array, raw_beta_map.affine, raw_beta_map.header)
    else:
        return raw_beta_map
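The t-statistic here is simply the effect size divided by its standard error, t = β / sqrt(var), with a floor under the variance to avoid dividing by zero; a NumPy sketch with made-up voxel values:

import numpy as np

beta = np.array([2.0, -1.5, 0.0])
var = np.array([0.25, 0.09, 0.0])  # one voxel has zero variance
TINY = 1e-50
t = beta / np.sqrt(np.maximum(var, TINY))
print(t)  # [ 4. -5.  0.]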
Example #20
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 5 votes |
def _diff_order(n):
    u0 = np.arange(n)
    d = int(np.ceil(np.sqrt(n)))
    mtx = np.reshape(np.pad(u0, [(0, d*d - n)], 'constant', constant_values=[(0, -1)]),
                     (d, d))
    h = int((d + 1) / 2)
    u = np.vstack([mtx[::2], mtx[1::2]]).T.flatten()
    return u[u >= 0]
Example #21
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_generate_np(self):
    x_val = np.random.rand(100, 1000)
    perturbation = self.attack.generate_np(x_val) - x_val
    perturbation_norm = np.sqrt(np.sum(perturbation**2, axis=1))
    # test perturbation norm
    self.assertClose(perturbation_norm, self.attack.eps)
Example #22
Source File: track.py From animal-tracking with Creative Commons Zero v1.0 Universal | 5 votes |
def angle_cos(p0, p1, p2):
    d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
    return np.abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))
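This is the standard cosine formula |cos θ| = |d₁·d₂| / (‖d₁‖‖d₂‖), with the two norms fused into a single np.sqrt. For a right angle at p1 it returns 0:

import numpy as np

p0, p1, p2 = np.array([1, 0]), np.array([0, 0]), np.array([0, 1])
d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
print(np.abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2))))  # 0.0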
Example #23
Source File: DeepFM.py From tensorflow-DeepFM with MIT License | 5 votes |
def _initialize_weights(self):
    weights = dict()

    # embeddings
    weights["feature_embeddings"] = tf.Variable(
        tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
        name="feature_embeddings")  # feature_size * K
    weights["feature_bias"] = tf.Variable(
        tf.random_uniform([self.feature_size, 1], 0.0, 1.0),
        name="feature_bias")  # feature_size * 1

    # deep layers
    num_layer = len(self.deep_layers)
    input_size = self.field_size * self.embedding_size
    glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0]))
    weights["layer_0"] = tf.Variable(
        np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])),
        dtype=np.float32)
    weights["bias_0"] = tf.Variable(
        np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
        dtype=np.float32)  # 1 * layers[0]
    for i in range(1, num_layer):
        glorot = np.sqrt(2.0 / (self.deep_layers[i-1] + self.deep_layers[i]))
        weights["layer_%d" % i] = tf.Variable(
            np.random.normal(loc=0, scale=glorot,
                             size=(self.deep_layers[i-1], self.deep_layers[i])),
            dtype=np.float32)  # layers[i-1] * layers[i]
        weights["bias_%d" % i] = tf.Variable(
            np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
            dtype=np.float32)  # 1 * layers[i]

    # final concat projection layer
    if self.use_fm and self.use_deep:
        input_size = self.field_size + self.embedding_size + self.deep_layers[-1]
    elif self.use_fm:
        input_size = self.field_size + self.embedding_size
    elif self.use_deep:
        input_size = self.deep_layers[-1]
    glorot = np.sqrt(2.0 / (input_size + 1))
    weights["concat_projection"] = tf.Variable(
        np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
        dtype=np.float32)  # input_size * 1
    weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32)

    return weights
Example #24
Source File: utils_tf.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def clip_eta(eta, ord, eps):
    """
    Helper function to clip the perturbation to epsilon norm ball.
    :param eta: A tensor with the current perturbation.
    :param ord: Order of the norm (mimics Numpy).
                Possible values: np.inf, 1 or 2.
    :param eps: Epsilon, bound of the perturbation.
    """

    # Clipping perturbation eta to self.ord norm ball
    if ord not in [np.inf, 1, 2]:
        raise ValueError('ord must be np.inf, 1, or 2.')
    reduc_ind = list(xrange(1, len(eta.get_shape())))
    avoid_zero_div = 1e-12
    if ord == np.inf:
        eta = tf.clip_by_value(eta, -eps, eps)
    else:
        if ord == 1:
            norm = tf.maximum(avoid_zero_div,
                              reduce_sum(tf.abs(eta), reduc_ind, keepdims=True))
        elif ord == 2:
            # avoid_zero_div must go inside sqrt to avoid a divide by zero
            # in the gradient through this operation
            norm = tf.sqrt(tf.maximum(avoid_zero_div,
                                      reduce_sum(tf.square(eta), reduc_ind,
                                                 keepdims=True)))
        # We must *clip* to within the norm ball, not *normalize* onto the
        # surface of the ball
        factor = tf.minimum(1., eps / norm)
        eta = eta * factor
    return eta
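The key line is factor = min(1, eps / ‖η‖): perturbations already inside the ball are untouched, while larger ones are scaled back onto its surface. A NumPy sketch of the ord=2 branch:

import numpy as np

eps = 1.0
for eta in (np.array([0.3, 0.4]), np.array([3.0, 4.0])):
    norm = np.sqrt(np.maximum(1e-12, np.sum(eta**2)))
    clipped = eta * np.minimum(1., eps / norm)
    print(clipped, np.linalg.norm(clipped))
# [0.3 0.4] 0.5   (already inside the ball, unchanged)
# [0.6 0.8] 1.0   (scaled back onto the ball)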
Example #25
Source File: utils_tf.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
    """
    Helper function to normalize a batch of vectors.
    :param x: the input placeholder
    :param epsilon: stabilizes division
    :return: the batch of l2 normalized vector
    """
    with tf.name_scope(scope, "l2_batch_normalize") as scope:
        x_shape = tf.shape(x)
        x = tf.contrib.layers.flatten(x)
        x /= (epsilon + reduce_max(tf.abs(x), 1, keepdims=True))
        square_sum = reduce_sum(tf.square(x), 1, keepdims=True)
        x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
        x_norm = tf.multiply(x, x_inv_norm)
        return tf.reshape(x_norm, x_shape, scope)
Example #26
Source File: picklable_model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def fprop(self, x, **kwargs):
    shape = tf.shape(x)
    batch_size = shape[0]
    x = tf.reshape(x, (batch_size,) + self.expanded_shape)
    mean, var = tf.nn.moments(x, [1, 2, 3], keep_dims=True)
    x = (x - mean) / tf.sqrt(var + self.eps)
    x = tf.reshape(x, shape)
    x = x * self.gamma.var + self.beta.var
    return x
Example #27
Source File: picklable_model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def set_input_shape(self, input_shape):
    batch_size, rows, cols, input_channels = input_shape
    assert len(self.kernel_shape) == 2
    kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                               self.output_channels)
    assert len(kernel_shape) == 4
    assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
    if self.init_mode == "norm":
        init = tf.random_normal(kernel_shape, dtype=tf.float32)
        squared_norms = tf.reduce_sum(tf.square(init), axis=(0, 1, 2))
        denom = tf.sqrt(1e-7 + squared_norms)
        init = self.init_scale * init / denom
    elif self.init_mode == "inv_sqrt":
        fan_out = self.kernel_shape[0] * \
            self.kernel_shape[1] * self.output_channels
        init = tf.random_normal(kernel_shape, dtype=tf.float32,
                                stddev=np.sqrt(2.0 / fan_out))
    else:
        raise ValueError(self.init_mode)
    self.kernels = PV(init, name=self.name + "_kernels")
    if self.use_bias:
        self.b = PV(np.zeros((self.output_channels,)).astype('float32'))
    input_shape = list(input_shape)
    orig_batch_size = input_shape[0]
    input_shape[0] = 1
    dummy_batch = tf.zeros(input_shape)
    dummy_output = self.fprop(dummy_batch)
    output_shape = [int(e) for e in dummy_output.get_shape()]
    output_shape[0] = orig_batch_size
    self.output_shape = tuple(output_shape)
Example #28
Source File: madry_cifar10_model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _conv(name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        n = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            'DW', [filter_size, filter_size, in_filters, out_filters],
            tf.float32, initializer=tf.random_normal_initializer(
                stddev=np.sqrt(2.0 / n)))
        return tf.nn.conv2d(x, kernel, strides, padding='SAME')
Example #29
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_attack_strength(self):
    """
    This test generates a random source and guide and feeds them in a
    randomly initialized CNN. Checks if an adversarial example can get
    at least 50% closer to the guide compared to the original distance of
    the source and the guide.
    """
    tf.set_random_seed(1234)
    input_shape = self.input_shape
    x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))

    layer = 'fc7'
    attack_params = {'eps': 5./256, 'clip_min': 0., 'clip_max': 1.,
                     'nb_iter': 10, 'eps_iter': 0.005,
                     'layer': layer}
    x_adv = self.attack.generate(x_src, x_guide, **attack_params)
    h_adv = self.model.fprop(x_adv)[layer]
    h_src = self.model.fprop(x_src)[layer]
    h_guide = self.model.fprop(x_guide)[layer]

    init = tf.global_variables_initializer()
    self.sess.run(init)

    ha, hs, hg, xa, xs, xg = self.sess.run(
        [h_adv, h_src, h_guide, x_adv, x_src, x_guide])

    d_as = np.sqrt(((hs - ha) * (hs - ha)).sum())
    d_ag = np.sqrt(((hg - ha) * (hg - ha)).sum())
    d_sg = np.sqrt(((hg - hs) * (hg - hs)).sum())

    print("L2 distance between source and adversarial example `%s`: %.4f" %
          (layer, d_as))
    print("L2 distance between guide and adversarial example `%s`: %.4f" %
          (layer, d_ag))
    print("L2 distance between source and guide `%s`: %.4f" %
          (layer, d_sg))
    print("d_ag/d_sg*100 `%s`: %.4f" % (layer, d_ag * 100 / d_sg))

    self.assertTrue(d_ag * 100 / d_sg < 50.)
Example #30
Source File: suba.py From libTLDA with MIT License | 5 votes |
def zca_whiten(self, X):
    """
    Perform ZCA whitening (aka Mahalanobis whitening).

    Parameters
    ----------
    X : array (M samples x D features)
        data matrix.

    Returns
    -------
    X : array (M samples x D features)
        whitened data.
    """
    # Covariance matrix
    Sigma = np.cov(X.T)

    # Singular value decomposition
    U, S, V = svd(Sigma)

    # Whitening constant to prevent division by zero
    epsilon = 1e-5

    # ZCA whitening matrix
    W = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + epsilon)), V))

    # Apply whitening matrix
    return np.dot(X, W)
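The whitening matrix W = U · diag(1/sqrt(S + ε)) · V is the inverse square root of the covariance, so the whitened data has approximately identity covariance. A standalone check on synthetic correlated data (assuming svd comes from scipy.linalg, as the method above implies):

import numpy as np
from scipy.linalg import svd

X = np.random.randn(5000, 3) @ np.array([[2.0, 0.5, 0.0],
                                         [0.0, 1.0, 0.3],
                                         [0.0, 0.0, 0.5]])  # correlated features
Sigma = np.cov(X.T)
U, S, V = svd(Sigma)
W = U @ np.diag(1.0 / np.sqrt(S + 1e-5)) @ V
Xw = X @ W
print(np.round(np.cov(Xw.T), 2))  # ~identity matrix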