Python tensorflow.float32() Examples
The following are 30 code examples of tensorflow.float32(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
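Before the examples, here is a minimal, self-contained sketch of the three ways tf.float32 shows up below: as a placeholder/constant dtype, as a cast target, and as a variable dtype. The sketch assumes the TensorFlow 1.x API that all of these projects use; the names x, c, i, f, w are illustrative.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x, matching the examples on this page

# As a placeholder/constant dtype:
x = tf.placeholder(tf.float32, shape=[None, 2])
c = tf.constant(np.ones(3), dtype=tf.float32)

# As a cast target:
i = tf.constant([1, 2, 3], dtype=tf.int32)
f = tf.cast(i, tf.float32)

# As a variable dtype:
w = tf.get_variable('w', shape=[2, 2], dtype=tf.float32)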
Example #1
Source File: networks.py From disentangling_conditional_gans with MIT License | 6 votes |
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])   # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                              # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)          # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1,2,3], keep_dims=True)     # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                 # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.

#----------------------------------------------------------------------------
# Generator network used in the paper.
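For context, a hypothetical call site (shapes and names here are illustrative, not from the repo): the layer is typically inserted near the end of a progressive-GAN discriminator and appends one extra statistics feature map along the channel axis.

# Hypothetical usage: a [16, 128, 4, 4] NCHW activation gains one fmap.
x = tf.placeholder(tf.float32, [16, 128, 4, 4])
y = minibatch_stddev_layer(x, group_size=4)  # -> shape [16, 129, 4, 4]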
Example #2
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_np_caches_graph_computation_for_eps_clip_or_xi(self):

    x_val = np.random.rand(1, 2)
    x_val = np.array(x_val, dtype=np.float32)

    self.attack.generate_np(x_val, eps=.3, num_iterations=10,
                            clip_max=-5.0, clip_min=-5.0,
                            xi=1e-6)

    # Monkey-patch tf.gradients to raise: if the second call rebuilt the
    # graph instead of reusing the cached one, it would crash.
    old_grads = tf.gradients

    def fn(*x, **y):
        raise RuntimeError()
    tf.gradients = fn

    self.attack.generate_np(x_val, eps=.2, num_iterations=10,
                            clip_max=-4.0, clip_min=-4.0,
                            xi=1e-5)

    tf.gradients = old_grads
Example #3
Source File: siamese_network_semantic.py From deep-siamese-text-similarity with MIT License | 6 votes |
def stackedRNN(self, x, dropout, scope, embedding_size, sequence_length, hidden_units):
    n_hidden = hidden_units
    n_layers = 3
    # Prepare data shape to match `static_rnn` function requirements
    x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
    # print(x)
    # Define lstm cells with tensorflow
    # Forward direction cell
    with tf.name_scope("fw" + scope), tf.variable_scope("fw" + scope):
        stacked_rnn_fw = []
        for _ in range(n_layers):
            fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
            lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout)
            stacked_rnn_fw.append(lstm_fw_cell)
        lstm_fw_cell_m = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn_fw, state_is_tuple=True)
        outputs, _ = tf.nn.static_rnn(lstm_fw_cell_m, x, dtype=tf.float32)
    return outputs[-1]
Example #4
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_np_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                    binary_search_steps=3,
                                    initial_const=1,
                                    clip_min=-5, clip_max=5,
                                    batch_size=100, y_target=feed_labs)

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example #5
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_attack_strength(self):
    """
    If clipping is not done at each iteration (not passing clip_min and
    clip_max to fgm), this attack fails by
    np.mean(orig_labels == new_labels) == .39.
    """
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                    clip_min=0.5, clip_max=0.7,
                                    nb_iter=5)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #6
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y_target=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example #7
Source File: baseop.py From Traffic_sign_detection_YOLO with MIT License | 6 votes |
def wrap_variable(self, var):
    """wrap layer.w into variables"""
    val = self.lay.w.get(var, None)
    if val is None:
        shape = self.lay.wshape[var]
        args = [0., 1e-2, shape]
        if 'moving_mean' in var:
            val = np.zeros(shape)
        elif 'moving_variance' in var:
            val = np.ones(shape)
        else:
            val = np.random.normal(*args)
        self.lay.w[var] = val.astype(np.float32)
        self.act = 'Init '
    if not self.var:
        return

    val = self.lay.w[var]
    self.lay.w[var] = tf.constant_initializer(val)
    if var in self._SLIM:
        return
    with tf.variable_scope(self.scope):
        self.lay.w[var] = tf.get_variable(var,
            shape=self.lay.wshape[var],
            dtype=tf.float32,
            initializer=self.lay.w[var])
Example #8
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_np_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                    binary_search_steps=3,
                                    initial_const=1,
                                    clip_min=-5, clip_max=5,
                                    batch_size=100, y_target=feed_labs)

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example #9
Source File: model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def set_input_shape(self, input_shape):
    batch_size, rows, cols, input_channels = input_shape
    kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                               self.output_channels)
    assert len(kernel_shape) == 4
    assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
    init = tf.random_normal(kernel_shape, dtype=tf.float32)
    init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                               axis=(0, 1, 2)))
    self.kernels = tf.Variable(init)
    self.b = tf.Variable(
        np.zeros((self.output_channels,)).astype('float32'))
    input_shape = list(input_shape)
    input_shape[0] = 1
    dummy_batch = tf.zeros(input_shape)
    dummy_output = self.fprop(dummy_batch)
    output_shape = [int(e) for e in dummy_output.get_shape()]
    output_shape[0] = batch_size
    self.output_shape = tuple(output_shape)
Example #10
Source File: sequence.py From icme2019 with MIT License | 6 votes |
def call(self, x):
    if (self.size == None) or (self.mode == 'sum'):
        self.size = int(x.shape[-1])

    position_j = 1. / \
        K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
    position_j = K.expand_dims(position_j, 0)

    position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
    position_i = K.expand_dims(position_i, 2)
    position_ij = K.dot(position_i, position_j)
    outputs = K.concatenate(
        [K.cos(position_ij), K.sin(position_ij)], 2)

    if self.mode == 'sum':
        if self.scale:
            outputs = outputs * outputs ** 0.5
        return x + outputs
    elif self.mode == 'concat':
        return K.concatenate([outputs, x], 2)
Example #11
Source File: model.py From Neural-LP with MIT License | 6 votes |
def _build_input(self):
    self.tails = tf.placeholder(tf.int32, [None])
    self.heads = tf.placeholder(tf.int32, [None])
    self.targets = tf.one_hot(indices=self.heads, depth=self.num_entity)

    if not self.query_is_language:
        self.queries = tf.placeholder(tf.int32, [None, self.num_step])
        self.query_embedding_params = tf.Variable(self._random_uniform_unit(
                                                      self.num_query + 1,  # <END> token
                                                      self.query_embed_size),
                                                  dtype=tf.float32)

        rnn_inputs = tf.nn.embedding_lookup(self.query_embedding_params,
                                            self.queries)
    else:
        self.queries = tf.placeholder(tf.int32, [None, self.num_step, self.num_word])
        self.vocab_embedding_params = tf.Variable(self._random_uniform_unit(
                                                      self.num_vocab + 1,  # <END> token
                                                      self.vocab_embed_size),
                                                  dtype=tf.float32)
        embedded_query = tf.nn.embedding_lookup(self.vocab_embedding_params,
                                                self.queries)
        rnn_inputs = tf.reduce_mean(embedded_query, axis=2)

    return rnn_inputs
Example #12
Source File: preview.py From cwavegan with MIT License | 6 votes |
def noise_input_fn(params):
    """Input function for generating samples for PREDICT mode.

    Generates a single Tensor of fixed random noise. Use tf.data.Dataset to
    signal to the estimator when to terminate the generator returned by
    predict().

    Args:
        params: param `dict` passed by TPUEstimator.

    Returns:
        1-element `dict` containing the randomly generated noise.
    """
    # random noise
    np.random.seed(0)
    noise_dataset = tf.data.Dataset.from_tensors(tf.constant(
        np.random.randn(params['batch_size'], FLAGS.noise_dim),
        dtype=tf.float32))
    noise = noise_dataset.make_one_shot_iterator().get_next()
    return {'random_noise': noise}, None
Example #13
Source File: dump_tfrecord.py From cwavegan with MIT License | 6 votes |
def _mapper(example_proto):
    features = {
        'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True),
        'label': tf.FixedLenSequenceFeature([], tf.string, allow_missing=True)
    }
    example = tf.parse_single_example(example_proto, features)

    wav = example['samples'][:, 0]
    wav = wav[:16384]
    wav_len = tf.shape(wav)[0]
    wav = tf.pad(wav, [[0, 16384 - wav_len]])

    label = tf.reduce_join(example['label'], 0)

    return wav, label
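A sketch of how a mapper like this is typically wired into a TF 1.x input pipeline; the file name and batch size are hypothetical, not from the repo.

dataset = tf.data.TFRecordDataset('train.tfrecord')  # hypothetical path
dataset = dataset.map(_mapper).batch(64)
wav, label = dataset.make_one_shot_iterator().get_next()
# wav: [64, 16384] float32 waveforms; label: [64] strings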
Example #14
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0 | 6 votes |
def build_pgd_attack(self, eps):
    victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)

    def one_step_attack(image, grad):
        """
        core components of this attack are:
        (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
        (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
        (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
        """
        orig_image = image
        image = self.structure(image)
        image = (image - 127.5) / 128.0
        image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
        prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        embeddings = tf.reshape(embeddings[0], [512, 1])
        objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))  # to be maximized

        noise, = tf.gradients(objective, orig_image)

        noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
        noise = 0.9 * grad + noise

        adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0, lower_bound, upper_bound)
        return adv, noise

    input = tf.to_float(self.image_batch)
    lower_bound = tf.clip_by_value(input - eps, 0, 255.)
    upper_bound = tf.clip_by_value(input + eps, 0, 255.)

    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        adv, _ = tf.while_loop(
            lambda _, __: True, one_step_attack,
            (input, tf.zeros_like(input)),
            back_prop=False,
            maximum_iterations=100,
            parallel_iterations=1)
    self.adv_image = adv
    return adv
Example #15
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    x = tf.placeholder(tf.float32, x_val.shape)

    x_adv_p = self.attack.generate(x, over_shoot=0.02, max_iter=50,
                                   nb_candidate=2, clip_min=-5,
                                   clip_max=5)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #16
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), orig_labs] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #17
Source File: dataset.py From disentangling_conditional_gans with MIT License | 6 votes |
def __init__(self,
             resolution=1024,
             num_channels=3,
             dtype='uint8',
             dynamic_range=[0, 255],
             label_size=0,
             label_dtype='float32'):
    self.resolution = resolution
    self.resolution_log2 = int(np.log2(resolution))
    self.shape = [num_channels, resolution, resolution]
    self.dtype = dtype
    self.dynamic_range = dynamic_range
    self.label_size = label_size
    self.label_dtype = label_dtype
    self._tf_minibatch_var = None
    self._tf_lod_var = None
    self._tf_minibatch_np = None
    self._tf_labels_np = None

    assert self.resolution == 2 ** self.resolution_log2
    with tf.name_scope('Dataset'):
        self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
        self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
Example #18
Source File: tfutil.py From disentangling_conditional_gans with MIT License | 6 votes |
def autosummary(name, value):
    id = name.replace('/', '_')
    if is_tf_expression(value):
        with tf.name_scope('summary_' + id), tf.device(value.device):
            update_op = _create_autosummary_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value)
    else:  # python scalar or numpy array
        if name not in _autosummary_immediate:
            with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
                update_value = tf.placeholder(tf.float32)
                update_op = _create_autosummary_var(name, update_value)
                _autosummary_immediate[name] = update_op, update_value
        update_op, update_value = _autosummary_immediate[name]
        run(update_op, {update_value: np.float32(value)})
        return value

# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph.
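A hedged usage sketch (the summary names and the loss tensor are illustrative): the same helper covers both code paths, wrapping a graph tensor with the update as a control dependency, or logging a Python scalar immediately.

# Graph path: returns `loss` with the summary update attached.
loss = autosummary('Loss/total', loss)

# Immediate path: a Python scalar or numpy value is written right away.
autosummary('Progress/lod', 2.0)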
Example #19
Source File: tfutil.py From disentangling_conditional_gans with MIT License | 6 votes |
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2))  # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var),
                        lambda: tf.assign_add(var, v),
                        lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
Example #20
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), orig_labs] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #21
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_generate_np_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                    binary_search_steps=3,
                                    initial_const=1,
                                    clip_min=-5, clip_max=5,
                                    batch_size=100, y_target=feed_labs)

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example #22
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_attack_strength(self):
    """
    If clipping is not done at each iteration (not using clip_min and
    clip_max), this attack fails by
    np.mean(orig_labels == new_labels) == .5
    """
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, eps=1.0, eps_iter=0.05,
                                    clip_min=0.5, clip_max=0.7,
                                    nb_iter=5)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #23
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_generate_np_high_confidence_targeted_examples(self):

    trivial_model = TrivialModel()

    for CONFIDENCE in [0, 2.3]:
        x_val = np.random.rand(10, 1) - .5
        x_val = np.array(x_val, dtype=np.float32)

        feed_labs = np.zeros((10, 2))
        feed_labs[np.arange(10), np.random.randint(0, 2, 10)] = 1
        attack = CarliniWagnerL2(trivial_model, sess=self.sess)
        x_adv = attack.generate_np(x_val,
                                   max_iterations=100,
                                   binary_search_steps=2,
                                   learning_rate=1e-2,
                                   initial_const=1,
                                   clip_min=-10, clip_max=10,
                                   confidence=CONFIDENCE,
                                   y_target=feed_labs,
                                   batch_size=10)

        new_labs = self.sess.run(trivial_model.get_logits(x_adv))

        good_labs = new_labs[np.arange(10), np.argmax(feed_labs, axis=1)]
        bad_labs = new_labs[np.arange(10), 1 - np.argmax(feed_labs, axis=1)]

        self.assertClose(CONFIDENCE, np.min(good_labs - bad_labs), atol=1e-1)
        self.assertTrue(np.mean(np.argmax(new_labs, axis=1) ==
                                np.argmax(feed_labs, axis=1)) > .9)
Example #24
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_generate_np_can_be_called_with_different_decay_factor(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    for decay_factor in [0.0, 0.5, 1.0]:
        x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
                                        decay_factor=decay_factor,
                                        clip_min=-5.0, clip_max=5.0)

        delta = np.max(np.abs(x_adv - x_val), axis=1)
        self.assertClose(delta, 0.5)
Example #25
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def generate_adversarial_examples_np(self, ord, eps, **kwargs):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, eps=eps, ord=ord,
                                    clip_min=-5, clip_max=5, **kwargs)
    if ord == np.inf:
        delta = np.max(np.abs(x_adv - x_val), axis=1)
    elif ord == 1:
        delta = np.sum(np.abs(x_adv - x_val), axis=1)
    elif ord == 2:
        delta = np.sum(np.square(x_adv - x_val), axis=1) ** .5

    return x_val, x_adv, delta
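A hypothetical caller, to show what the helper returns: the original inputs, the perturbed inputs, and the per-example norm of the perturbation under the requested ord. The assertion is illustrative and assumes an L-infinity attack that saturates eps.

# e.g. inside another test of the same class (illustrative):
x_val, x_adv, delta = self.generate_adversarial_examples_np(
    ord=np.inf, eps=0.5)
self.assertClose(delta, 0.5)  # every example moved by eps in L-inf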
Example #26
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_generate_np_does_not_cache_graph_computation_for_nb_iter(self):

    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                    clip_min=-5.0, clip_max=5.0,
                                    nb_iter=10)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)

    # Monkey-patch tf.gradients to record whether the graph is rebuilt
    # when nb_iter changes.
    ok = [False]
    old_grads = tf.gradients

    def fn(*x, **y):
        ok[0] = True
        return old_grads(*x, **y)

    tf.gradients = fn

    x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                    clip_min=-5.0, clip_max=5.0,
                                    nb_iter=11)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)

    tf.gradients = old_grads

    self.assertTrue(ok[0])
Example #27
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_generate_np_high_confidence_untargeted_examples(self):

    trivial_model = TrivialModel()

    for CONFIDENCE in [0, 2.3]:
        x_val = np.random.rand(10, 1) - .5
        x_val = np.array(x_val, dtype=np.float32)

        orig_labs = np.argmax(
            self.sess.run(trivial_model.get_logits(x_val)), axis=1)
        attack = CarliniWagnerL2(trivial_model, sess=self.sess)
        x_adv = attack.generate_np(x_val,
                                   max_iterations=100,
                                   binary_search_steps=2,
                                   learning_rate=1e-2,
                                   initial_const=1,
                                   clip_min=-10, clip_max=10,
                                   confidence=CONFIDENCE,
                                   batch_size=10)

        new_labs = self.sess.run(trivial_model.get_logits(x_adv))

        good_labs = new_labs[np.arange(10), 1 - orig_labs]
        bad_labs = new_labs[np.arange(10), orig_labs]

        self.assertTrue(np.mean(np.argmax(new_labs, axis=1) == orig_labs) == 0)
        self.assertTrue(np.isclose(
            0, np.min(good_labs - (bad_labs + CONFIDENCE)), atol=1e-1))
Example #28
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_generate_np_can_be_called_with_different_eps(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    for eps in [0.1, 0.2, 0.3, 0.4]:
        x_adv = self.attack.generate_np(x_val, eps=eps, ord=np.inf,
                                        clip_min=-5.0, clip_max=5.0)

        delta = np.max(np.abs(x_adv - x_val), axis=1)
        self.assertClose(delta, eps)
Example #29
Source File: tf_logits.py From Black-Box-Audio with MIT License | 5 votes |
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i + 19 * 26]
                         for i in range(0, features.shape[1] - 19 * 26 + 1, 26)], 1)
    features = tf.reshape(features, [batch_size, -1, 19 * 26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0, 1, 2])
    features = (features - mean) / (var ** .5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0] * 10)

    return logits
Example #30
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_generate_np_gives_clipped_adversarial_examples(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, max_iterations=10,
                                    binary_search_steps=1,
                                    learning_rate=1e-3,
                                    initial_const=1,
                                    clip_min=-0.2, clip_max=0.3,
                                    batch_size=100)

    self.assertTrue(-0.201 < np.min(x_adv))
    self.assertTrue(np.max(x_adv) < .301)