Python cifar10_input.CIFAR10Data() Examples
The following are 2 code examples of cifar10_input.CIFAR10Data(). Each example is taken from an open-source project; the project and source file are noted above the code. You may also want to check out the other available functions and classes of the cifar10_input module.
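Both examples construct the loader from a path to an extracted CIFAR-10 data directory and read the evaluation split as NumPy arrays via eval_data.xs and eval_data.ys. A minimal sketch of that interface, assuming a local 'cifar10_data' directory (the path and the shapes noted in comments are inferred from the examples below, not guaranteed by the module):

import cifar10_input

# Hypothetical path to the extracted CIFAR-10 batches; adjust for your setup.
cifar = cifar10_input.CIFAR10Data('cifar10_data')

x_eval = cifar.eval_data.xs  # raw pixel images in [0, 255], shape (10000, 32, 32, 3)
y_eval = cifar.eval_data.ys  # integer class labels
print(x_eval.shape, y_eval[:5])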
Example #1
Source File: eval_ch.py, from the MultiRobustness project (MIT License)
import logging

import numpy as np
import tensorflow as tf
from cleverhans.attacks import CarliniWagnerL2, ElasticNetMethod
from cleverhans.utils import set_log_level

# `batch_eval` and `one_hot` are batch-evaluation and one-hot-encoding
# helpers defined elsewhere in the original project.


def evaluate_ch(model, config, sess, norm='l1', bound=None, verbose=True):
    dataset = config['data']
    num_eval_examples = config['num_eval_examples']
    eval_batch_size = config['eval_batch_size']

    # Load the evaluation split: MNIST via the TF tutorial loader,
    # otherwise CIFAR-10 via cifar10_input, scaled to [0, 1].
    if dataset == "mnist":
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
        X = mnist.test.images[0:num_eval_examples, :].reshape(-1, 28, 28, 1)
        Y = mnist.test.labels[0:num_eval_examples]
        x_image = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    else:
        import cifar10_input
        data_path = config["data_path"]
        cifar = cifar10_input.CIFAR10Data(data_path)
        X = cifar.eval_data.xs[0:num_eval_examples, :].astype(np.float32) / 255.0
        Y = cifar.eval_data.ys[0:num_eval_examples]
        x_image = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])

    # As written, the assert restricts this function to the L1 (Elastic-Net)
    # attack; the L2 (Carlini-Wagner) branch below is unreachable.
    assert norm == 'l1'
    if norm == 'l2':
        attack = CarliniWagnerL2(model, sess)
        params = {'batch_size': eval_batch_size, 'binary_search_steps': 9}
    else:
        attack = ElasticNetMethod(model, sess, clip_min=0.0, clip_max=1.0)
        params = {'beta': 1e-2, 'decision_rule': 'L1',
                  'batch_size': eval_batch_size, 'learning_rate': 1e-2,
                  'max_iterations': 1000}

    if verbose:
        set_log_level(logging.DEBUG, name="cleverhans")

    y = tf.placeholder(tf.int64, shape=[None, 10])
    params['y'] = y
    adv_x = attack.generate(x_image, **params)
    preds_adv = model.get_predicted_class(adv_x)
    preds_nat = model.get_predicted_class(x_image)

    # Run natural and adversarial predictions over the whole eval set in batches.
    all_preds, all_preds_adv, all_adv_x = batch_eval(
        sess, [x_image, y], [preds_nat, preds_adv, adv_x],
        [X, one_hot(Y, 10)], batch_size=eval_batch_size)

    print('acc nat', np.mean(all_preds == Y))
    print('acc adv', np.mean(all_preds_adv == Y))

    # Report perturbation sizes in raw pixel units for CIFAR-10.
    if dataset == "cifar10":
        X *= 255.0
        all_adv_x *= 255.0

    if norm == 'l2':
        lps = np.sqrt(np.sum(np.square(all_adv_x - X), axis=(1, 2, 3)))
    else:
        lps = np.sum(np.abs(all_adv_x - X), axis=(1, 2, 3))
    print('mean lp: ', np.mean(lps))

    # Accuracy when attacks exceeding each perturbation budget are discounted.
    for b in [bound, bound / 2.0, bound / 4.0, bound / 8.0]:
        print('lp={}, acc={}'.format(b, np.mean((all_preds_adv == Y) | (lps > b))))

    all_corr_adv = (all_preds_adv == Y)
    all_corr_nat = (all_preds == Y)
    return all_corr_nat, all_corr_adv, lps
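The config dict supplies the dataset name, evaluation sizes, and data path used above. A hypothetical invocation of the CIFAR-10 branch, assuming a trained model and an open session from the surrounding project (all values below are illustrative):

config = {
    'data': 'cifar10',            # routes to the cifar10_input branch
    'num_eval_examples': 1000,    # illustrative value
    'eval_batch_size': 100,
    'data_path': 'cifar10_data',  # hypothetical dataset directory
}
# `model` must expose get_predicted_class(); `sess` is an active tf.Session.
corr_nat, corr_adv, lps = evaluate_ch(model, config, sess, norm='l1', bound=10.0)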
Example #2
Source File: run_attack.py, from the cifar10_challenge project (MIT License)
import math

import numpy as np
import tensorflow as tf

import cifar10_input
from model import Model

# `data_path` is a module-level global in the original file, read from the
# project's config and pointing at the CIFAR-10 data directory.


def run_attack(checkpoint, x_adv, epsilon):
    cifar = cifar10_input.CIFAR10Data(data_path)

    model = Model(mode='eval')

    saver = tf.train.Saver()

    num_eval_examples = 10000
    eval_batch_size = 100

    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    total_corr = 0

    # Reject submissions whose L-infinity perturbation exceeds the allowed
    # epsilon (with a small tolerance for floating-point error).
    x_nat = cifar.eval_data.xs
    l_inf = np.amax(np.abs(x_nat - x_adv))

    if l_inf > epsilon + 0.0001:
        print('maximum perturbation found: {}'.format(l_inf))
        print('maximum perturbation allowed: {}'.format(epsilon))
        return

    y_pred = []  # label accumulator

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, checkpoint)

        # Iterate over the samples batch-by-batch
        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)

            x_batch = x_adv[bstart:bend, :]
            y_batch = cifar.eval_data.ys[bstart:bend]

            dict_adv = {model.x_input: x_batch,
                        model.y_input: y_batch}
            cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
                                              feed_dict=dict_adv)

            total_corr += cur_corr
            y_pred.append(y_pred_batch)

    accuracy = total_corr / num_eval_examples

    print('Accuracy: {:.2f}%'.format(100.0 * accuracy))
    y_pred = np.concatenate(y_pred, axis=0)
    np.save('pred.npy', y_pred)
    print('Output saved at pred.npy')
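run_attack expects a checkpoint path plus a precomputed array of adversarial images matching the natural evaluation set. A hypothetical driver, with the file names, checkpoint directory, and epsilon value all as assumptions:

import numpy as np
import tensorflow as tf

checkpoint = tf.train.latest_checkpoint('models/adv_trained')  # assumed checkpoint dir
x_adv = np.load('attack.npy')  # assumed adversarial images, shape (10000, 32, 32, 3)
run_attack(checkpoint, x_adv, epsilon=8.0)  # assumed budget in raw pixel units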