Python nets.inception_resnet_v2.inception_resnet_v2() Examples

The following are 5 code examples of nets.inception_resnet_v2.inception_resnet_v2(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out the other functions and classes available in the nets.inception_resnet_v2 module.
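For orientation, here is a minimal sketch of the basic call pattern (assuming the TF-Slim models repository is on the Python path; this snippet is not taken from the examples below):

import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import inception_resnet_v2

# 299x299 is the network's default input size; the 1001 classes include a background class.
images = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
    logits, end_points = inception_resnet_v2.inception_resnet_v2(
        images, num_classes=1001, is_training=False)
probs = end_points['Predictions']  # softmax probabilities, as used in Example #5 below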
Example #1
Source File: target_attack.py    From Targeted-Adversarial-Attack with Apache License 2.0
def graph_small(x, target_class_input, i, x_max, x_min, grad):
  eps = 2.0 * FLAGS.max_epsilon / 255.0
  alpha = eps / 28
  momentum = FLAGS.momentum
  num_classes = 1001

  with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits_v3, end_points_v3 = inception_v3.inception_v3(
        x, num_classes=num_classes, is_training=False)

  with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
    logits_ensadv_res_v2, end_points_ensadv_res_v2 = inception_resnet_v2.inception_resnet_v2(
        x, num_classes=num_classes, is_training=False, scope='EnsAdvInceptionResnetV2')
            
  one_hot_target_class = tf.one_hot(target_class_input, num_classes)

  logits = (logits_v3 + 2 * logits_ensadv_res_v2) / 3
  auxlogits = (end_points_v3['AuxLogits'] + 2 * end_points_ensadv_res_v2['AuxLogits']) / 3
  cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class,
                                                  logits,
                                                  label_smoothing=0.0,
                                                  weights=1.0)
  cross_entropy += tf.losses.softmax_cross_entropy(one_hot_target_class,
                                                   auxlogits,
                                                   label_smoothing=0.0,
                                                   weights=0.4)
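  # Momentum step: normalize the gradient by its per-image standard deviation,
  # fold it into the accumulated momentum, then descend towards the target class
  # with a quantized, clipped update.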
  noise = tf.gradients(cross_entropy, x)[0]
  noise = noise / tf.reshape(
      tf.contrib.keras.backend.std(tf.reshape(noise, [FLAGS.batch_size, -1]), axis=1),
      [FLAGS.batch_size, 1, 1, 1])
  noise = momentum * grad + noise
  noise = noise / tf.reshape(
      tf.contrib.keras.backend.std(tf.reshape(noise, [FLAGS.batch_size, -1]), axis=1),
      [FLAGS.batch_size, 1, 1, 1])
  x = x - alpha * tf.clip_by_value(tf.round(noise), -2, 2)
  x = tf.clip_by_value(x, x_min, x_max)
  i = tf.add(i, 1)
  return x, target_class_input, i, x_max, x_min, noise 
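graph_small above is the body of a tf.while_loop: each call performs one step of a targeted, momentum-based iterative attack against a weighted ensemble of a standard InceptionV3 and an ensemble-adversarially trained Inception-ResNet-v2 (the latter weighted twice as much), with both networks' auxiliary logits entering the loss with weight 0.4.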
Example #2
Source File: model.py    From ICPR_TextDection with GNU General Public License v3.0
def model(images, weight_decay=1e-5, is_training=True):
    images = mean_image_subtraction(images)
    with slim.arg_scope(inception_arg_scope(weight_decay=weight_decay)):
        logits, end_points = inception_resnet_v2(images, num_classes=None, is_training=is_training)
    for key in end_points.keys():
        print(key, end_points[key])
    return logits, end_points
    # print(end_points.keys())
    # with tf.variable_scope('feature_fusion', values=[end_points.values()]):
    #     batch_norm_params = {
    #         'decay': 0.997,
    #         'epsilon': 1e-5,
    #         'scale': True,
    #         'is_training': is_training
    #     }
    #     with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,
    #                         normalizer_params=batch_norm_params, weights_regularizer=slim.l2_regularizer(weight_decay)):
    #         f = [end_points['Scale-5'],     # 16
    #              end_points['Scale-4'],  # 32
    #              end_points['Scale-3'],  # 64
    #              end_points['Scale-2'],     # 128
    #              end_points['Scale-1']]     # 256
    #         g = [None, None, None, None, None]
    #         h = [None, None, None, None, None]
    #         num_outputs = [None, 1024, 128, 64, 32]
    #         for i in range(5):
    #             if i == 0:
    #                 h[i] = f[i]
    #             else:
    #                 # A fusion step that reduces the channel dimension; kernel size is 1
    #                 c1_1 = slim.conv2d(tf.concat([g[i-1], f[i]], axis=-1), num_outputs=num_outputs[i], kernel_size=1)
    #                 h[i] = slim.conv2d(c1_1, num_outputs=num_outputs[i], kernel_size=3)
    #             if i <= 3:
    #                 g[i] = unpool(h[i])
    #                 # g[i] = slim.conv2d(g[i], num_outputs[i + 1], 1)
    #                 # g[i] = slim.conv2d(g[i], num_outputs[i + 1], 3)
    #             else:
    #                 g[i] = slim.conv2d(h[i], num_outputs[i], 3)
    #             print("Shape of f_{} {}, h_{} {}, g_{} {}".format(i, f[i].shape, i, h[i].shape, i, g[i].shape))
    #         F_score = slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None)
    #         if FLAGS.geometry == 'RBOX':
    #             # 4 channel of axis aligned bbox and 1 channel rotation angle
    #             print('RBOX')
    #             geo_map = slim.conv2d(g[4], 4, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) * FLAGS.text_scale
    #             angle_map = (slim.conv2d(g[4], 1, 1, activation_fn=tf.nn.sigmoid,
    #                                      normalizer_fn=None) - 0.5) * np.pi / 2  # angle is between [-45, 45]
    #             F_geometry = tf.concat([geo_map, angle_map], axis=-1)
    #         else:
    #             # LD modify
    #             # concated_score_map = tf.concat([F_score, g[3]], axis=-1)
    #             # F_geometry = slim.conv2d(g[4], 8, 1, activation_fn=parametric_relu,
    #             #                          normalizer_fn=None) * FLAGS.text_scale
    #             assert False
    #     return F_score, F_geometry 
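In this example the network is used purely as a backbone: num_classes=None builds it without the final classification layer, and the end_points dictionary is printed so that the multi-scale text-detection decoder (left commented out in the function body) can pick the intermediate feature maps it needs.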
Example #3
Source File: target_attack.py    From Targeted-Adversarial-Attack with Apache License 2.0
def graph_large(x, target_class_input, i, x_max, x_min, grad):
  eps = 2.0 * FLAGS.max_epsilon / 255.0
  alpha = eps / 12
  momentum = FLAGS.momentum
  num_classes = 1001

  with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits_v3, end_points_v3 = inception_v3.inception_v3(
        x, num_classes=num_classes, is_training=False)

  with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits_adv_v3, end_points_adv_v3 = inception_v3.inception_v3(
        x, num_classes=num_classes, is_training=False, scope='AdvInceptionV3')

  with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits_ens3_adv_v3, end_points_ens3_adv_v3 = inception_v3.inception_v3(
        x, num_classes=num_classes, is_training=False, scope='Ens3AdvInceptionV3')

  with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits_ens4_adv_v3, end_points_ens4_adv_v3 = inception_v3.inception_v3(
        x, num_classes=num_classes, is_training=False, scope='Ens4AdvInceptionV3')

  with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
    logits_ensadv_res_v2, end_points_ensadv_res_v2 = inception_resnet_v2.inception_resnet_v2(
        x, num_classes=num_classes, is_training=False, scope='EnsAdvInceptionResnetV2')
            
  one_hot_target_class = tf.one_hot(target_class_input, num_classes)

  logits = (4 * logits_v3 + logits_adv_v3 + logits_ens3_adv_v3
            + logits_ens4_adv_v3 + 4 * logits_ensadv_res_v2) / 11
  auxlogits = (4 * end_points_v3['AuxLogits'] + end_points_adv_v3['AuxLogits']
               + end_points_ens3_adv_v3['AuxLogits'] + end_points_ens4_adv_v3['AuxLogits']
               + 4 * end_points_ensadv_res_v2['AuxLogits']) / 11
  cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class,
                                                  logits,
                                                  label_smoothing=0.0,
                                                  weights=1.0)
  cross_entropy += tf.losses.softmax_cross_entropy(one_hot_target_class,
                                                   auxlogits,
                                                   label_smoothing=0.0,
                                                   weights=0.4)
  noise = tf.gradients(cross_entropy, x)[0]
  noise = noise / tf.reshape(
      tf.contrib.keras.backend.std(tf.reshape(noise, [FLAGS.batch_size, -1]), axis=1),
      [FLAGS.batch_size, 1, 1, 1])
  noise = momentum * grad + noise
  noise = noise / tf.reshape(
      tf.contrib.keras.backend.std(tf.reshape(noise, [FLAGS.batch_size, -1]), axis=1),
      [FLAGS.batch_size, 1, 1, 1])
  x = x - alpha * tf.clip_by_value(tf.round(noise), -2, 2)
  x = tf.clip_by_value(x, x_min, x_max)
  i = tf.add(i, 1)
  return x, target_class_input, i, x_max, x_min, noise 
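graph_large performs the same targeted momentum update as graph_small in Example #1, but attacks a five-model ensemble (normally trained, adversarially trained, ens3-adv and ens4-adv InceptionV3, plus the ens-adv Inception-ResNet-v2), weighting the first and last models four times as heavily, and uses a larger step size (eps / 12 instead of eps / 28).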
Example #4
Source File: attack_iter.py    From Translation-Invariant-Attacks with Apache License 2.0
def graph(x, y, i, x_max, x_min, grad):
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    num_iter = FLAGS.num_iter
    alpha = eps / num_iter
    momentum = FLAGS.momentum
    num_classes = 1001

    # should keep original x here for output

    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits_v3, end_points_v3 = inception_v3.inception_v3(
            input_diversity(x), num_classes=num_classes, is_training=False)

    with slim.arg_scope(inception_v4.inception_v4_arg_scope()):
        logits_v4, end_points_v4 = inception_v4.inception_v4(
            input_diversity(x), num_classes=num_classes, is_training=False)

    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
        logits_res_v2, end_points_res_v2 = inception_resnet_v2.inception_resnet_v2(
            input_diversity(x), num_classes=num_classes, is_training=False, reuse=True)

    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        logits_resnet, end_points_resnet = resnet_v2.resnet_v2_152(
            input_diversity(x), num_classes=num_classes, is_training=False)

    logits = (logits_v3 + logits_v4 + logits_res_v2 + logits_resnet) / 4
    auxlogits = (end_points_v3['AuxLogits'] + end_points_v4['AuxLogits'] + end_points_res_v2['AuxLogits']) / 3
    cross_entropy = tf.losses.softmax_cross_entropy(y,
                                                    logits,
                                                    label_smoothing=0.0,
                                                    weights=1.0)
    cross_entropy += tf.losses.softmax_cross_entropy(y,
                                                     auxlogits,
                                                     label_smoothing=0.0,
                                                     weights=0.4)
    noise = tf.gradients(cross_entropy, x)[0]
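    # Translation-invariant attack: convolve the gradient with a pre-computed
    # smoothing kernel (stack_kernel) before the momentum update.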
    noise = tf.nn.depthwise_conv2d(noise, stack_kernel, strides=[1, 1, 1, 1], padding='SAME')
    noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
    noise = momentum * grad + noise
    x = x + alpha * tf.sign(noise)
    x = tf.clip_by_value(x, x_min, x_max)
    i = tf.add(i, 1)
    return x, y, i, x_max, x_min, noise 
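Both input_diversity and stack_kernel are defined elsewhere in attack_iter.py: input_diversity applies a random transformation to the input (typically resize-and-pad, as in the diverse-inputs method), and stack_kernel is the smoothing filter used on the gradient. As a rough sketch only (the kernel size and construction here are assumptions, not copied from the repository), a per-channel Gaussian filter with the [height, width, in_channels, channel_multiplier] shape expected by tf.nn.depthwise_conv2d could be built like this:

import numpy as np
import scipy.stats as st

def gkern(kernlen=15, nsig=3):
    # Normalized 2D Gaussian kernel (size and width are assumed values)
    x = np.linspace(-nsig, nsig, kernlen)
    kern1d = st.norm.pdf(x)
    kern2d = np.outer(kern1d, kern1d)
    return (kern2d / kern2d.sum()).astype(np.float32)

kernel = gkern()
# tf.nn.depthwise_conv2d expects [height, width, in_channels, channel_multiplier]
stack_kernel = np.stack([kernel] * 3, axis=-1)[..., np.newaxis]  # shape (15, 15, 3, 1)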
Example #5
Source File: attack_iter.py    From Translation-Invariant-Attacks with Apache License 2.0
def main(_):
    # Images for inception classifier are normalized to be in [-1, 1] interval,
    # eps is a difference between pixels so it should be in [0, 2] interval.
    # Renormalizing epsilon from [0, 255] to [0, 2].
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    num_classes = 1001
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]

    tf.logging.set_verbosity(tf.logging.INFO)

    print(time.time() - start_time)

    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        x_max = tf.clip_by_value(x_input + eps, -1.0, 1.0)
        x_min = tf.clip_by_value(x_input - eps, -1.0, 1.0)

        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            _, end_points = inception_resnet_v2.inception_resnet_v2(
                x_input, num_classes=num_classes, is_training=False)

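        # Label each image with the classifier's own top-1 prediction, so the
        # while_loop below runs an untargeted attack.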
        predicted_labels = tf.argmax(end_points['Predictions'], 1)
        y = tf.one_hot(predicted_labels, num_classes)

        i = tf.constant(0)
        grad = tf.zeros(shape=batch_shape)
        x_adv, _, _, _, _, _ = tf.while_loop(stop, graph, [x_input, y, i, x_max, x_min, grad])

        # Run computation
        s1 = tf.train.Saver(slim.get_model_variables(scope='InceptionV3'))
        s5 = tf.train.Saver(slim.get_model_variables(scope='InceptionV4'))
        s6 = tf.train.Saver(slim.get_model_variables(scope='InceptionResnetV2'))
        s8 = tf.train.Saver(slim.get_model_variables(scope='resnet_v2'))

        with tf.Session() as sess:
            s1.restore(sess, FLAGS.checkpoint_path_inception_v3)
            s5.restore(sess, FLAGS.checkpoint_path_inception_v4)
            s6.restore(sess, FLAGS.checkpoint_path_inception_resnet_v2)
            s8.restore(sess, FLAGS.checkpoint_path_resnet)
            print(time.time() - start_time)

            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                adv_images = sess.run(x_adv, feed_dict={x_input: images})
                save_images(adv_images, filenames, FLAGS.output_dir)

        print(time.time() - start_time)
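Here main wires the attack together: the one-hot label y is the Inception-ResNet-v2 model's own prediction on the clean input, the graph function from Example #4 is iterated inside tf.while_loop (stop, the loop predicate, is defined elsewhere in the same file), and each of the four ensembled networks is restored from its own checkpoint before the adversarial images are computed batch by batch and written to FLAGS.output_dir.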