Python nets.inception.inception_v4_arg_scope() Examples
The following are 11 code examples of nets.inception.inception_v4_arg_scope(), collected from open-source projects.
You may also want to check out all available functions/classes of the module nets.inception, or try the search function.
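Before the project-specific examples, here is a minimal sketch of the pattern they all share: inception_v4_arg_scope() returns a slim arg scope that supplies the batch norm and weight decay defaults InceptionV4 expects, and the network is then built inside that scope. This sketch assumes the TF 1.x contrib.slim API used throughout this page; the placeholder input and the 1001-class output (ImageNet plus a background class) are illustrative values, not taken from any single project below.

import tensorflow as tf
from nets import inception

slim = tf.contrib.slim

# Any float tensor of shape [batch, 299, 299, 3] works here;
# 299 is inception.inception_v4.default_image_size.
images = tf.placeholder(tf.float32, (1, 299, 299, 3))

# Build the network inside the arg scope so every conv / batch norm
# layer picks up the defaults the pretrained weights assume.
with slim.arg_scope(inception.inception_v4_arg_scope()):
    logits, end_points = inception.inception_v4(
        images, num_classes=1001, is_training=False)
probabilities = tf.nn.softmax(logits)

Note that the arg scope only sets layer defaults; restoring pretrained weights (see Examples #3, #4 and #11) is a separate step.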
Example #1
Source File: inception_v4_test.py From MAX-Image-Segmenter with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with tf.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # The default arg scope disables batch norm scaling, so no gamma
  # variables should have been created.
  self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example #2
Source File: inception_v4_test.py From MAX-Image-Segmenter with Apache License 2.0
def testBatchNormScale(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with tf.contrib.slim.arg_scope(
      inception.inception_v4_arg_scope(batch_norm_scale=True)):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # With batch_norm_scale=True, every BatchNorm layer should own a
  # gamma (scale) variable alongside its moving_mean.
  gamma_names = set(
      v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
  self.assertGreater(len(gamma_names), 0)
  for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
    self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example #3
Source File: pretrained.py From SSD_tensorflow_VOC with Apache License 2.0
def use_inceptionv4(self):
    image_size = inception.inception_v4.default_image_size
    img_path = "../../data/misec_images/EnglishCockerSpaniel_simon.jpg"
    checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"
    with tf.Graph().as_default():
        image_string = tf.read_file(img_path)
        image = tf.image.decode_jpeg(image_string, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            logits, _ = inception.inception_v4(
                processed_images, num_classes=1001, is_training=False)
        probabilities = tf.nn.softmax(logits)

        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_model_variables('InceptionV4'))

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            probabilities = probabilities[0, 0:]
            sorted_inds = [i[0] for i in sorted(
                enumerate(-probabilities), key=lambda x: x[1])]

        self.disp_names(sorted_inds, probabilities)

        plt.figure()
        plt.imshow(np_image.astype(np.uint8))
        plt.axis('off')
        plt.title(img_path)
        plt.show()
    return
Example #4
Source File: pretrained.py From SSD_tensorflow_VOC with Apache License 2.0
def fine_tune_inception(self):
    train_dir = '/tmp/inception_finetuned/'
    image_size = inception.inception_v4.default_image_size
    checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"
    flowers_data_dir = "../../data/flower"
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        dataset = flowers.get_split('train', flowers_data_dir)
        images, _, labels = self.load_batch(
            dataset, height=image_size, width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            logits, _ = inception.inception_v4(
                images, num_classes=dataset.num_classes, is_training=True)

        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        total_loss = slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        # total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
        # total_loss = slim.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', total_loss)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Run the training:
        number_of_steps = math.ceil(dataset.num_samples / 32) * 1
        final_loss = slim.learning.train(
            train_op,
            logdir=train_dir,
            init_fn=self.get_init_fn(checkpoint_path),
            number_of_steps=number_of_steps)

        print('Finished training. Last batch loss %f' % final_loss)
    return
Example #5
Source File: inception_v4_test.py From MAX-Object-Detector with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with tf.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # The default arg scope disables batch norm scaling, so no gamma
  # variables should have been created.
  self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example #6
Source File: inception_v4_test.py From MAX-Object-Detector with Apache License 2.0
def testBatchNormScale(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with tf.contrib.slim.arg_scope(
      inception.inception_v4_arg_scope(batch_norm_scale=True)):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # With batch_norm_scale=True, every BatchNorm layer should own a
  # gamma (scale) variable alongside its moving_mean.
  gamma_names = set(
      v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
  self.assertGreater(len(gamma_names), 0)
  for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
    self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example #7
Source File: inception_v4_test.py From g-tensorflow-models with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with tf.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # The default arg scope disables batch norm scaling, so no gamma
  # variables should have been created.
  self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example #8
Source File: inception_v4_test.py From g-tensorflow-models with Apache License 2.0
def testBatchNormScale(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with tf.contrib.slim.arg_scope(
      inception.inception_v4_arg_scope(batch_norm_scale=True)):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # With batch_norm_scale=True, every BatchNorm layer should own a
  # gamma (scale) variable alongside its moving_mean.
  gamma_names = set(
      v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
  self.assertGreater(len(gamma_names), 0)
  for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
    self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example #9
Source File: inception_v4_test.py From models with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with slim.arg_scope(inception.inception_v4_arg_scope()):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # The default arg scope disables batch norm scaling, so no gamma
  # variables should have been created.
  self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example #10
Source File: inception_v4_test.py From models with Apache License 2.0
def testBatchNormScale(self):
  height, width = 299, 299
  num_classes = 1000
  inputs = tf.placeholder(tf.float32, (1, height, width, 3))
  with slim.arg_scope(
      inception.inception_v4_arg_scope(batch_norm_scale=True)):
    inception.inception_v4(inputs, num_classes, is_training=False)
  # With batch_norm_scale=True, every BatchNorm layer should own a
  # gamma (scale) variable alongside its moving_mean.
  gamma_names = set(
      v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
  self.assertGreater(len(gamma_names), 0)
  for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
    self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example #11
Source File: pretrained.py From SSD_tensorflow_VOC with Apache License 2.0
def use_fined_model(self):
    image_size = inception.inception_v4.default_image_size
    batch_size = 3
    flowers_data_dir = "../../data/flower"
    train_dir = '/tmp/inception_finetuned/'
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        dataset = flowers.get_split('train', flowers_data_dir)
        images, images_raw, labels = self.load_batch(
            dataset, height=image_size, width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            logits, _ = inception.inception_v4(
                images, num_classes=dataset.num_classes, is_training=True)
        probabilities = tf.nn.softmax(logits)

        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_variables_to_restore())

        with tf.Session() as sess:
            with slim.queues.QueueRunners(sess):
                sess.run(tf.initialize_local_variables())
                init_fn(sess)
                np_probabilities, np_images_raw, np_labels = sess.run(
                    [probabilities, images_raw, labels])

                for i in range(batch_size):
                    image = np_images_raw[i, :, :, :]
                    true_label = np_labels[i]
                    predicted_label = np.argmax(np_probabilities[i, :])
                    predicted_name = dataset.labels_to_names[predicted_label]
                    true_name = dataset.labels_to_names[true_label]

                    plt.figure()
                    plt.imshow(image.astype(np.uint8))
                    plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
                    plt.axis('off')
                    plt.show()
    return