Python tensorflow.contrib.slim.python.slim.nets.inception_v3.inception_v3_base() Examples
The following are 10 code examples of tensorflow.contrib.slim.python.slim.nets.inception_v3.inception_v3_base(). Each example is taken from an open-source project; the original source file and license are noted above each example. You may also want to check out the other functions and classes available in the tensorflow.contrib.slim.python.slim.nets.inception_v3 module.
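Before the examples, here is a minimal usage sketch of inception_v3_base() distilled from the tests below. It assumes a TensorFlow 1.x installation where tf.contrib (and hence this module) still exists; the batch size, input resolution, and endpoint name are taken from the examples.

# Minimal usage sketch (assumes TensorFlow 1.x with tf.contrib available).
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception_v3

# A batch of 5 RGB images at the canonical 299x299 input resolution.
images = tf.random_uniform((5, 299, 299, 3))

# Build the convolutional trunk up to and including the 'Mixed_7c' block.
# `net` is the final activation (shape [5, 8, 8, 2048] here); `end_points`
# maps every intermediate endpoint name to its tensor.
net, end_points = inception_v3.inception_v3_base(
    images, final_endpoint='Mixed_7c')

print(net.get_shape().as_list())   # [5, 8, 8, 2048]
print(sorted(end_points.keys()))   # 'Conv2d_1a_3x3', ..., 'Mixed_7c'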
Example #1
Source File: inception_v3_test.py From auto-alt-text-lambda-api with MIT License
def testBuildBaseNetwork(self):
  batch_size = 5
  height, width = 299, 299

  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  final_endpoint, end_points = inception_v3.inception_v3_base(inputs)
  self.assertTrue(final_endpoint.op.name.startswith('InceptionV3/Mixed_7c'))
  self.assertListEqual(final_endpoint.get_shape().as_list(),
                       [batch_size, 8, 8, 2048])
  expected_endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
      'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
      'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
  ]
  self.assertItemsEqual(end_points.keys(), expected_endpoints)
Example #2
Source File: inception_v3_test.py From auto-alt-text-lambda-api with MIT License
def testBuildOnlyUptoFinalEndpoint(self):
  batch_size = 5
  height, width = 299, 299
  endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
      'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
      'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
  ]
  for index, endpoint in enumerate(endpoints):
    with ops.Graph().as_default():
      inputs = random_ops.random_uniform((batch_size, height, width, 3))
      out_tensor, end_points = inception_v3.inception_v3_base(
          inputs, final_endpoint=endpoint)
      self.assertTrue(
          out_tensor.op.name.startswith('InceptionV3/' + endpoint))
      self.assertItemsEqual(endpoints[:index + 1], end_points)
Example #3
Source File: inception_v3_test.py From keras-lambda with MIT License
def testBuildBaseNetwork(self):
  batch_size = 5
  height, width = 299, 299

  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  final_endpoint, end_points = inception_v3.inception_v3_base(inputs)
  self.assertTrue(final_endpoint.op.name.startswith('InceptionV3/Mixed_7c'))
  self.assertListEqual(final_endpoint.get_shape().as_list(),
                       [batch_size, 8, 8, 2048])
  expected_endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
      'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
      'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
  ]
  self.assertItemsEqual(end_points.keys(), expected_endpoints)
Example #4
Source File: inception_v3_test.py From keras-lambda with MIT License
def testBuildOnlyUptoFinalEndpoint(self):
  batch_size = 5
  height, width = 299, 299
  endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
      'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
      'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
  ]
  for index, endpoint in enumerate(endpoints):
    with ops.Graph().as_default():
      inputs = random_ops.random_uniform((batch_size, height, width, 3))
      out_tensor, end_points = inception_v3.inception_v3_base(
          inputs, final_endpoint=endpoint)
      self.assertTrue(
          out_tensor.op.name.startswith('InceptionV3/' + endpoint))
      self.assertItemsEqual(endpoints[:index + 1], end_points)
Example #5
Source File: inception_v3_test.py From auto-alt-text-lambda-api with MIT License
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
  batch_size = 5
  height, width = 299, 299

  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  _, end_points = inception_v3.inception_v3_base(
      inputs, final_endpoint='Mixed_7c')
  endpoints_shapes = {
      'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
      'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
      'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
      'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
      'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
      'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
      'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
      'Mixed_5b': [batch_size, 35, 35, 256],
      'Mixed_5c': [batch_size, 35, 35, 288],
      'Mixed_5d': [batch_size, 35, 35, 288],
      'Mixed_6a': [batch_size, 17, 17, 768],
      'Mixed_6b': [batch_size, 17, 17, 768],
      'Mixed_6c': [batch_size, 17, 17, 768],
      'Mixed_6d': [batch_size, 17, 17, 768],
      'Mixed_6e': [batch_size, 17, 17, 768],
      'Mixed_7a': [batch_size, 8, 8, 1280],
      'Mixed_7b': [batch_size, 8, 8, 2048],
      'Mixed_7c': [batch_size, 8, 8, 2048]
  }
  self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
  for endpoint_name in endpoints_shapes:
    expected_shape = endpoints_shapes[endpoint_name]
    self.assertTrue(endpoint_name in end_points)
    self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                         expected_shape)
Example #6
Source File: inception_v3_test.py From auto-alt-text-lambda-api with MIT License
def testModelHasExpectedNumberOfParameters(self):
  batch_size = 5
  height, width = 299, 299
  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  with arg_scope(inception_v3.inception_v3_arg_scope()):
    inception_v3.inception_v3_base(inputs)
  total_params, _ = model_analyzer.analyze_vars(
      variables_lib.get_model_variables())
  self.assertAlmostEqual(21802784, total_params)
Example #7
Source File: inception_v3_test.py From keras-lambda with MIT License
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
  batch_size = 5
  height, width = 299, 299

  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  _, end_points = inception_v3.inception_v3_base(
      inputs, final_endpoint='Mixed_7c')
  endpoints_shapes = {
      'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
      'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
      'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
      'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
      'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
      'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
      'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
      'Mixed_5b': [batch_size, 35, 35, 256],
      'Mixed_5c': [batch_size, 35, 35, 288],
      'Mixed_5d': [batch_size, 35, 35, 288],
      'Mixed_6a': [batch_size, 17, 17, 768],
      'Mixed_6b': [batch_size, 17, 17, 768],
      'Mixed_6c': [batch_size, 17, 17, 768],
      'Mixed_6d': [batch_size, 17, 17, 768],
      'Mixed_6e': [batch_size, 17, 17, 768],
      'Mixed_7a': [batch_size, 8, 8, 1280],
      'Mixed_7b': [batch_size, 8, 8, 2048],
      'Mixed_7c': [batch_size, 8, 8, 2048]
  }
  self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
  for endpoint_name in endpoints_shapes:
    expected_shape = endpoints_shapes[endpoint_name]
    self.assertTrue(endpoint_name in end_points)
    self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                         expected_shape)
Example #8
Source File: inception_v3_test.py From keras-lambda with MIT License
def testModelHasExpectedNumberOfParameters(self):
  batch_size = 5
  height, width = 299, 299
  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  with arg_scope(inception_v3.inception_v3_arg_scope()):
    inception_v3.inception_v3_base(inputs)
  total_params, _ = model_analyzer.analyze_vars(
      variables_lib.get_model_variables())
  self.assertAlmostEqual(21802784, total_params)
Example #9
Source File: model.py From gender-age-classification with Mozilla Public License 2.0
def inception_v3(nlabels, images, pkeep, is_training):
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        # Decay for the moving averages.
        "decay": 0.9997,
        # Epsilon to prevent 0s in variance.
        "epsilon": 0.001,
        # Collection containing the moving mean and moving variance.
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"],
        }
    }
    weight_decay = 0.00004
    stddev = 0.1
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)

    with tf.variable_scope("InceptionV3", "InceptionV3", [images]) as scope:
        with tf.contrib.slim.arg_scope(
                [tf.contrib.slim.conv2d, tf.contrib.slim.fully_connected],
                weights_regularizer=weights_regularizer,
                trainable=True):
            with tf.contrib.slim.arg_scope(
                    [tf.contrib.slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=batch_norm,
                    normalizer_params=batch_norm_params):
                net, end_points = inception_v3_base(images, scope=scope)
                with tf.variable_scope("logits"):
                    shape = net.get_shape()
                    net = avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
                    net = tf.nn.dropout(net, pkeep, name='droplast')
                    net = flatten(net, scope="flatten")

    with tf.variable_scope('output') as scope:
        weights = tf.Variable(
            tf.truncated_normal([2048, nlabels], mean=0.0, stddev=0.01),
            name='weights')
        biases = tf.Variable(
            tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(net, weights), biases, name=scope.name)
        _activation_summary(output)
    return output
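For context, a call site for the classifier above might look roughly like the sketch below. This is a hedged illustration only: the placeholders, the label count of 2, and the feed values are assumptions, not part of the original project.

# Hypothetical call site for the inception_v3() model function above
# (TensorFlow 1.x; placeholder names and nlabels=2 are illustrative assumptions).
images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='images')
pkeep = tf.placeholder(tf.float32, name='pkeep')  # dropout keep probability
logits = inception_v3(nlabels=2, images=images, pkeep=pkeep, is_training=True)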
Example #10
Source File: model.py From gender-age-classification with Mozilla Public License 2.0
def inception_v3_test(nlabels, images, pkeep, is_training):
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        # Decay for the moving averages.
        "decay": 0.9997,
        # Epsilon to prevent 0s in variance.
        "epsilon": 0.001,
        # Collection containing the moving mean and moving variance.
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"],
        }
    }
    weight_decay = 0.00004
    stddev = 0.1
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)

    with tf.variable_scope("InceptionV3", "InceptionV3", [images]) as scope:
        with tf.contrib.slim.arg_scope(
                [tf.contrib.slim.conv2d, tf.contrib.slim.fully_connected],
                weights_regularizer=weights_regularizer,
                trainable=True):
            with tf.contrib.slim.arg_scope(
                    [tf.contrib.slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=batch_norm,
                    normalizer_params=batch_norm_params):
                net, end_points = inception_v3_base(images, scope=scope)
                with tf.variable_scope("logits"):
                    shape = net.get_shape()
                    net = avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
                    net = tf.nn.dropout(net, pkeep, name='droplast')
                    net = flatten(net, scope="flatten")

    with tf.variable_scope('output') as scope:
        weights = tf.Variable(
            tf.truncated_normal([2048, nlabels], mean=0.0, stddev=0.01),
            name='weights')
        biases = tf.Variable(
            tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(net, weights), biases, name=scope.name)
        _activation_summary(output)
    return output, net