Python tensorflow.python.ops.random_ops.random_uniform() Examples
The following are 30 code examples of tensorflow.python.ops.random_ops.random_uniform(), collected from open-source projects. The original project and source file are noted above each example. You may also want to check out the other available functions and classes of the module tensorflow.python.ops.random_ops.
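Before the examples, a minimal usage sketch of the call itself (TensorFlow 1.x graph mode; this assumes the public alias tf.random_uniform, which wraps this internal function):

import tensorflow as tf

# A scalar drawn uniformly from [0, 1), and a 2x3 int32 tensor from [0, 10).
# maxval is required for integer dtypes.
scalar = tf.random_uniform([], minval=0, maxval=1.0, seed=42)
matrix = tf.random_uniform([2, 3], minval=0, maxval=10, dtype=tf.int32)

with tf.Session() as sess:
    print(sess.run([scalar, matrix]))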
Example #1
Source File: image_ops_impl.py From auto-alt-text-lambda-api with MIT License
def random_brightness(image, max_delta, seed=None):
  """Adjust the brightness of images by a random factor.

  Equivalent to `adjust_brightness()` using a `delta` randomly picked in the
  interval `[-max_delta, max_delta)`.

  Args:
    image: An image.
    max_delta: float, must be non-negative.
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.

  Returns:
    The brightness-adjusted image.

  Raises:
    ValueError: if `max_delta` is negative.
  """
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')

  delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)
  return adjust_brightness(image, delta)
Example #2
Source File: clustering_ops.py From lambda-packs with MIT License
def _init_clusters_random(self):
  """Does random initialization of clusters.

  Returns:
    Tensor of randomly initialized clusters.
  """
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
  # Note that for mini-batch k-means, we should ensure that the batch size of
  # data used during initialization is sufficiently large to avoid duplicated
  # clusters.
  with ops.control_dependencies(
      [check_ops.assert_less_equal(self._num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        array_ops.reshape(self._num_clusters, [-1]),
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=self._random_seed,
        dtype=dtypes.int64)
    clusters_init = embedding_lookup(
        self._inputs, indices, partition_strategy='div')
    return clusters_init
Example #3
Source File: gmm_ops.py From lambda-packs with MIT License
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
  indices %= math_ops.cast(num_data, dtypes.int64)
  clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init
Example #4
Source File: exponential.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)
  # Uniform variates must be sampled from the open-interval `(0, 1)` rather
  # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
  # because it is the smallest, positive, "normal" number. A "normal" number
  # is such that the mantissa has an implicit leading 1. Normal, positive
  # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
  # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
  # 0.
  sampled = random_ops.random_uniform(
      shape,
      minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
      maxval=1.,
      seed=seed,
      dtype=self.dtype)
  return -math_ops.log(sampled) / self._rate
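This is inverse-transform sampling: if U ~ Uniform(0, 1), then -log(U) / rate follows an Exponential(rate) distribution. The same open-interval pattern recurs in the logistic, geometric, and Gumbel examples below. A small illustrative NumPy check (not part of the original source):

import numpy as np

rate = 2.0
u = np.random.uniform(np.finfo(np.float64).tiny, 1.0, size=1000000)
samples = -np.log(u) / rate
print(samples.mean())  # ~ 1 / rate = 0.5
print(samples.var())   # ~ 1 / rate**2 = 0.25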
Example #5
Source File: init_ops.py From lambda-packs with MIT License
def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  scale = self.scale
  scale_shape = shape
  if partition_info is not None:
    scale_shape = partition_info.full_shape
  fan_in, fan_out = _compute_fans(scale_shape)
  if self.mode == "fan_in":
    scale /= max(1., fan_in)
  elif self.mode == "fan_out":
    scale /= max(1., fan_out)
  else:
    scale /= max(1., (fan_in + fan_out) / 2.)
  if self.distribution == "normal":
    stddev = math.sqrt(scale)
    return random_ops.truncated_normal(
        shape, 0.0, stddev, dtype, seed=self.seed)
  else:
    limit = math.sqrt(3.0 * scale)
    return random_ops.random_uniform(
        shape, -limit, limit, dtype, seed=self.seed)
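In the uniform branch, limit = sqrt(3 * scale) matches the variance targeted by the normal branch: a Uniform(-a, a) variate has variance a**2 / 3, so choosing a = sqrt(3 * scale) yields variance exactly scale. An illustrative NumPy check (not part of the original source; the 512 fan-in is a made-up example):

import numpy as np

scale = 2.0 / 512.0            # e.g. scale = 2 / fan_in for a 512-unit layer
limit = np.sqrt(3.0 * scale)
w = np.random.uniform(-limit, limit, size=1000000)
print(w.var(), scale)          # both ~ 0.0039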
Example #6
Source File: tf_image.py From seglink with GNU General Public License v3.0
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes."""
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates."""
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes
Example #7
Source File: init_ops.py From lambda-packs with MIT License
def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  scale_shape = shape
  if partition_info is not None:
    scale_shape = partition_info.full_shape
  input_size = 1.0
  # Estimating input size is not possible to do perfectly, but we try.
  # The estimate, obtained by multiplying all dimensions but the last one,
  # is the right thing for matrix multiply and convolutions (see above).
  for dim in scale_shape[:-1]:
    input_size *= float(dim)
  # Avoid errors when initializing zero-size tensors.
  input_size = max(input_size, 1.0)
  max_val = math.sqrt(3 / input_size) * self.factor
  return random_ops.random_uniform(
      shape, -max_val, max_val, dtype, seed=self.seed)
Example #8
Source File: laplace.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
  # than `[-1, 1)`. In the case of `(0, 1)` we'd use
  # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
  # positive, "normal" number. However, the concept of subnormality exists
  # only at zero; here we need the smallest usable number larger than -1,
  # i.e., `-1 + eps/2`.
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log1p(-math_ops.abs(uniform_samples)))
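With U ~ Uniform(-1, 1), the transform -sign(U) * log1p(-|U|) is the inverse CDF of the standard Laplace distribution; loc and scale then shift and stretch it. An illustrative NumPy check (not from the original source):

import numpy as np

u = np.random.uniform(np.nextafter(-1.0, 0.0), 1.0, size=1000000)
x = -np.sign(u) * np.log1p(-np.abs(u))
print(x.mean())  # ~ 0
print(x.var())   # ~ 2, the variance of a standard Laplace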
Example #9
Source File: image_ops_impl.py From lambda-packs with MIT License
def random_brightness(image, max_delta, seed=None):
  """Adjust the brightness of images by a random factor.

  Equivalent to `adjust_brightness()` using a `delta` randomly picked in the
  interval `[-max_delta, max_delta)`.

  Args:
    image: An image.
    max_delta: float, must be non-negative.
    seed: A Python integer. Used to create a random seed. See
      @{tf.set_random_seed} for behavior.

  Returns:
    The brightness-adjusted image.

  Raises:
    ValueError: if `max_delta` is negative.
  """
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')

  delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)
  return adjust_brightness(image, delta)
Example #10
Source File: backend.py From lambda-packs with MIT License
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random binomial distribution of values.

  Arguments:
    shape: A tuple of integers, the shape of tensor to create.
    p: A float, `0. <= p <= 1`, probability of binomial distribution.
    dtype: String, dtype of returned tensor.
    seed: Integer, random seed.

  Returns:
    A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return array_ops.where(
      random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
      array_ops.ones(shape, dtype=dtype),
      array_ops.zeros(shape, dtype=dtype))
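Despite the name, the thresholding above draws Bernoulli(p) samples (a binomial with a single trial): each entry is 1 exactly when its uniform draw falls at or below p, which happens with probability p. An illustrative NumPy equivalent (not from the original source):

import numpy as np

p = 0.3
u = np.random.uniform(size=(1000, 1000))
b = np.where(u <= p, 1.0, 0.0)
print(b.mean())  # ~ 0.3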
Example #11
Source File: logistic.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  # Uniform variates must be sampled from the open-interval `(0, 1)` rather
  # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
  # because it is the smallest, positive, "normal" number. A "normal" number
  # is such that the mantissa has an implicit leading 1. Normal, positive
  # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
  # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
  # 0.
  uniform = random_ops.random_uniform(
      shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
      minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
  return sampled * self.scale + self.loc
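Here log(U) - log1p(-U) is the logit function, the inverse CDF of the standard logistic distribution. An illustrative NumPy check (not from the original source); the standard logistic has mean 0 and variance pi**2 / 3:

import numpy as np

u = np.random.uniform(np.finfo(np.float64).tiny, 1.0, size=1000000)
x = np.log(u) - np.log1p(-u)
print(x.mean())  # ~ 0
print(x.var())   # ~ pi**2 / 3, about 3.29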
Example #12
Source File: geometric.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  # Uniform variates must be sampled from the open-interval `(0, 1)` rather
  # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
  # because it is the smallest, positive, "normal" number. A "normal" number
  # is such that the mantissa has an implicit leading 1. Normal, positive
  # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
  # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
  # 0.
  sampled = random_ops.random_uniform(
      array_ops.concat([[n], array_ops.shape(self._probs)], 0),
      minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
      maxval=1.,
      seed=seed,
      dtype=self.dtype)
  return math_ops.floor(
      math_ops.log(sampled) / math_ops.log1p(-self.probs))
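The transform floor(log(U) / log1p(-p)) is inverse-CDF sampling for the geometric distribution counting failures before the first success. A small illustrative NumPy check (not from the original source):

import numpy as np

p = 0.25
u = np.random.uniform(np.finfo(np.float64).tiny, 1.0, size=1000000)
k = np.floor(np.log(u) / np.log1p(-p))
print(k.mean())  # ~ (1 - p) / p = 3.0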
Example #13
Source File: gumbel.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  # Uniform variates must be sampled from the open-interval `(0, 1)` rather
  # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
  # because it is the smallest, positive, "normal" number. A "normal" number
  # is such that the mantissa has an implicit leading 1. Normal, positive
  # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
  # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
  # 0.
  uniform = random_ops.random_uniform(
      shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
      minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  sampled = -math_ops.log(-math_ops.log(uniform))
  return sampled * self.scale + self.loc
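-log(-log(U)) is the inverse CDF of the standard Gumbel distribution, the same transform that underlies the Gumbel-max trick for sampling from a categorical distribution. An illustrative NumPy check (not from the original source); the standard Gumbel mean is the Euler-Mascheroni constant, about 0.5772:

import numpy as np

u = np.random.uniform(np.finfo(np.float64).tiny, 1.0, size=1000000)
g = -np.log(-np.log(u))
print(g.mean())  # ~ 0.5772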
Example #14
Source File: image_ops_impl.py From auto-alt-text-lambda-api with MIT License
def random_flip_up_down(image, seed=None):
  """Randomly flips an image vertically (upside down).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the
  first dimension, which is `height`. Otherwise output the image as-is.

  Args:
    image: A 3-D tensor of shape `[height, width, channels]`.
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror_cond = math_ops.less(uniform_random, .5)
  stride = array_ops.where(mirror_cond, -1, 1)
  result = image[::stride, :, :]
  return fix_image_flip_shape(image, result)
Example #15
Source File: image_ops_impl.py From auto-alt-text-lambda-api with MIT License
def random_flip_left_right(image, seed=None):
  """Randomly flip an image horizontally (left to right).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the
  second dimension, which is `width`. Otherwise output the image as-is.

  Args:
    image: A 3-D tensor of shape `[height, width, channels]`.
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror_cond = math_ops.less(uniform_random, .5)
  stride = array_ops.where(mirror_cond, -1, 1)
  result = image[:, ::stride, :]
  return fix_image_flip_shape(image, result)
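Examples #14 and #15 pick the flip with a strided slice instead of a cond: a step of -1 reverses an axis and a step of 1 leaves it unchanged, so a single slice expression covers both outcomes. The idea in isolation (illustrative NumPy, not from the original source):

import numpy as np

img = np.arange(12).reshape(3, 4)
print(img[:, ::-1])  # width axis reversed: a left-right flip
print(img[:, ::1])   # unchanged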
Example #16
Source File: train_crnn.py From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def distort_color(image, color_ordering=0, scope=None):
    """Randomly augment an image (brightness and contrast).

    :param image: the input image
    :param color_ordering: augmentation mode
    :param scope: name scope
    :return: the augmented image
    """
    with tf.name_scope(scope, 'distort_color', [image]):
        if color_ordering == 0:
            # Mode 0: adjust brightness first, then contrast.
            # [-70, 30] for generated imgs, [-50, 20] for real imgs.
            rand_temp = random_ops.random_uniform([], -55, 20, seed=None)
            image = math_ops.add(image, math_ops.cast(rand_temp, dtypes.float32))
            # [0.3, 1.75] for generated imgs, [0.45, 1.5] for real imgs.
            image = tf.image.random_contrast(image, lower=0.45, upper=1.5)
        else:
            image = tf.image.random_contrast(image, lower=0.45, upper=1.5)
            rand_temp = random_ops.random_uniform([], -55, 30, seed=None)
            image = math_ops.add(image, math_ops.cast(rand_temp, dtypes.float32))
        # The random_* ops do not necessarily clamp.
        print(color_ordering)
        return tf.clip_by_value(image, 0.0, 255.0)  # keep values in [0, 255]
Example #17
Source File: init_ops.py From auto-alt-text-lambda-api with MIT License
def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  scale_shape = shape
  if partition_info is not None:
    scale_shape = partition_info.full_shape
  input_size = 1.0
  # Estimating input size is not possible to do perfectly, but we try.
  # The estimate, obtained by multiplying all dimensions but the last one,
  # is the right thing for matrix multiply and convolutions (see above).
  for dim in scale_shape[:-1]:
    input_size *= float(dim)
  # Avoid errors when initializing zero-size tensors.
  input_size = max(input_size, 1.0)
  max_val = math.sqrt(3 / input_size) * self.factor
  return random_ops.random_uniform(
      shape, -max_val, max_val, dtype, seed=self.seed)
Example #18
Source File: init_ops.py From auto-alt-text-lambda-api with MIT License
def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  scale = self.scale
  scale_shape = shape
  if partition_info is not None:
    scale_shape = partition_info.full_shape
  fan_in, fan_out = _compute_fans(scale_shape)
  if self.mode == "fan_in":
    scale /= max(1., fan_in)
  elif self.mode == "fan_out":
    scale /= max(1., fan_out)
  else:
    scale /= max(1., (fan_in + fan_out) / 2.)
  if self.distribution == "normal":
    stddev = math.sqrt(scale)
    return random_ops.truncated_normal(
        shape, 0.0, stddev, dtype, seed=self.seed)
  else:
    limit = math.sqrt(3.0 * scale)
    return random_ops.random_uniform(
        shape, -limit, limit, dtype, seed=self.seed)
Example #19
Source File: overfeat_test.py From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 231, 231
  eval_height, eval_width = 281, 281
  num_classes = 1000
  with self.test_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = overfeat.overfeat(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = overfeat.overfeat(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example #20
Source File: vgg_test.py From auto-alt-text-lambda-api with MIT License
def testEndPoints(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000
  for is_training in [True, False]:
    with ops.Graph().as_default():
      inputs = random_ops.random_uniform((batch_size, height, width, 3))
      _, end_points = vgg.vgg_a(inputs, num_classes,
                                is_training=is_training)
      expected_names = [
          'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',
          'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',
          'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',
          'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',
          'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'
      ]
      self.assertSetEqual(set(end_points.keys()), set(expected_names))
Example #21
Source File: vgg_test.py From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_a(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_a(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example #22
Source File: vgg_test.py From auto-alt-text-lambda-api with MIT License
def testEndPoints(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000
  for is_training in [True, False]:
    with ops.Graph().as_default():
      inputs = random_ops.random_uniform((batch_size, height, width, 3))
      _, end_points = vgg.vgg_16(inputs, num_classes,
                                 is_training=is_training)
      expected_names = [
          'vgg_16/conv1/conv1_1', 'vgg_16/conv1/conv1_2', 'vgg_16/pool1',
          'vgg_16/conv2/conv2_1', 'vgg_16/conv2/conv2_2', 'vgg_16/pool2',
          'vgg_16/conv3/conv3_1', 'vgg_16/conv3/conv3_2',
          'vgg_16/conv3/conv3_3', 'vgg_16/pool3', 'vgg_16/conv4/conv4_1',
          'vgg_16/conv4/conv4_2', 'vgg_16/conv4/conv4_3', 'vgg_16/pool4',
          'vgg_16/conv5/conv5_1', 'vgg_16/conv5/conv5_2',
          'vgg_16/conv5/conv5_3', 'vgg_16/pool5', 'vgg_16/fc6', 'vgg_16/fc7',
          'vgg_16/fc8'
      ]
      self.assertSetEqual(set(end_points.keys()), set(expected_names))
Example #23
Source File: vgg_test.py From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_16(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_16(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example #24
Source File: vgg_test.py From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_19(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_19(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example #25
Source File: inception_v2_test.py From auto-alt-text-lambda-api with MIT License
def testBuildOnlyUptoFinalEndpoint(self):
  batch_size = 5
  height, width = 224, 224
  endpoints = [
      'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
      'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
      'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c'
  ]
  for index, endpoint in enumerate(endpoints):
    with ops.Graph().as_default():
      inputs = random_ops.random_uniform((batch_size, height, width, 3))
      out_tensor, end_points = inception_v2.inception_v2_base(
          inputs, final_endpoint=endpoint)
      self.assertTrue(
          out_tensor.op.name.startswith('InceptionV2/' + endpoint))
      self.assertItemsEqual(endpoints[:index + 1], end_points)
Example #26
Source File: inception_v2_test.py From auto-alt-text-lambda-api with MIT License
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000

  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  _, end_points = inception_v2.inception_v2(inputs, num_classes)

  endpoint_keys = [
      key for key in end_points.keys()
      if key.startswith('Mixed') or key.startswith('Conv')
  ]

  _, end_points_with_multiplier = inception_v2.inception_v2(
      inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(0.5 * original_depth, new_depth)
Example #27
Source File: inception_v2_test.py From auto-alt-text-lambda-api with MIT License
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000

  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  _, end_points = inception_v2.inception_v2(inputs, num_classes)

  endpoint_keys = [
      key for key in end_points.keys()
      if key.startswith('Mixed') or key.startswith('Conv')
  ]

  _, end_points_with_multiplier = inception_v2.inception_v2(
      inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(2.0 * original_depth, new_depth)
Example #28
Source File: inception_v2_test.py From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000

  train_inputs = random_ops.random_uniform(
      (train_batch_size, height, width, 3))
  inception_v2.inception_v2(train_inputs, num_classes)
  eval_inputs = random_ops.random_uniform(
      (eval_batch_size, height, width, 3))
  logits, _ = inception_v2.inception_v2(eval_inputs, num_classes, reuse=True)
  predictions = math_ops.argmax(logits, 1)

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
Example #29
Source File: inception_v3_test.py From auto-alt-text-lambda-api with MIT License
def testBuildBaseNetwork(self):
  batch_size = 5
  height, width = 299, 299

  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  final_endpoint, end_points = inception_v3.inception_v3_base(inputs)
  self.assertTrue(final_endpoint.op.name.startswith('InceptionV3/Mixed_7c'))
  self.assertListEqual(final_endpoint.get_shape().as_list(),
                       [batch_size, 8, 8, 2048])
  expected_endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
      'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
      'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
  ]
  self.assertItemsEqual(end_points.keys(), expected_endpoints)
Example #30
Source File: inception_v3_test.py From auto-alt-text-lambda-api with MIT License
def testBuildOnlyUptoFinalEndpoint(self):
  batch_size = 5
  height, width = 299, 299
  endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
      'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
      'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
  ]
  for index, endpoint in enumerate(endpoints):
    with ops.Graph().as_default():
      inputs = random_ops.random_uniform((batch_size, height, width, 3))
      out_tensor, end_points = inception_v3.inception_v3_base(
          inputs, final_endpoint=endpoint)
      self.assertTrue(
          out_tensor.op.name.startswith('InceptionV3/' + endpoint))
      self.assertItemsEqual(endpoints[:index + 1], end_points)