Python tensorflow.batch_to_space() Examples

The following are 28 code examples of tensorflow.batch_to_space(), collected from open-source projects. The project, source file, and license are listed above each example.
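Before the examples, a minimal sketch of what the op does may help. This snippet is not from any of the projects below; it assumes TensorFlow 2.x, where tf.batch_to_space takes a block_shape argument (TF 1.x releases use block_size instead):

import tensorflow as tf

# Four 1x1 single-channel "images" with values 1..4.
x = tf.reshape(tf.range(1, 5), [4, 1, 1, 1])

# batch_to_space folds the leading batch dimension back into spatial blocks:
# the four images are interleaved into a single 2x2 image.
y = tf.batch_to_space(x, block_shape=[2, 2], crops=[[0, 0], [0, 0]])
print(y.shape)                # (1, 2, 2, 1)
print(tf.squeeze(y).numpy())  # [[1 2]
                              #  [3 4]]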
Example #1
Source File: layers.py From multilabel-image-classification-tensorflow with MIT License
def upscale(images, scale):
  """Box upscaling (also called nearest neighbors) of images.

  Args:
    images: A 4D `Tensor` in NHWC format.
    scale: A positive integer scale.

  Returns:
    A 4D `Tensor` of `images` up scaled by a factor `scale`.

  Raises:
    ValueError: If `scale` is not a positive integer.
  """
  scale = _get_validated_scale(scale)
  if scale == 1:
    return images
  return tf.batch_to_space(
      tf.tile(images, [scale**2, 1, 1, 1]),
      crops=[[0, 0], [0, 0]],
      block_size=scale)
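The tile-then-batch_to_space trick above is exactly nearest-neighbor upsampling: tiling makes scale**2 identical copies of each image, and batch_to_space interleaves those copies into scale-by-scale spatial blocks. A quick sanity check, not part of the project and written against TensorFlow 2.x argument names (the example itself uses the 1.x block_size keyword):

import tensorflow as tf

images = tf.random.uniform([2, 4, 4, 3])
scale = 2

tiled = tf.tile(images, [scale**2, 1, 1, 1])  # batch: 2 -> 8
up = tf.batch_to_space(tiled, block_shape=[scale, scale], crops=[[0, 0], [0, 0]])

# Compare against an independent nearest-neighbor resize.
ref = tf.image.resize(images, [4 * scale, 4 * scale], method="nearest")
tf.debugging.assert_equal(up, ref)  # both are [2, 8, 8, 3] with identical values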
Example #2
Source File: layers.py From g-tensorflow-models with Apache License 2.0
def upscale(images, scale):
  """Box upscaling (also called nearest neighbors) of images.

  Args:
    images: A 4D `Tensor` in NHWC format.
    scale: A positive integer scale.

  Returns:
    A 4D `Tensor` of `images` up scaled by a factor `scale`.

  Raises:
    ValueError: If `scale` is not a positive integer.
  """
  scale = _get_validated_scale(scale)
  if scale == 1:
    return images
  return tf.batch_to_space(
      tf.tile(images, [scale**2, 1, 1, 1]),
      crops=[[0, 0], [0, 0]],
      block_size=scale)
Example #3
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def _checkGrad(self, x, crops, block_size):
  assert 4 == x.ndim
  with self.test_session():
    tf_x = tf.convert_to_tensor(x)
    tf_y = self.batch_to_space(tf_x, crops, block_size)
    epsilon = 1e-5
    (x_jacob_t, x_jacob_n) = tf.test.compute_gradient(
        tf_x,
        x.shape,
        tf_y,
        tf_y.get_shape().as_list(),
        x_init_value=x,
        delta=epsilon)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

# Tests a gradient for batch_to_space of x which is a four dimensional
# tensor of shape [b * block_size * block_size, h, w, d].
Example #4
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def testUnknownShape(self):
  t = self.batch_to_space(
      tf.placeholder(tf.float32),
      tf.placeholder(tf.int32),
      block_size=4)
  self.assertEqual(4, t.get_shape().ndims)
Example #5
Source File: layers.py From BEGAN-tensorflow with Apache License 2.0
def unboxn(vin, n):
  """vin = (batch, h, w, depth), returns vout = (batch, n*h, n*w, depth), each pixel is duplicated."""
  s = tf.shape(vin)
  # Poor man's replacement for tf.tile (required for Adversarial Training support).
  vout = tf.concat([vin] * (n ** 2), 0)
  vout = tf.reshape(vout, [s[0] * (n ** 2), s[1], s[2], s[3]])
  vout = tf.batch_to_space(vout, [[0, 0], [0, 0]], n)
  return vout
Example #6
Source File: models_resnet.py From gan with GNU General Public License v3.0
def upscale(x, n):
  """Builds box upscaling (also called nearest neighbors).

  Args:
    x: 4D image tensor in B01C format.
    n: integer scale (must be a power of 2).

  Returns:
    4D tensor of images up scaled by a factor n.
  """
  if n == 1:
    return x
  return tf.batch_to_space(tf.tile(x, [n**2, 1, 1, 1]), [[0, 0], [0, 0]], n)
Example #7
Source File: util.py From spatial-transformer-GAN with MIT License
def imageSummary(opt, image, tag, H, W):
  blockSize = opt.visBlockSize
  imageOne = tf.batch_to_space(image[:blockSize**2], crops=[[0, 0], [0, 0]],
                               block_size=blockSize)
  imagePermute = tf.reshape(imageOne, [H, blockSize, W, blockSize, -1])
  imageTransp = tf.transpose(imagePermute, [1, 0, 3, 2, 4])
  imageBlocks = tf.reshape(imageTransp, [1, H * blockSize, W * blockSize, -1])
  summary = tf.summary.image(tag, imageBlocks)
  return summary

# restore model
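A note on the pattern above, shown as a hypothetical standalone rendering (not project code; TF 2.x argument names assumed): batch_to_space alone interleaves the blockSize**2 images pixel-by-pixel, so the reshape/transpose/reshape sequence is what regroups the interleaved pixels into a clean grid of whole images.

import tensorflow as tf

B, H, W, C = 2, 8, 8, 3
batch = tf.random.uniform([B * B, H, W, C])

# Interleave: output pixel (h, w) comes from image (h % B) * B + (w % B).
mixed = tf.batch_to_space(batch, block_shape=[B, B], crops=[[0, 0], [0, 0]])

# Un-interleave into a B-by-B montage of whole images.
grid = tf.reshape(mixed, [H, B, W, B, C])
grid = tf.transpose(grid, [1, 0, 3, 2, 4])
grid = tf.reshape(grid, [1, H * B, W * B, C])  # image n at grid row n // B, col n % B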
Example #8
Source File: util.py From inverse-compositional-STN with MIT License
def imageSummary(opt, image, tag, H, W):
  blockSize = opt.visBlockSize
  imageOne = tf.batch_to_space(image[:blockSize**2], crops=[[0, 0], [0, 0]],
                               block_size=blockSize)
  imagePermute = tf.reshape(imageOne, [H, blockSize, W, blockSize, -1])
  imageTransp = tf.transpose(imagePermute, [1, 0, 3, 2, 4])
  imageBlocks = tf.reshape(imageTransp, [1, H * blockSize, W * blockSize, -1])
  imageBlocks = tf.cast(imageBlocks * 255, tf.uint8)
  summary = tf.summary.image(tag, imageBlocks)
  return summary

# make image summary from image batch (mean/variance)
Example #9
Source File: generator.py From self-attention-gan with Apache License 2.0
def upscale(x, n):
  """Builds box upscaling (also called nearest neighbors).

  Args:
    x: 4D image tensor in B01C format.
    n: integer scale (must be a power of 2).

  Returns:
    4D tensor of images up scaled by a factor n.
  """
  if n == 1:
    return x
  return tf.batch_to_space(tf.tile(x, [n**2, 1, 1, 1]), [[0, 0], [0, 0]], n)
Example #10
Source File: layers.py From acai with Apache License 2.0
def upscale2d(x, n):
  """Box upscaling (also called nearest neighbors).

  Args:
    x: 4D tensor in NHWC format.
    n: integer scale (must be a power of 2).

  Returns:
    4D tensor up scaled by a factor n.
  """
  if n == 1:
    return x
  return tf.batch_to_space(tf.tile(x, [n**2, 1, 1, 1]), [[0, 0], [0, 0]], n)
Example #11
Source File: spacetobatch_op_test.py From deep_image_model with Apache License 2.0
def _testPad(self, inputs, block_shape, paddings, outputs):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  for use_gpu in [False, True]:
    with self.test_session(use_gpu=use_gpu):
      # outputs = space_to_batch(inputs)
      x_tf = tf.space_to_batch_nd(tf.to_float(inputs), block_shape, paddings)
      self.assertAllEqual(x_tf.eval(), outputs)
      # inputs = batch_to_space(outputs)
      x_tf = tf.batch_to_space_nd(tf.to_float(outputs), block_shape, paddings)
      self.assertAllEqual(x_tf.eval(), inputs)
Example #12
Source File: spacetobatch_op_test.py From deep_image_model with Apache License 2.0
def _testPad(self, inputs, paddings, block_size, outputs):
  with self.test_session(use_gpu=True):
    # outputs = space_to_batch(inputs)
    x_tf = self.space_to_batch(
        tf.to_float(inputs), paddings, block_size=block_size)
    self.assertAllEqual(x_tf.eval(), outputs)
    # inputs = batch_to_space(outputs)
    x_tf = self.batch_to_space(
        tf.to_float(outputs), paddings, block_size=block_size)
    self.assertAllEqual(x_tf.eval(), inputs)
Example #13
Source File: spacetobatch_op_test.py From deep_image_model with Apache License 2.0
def batch_to_space(*args, **kwargs):
  return gen_array_ops._batch_to_space(*args, **kwargs)
Example #14
Source File: model_mmd.py From opt-mmd with BSD 3-Clause "New" or "Revised" License
def imageRearrange(self, image, block=4):
  image = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
  x1 = tf.batch_to_space(image, [[0, 0], [0, 0]], block)
  image_r = tf.reshape(
      tf.transpose(
          tf.reshape(x1, [self.output_size, block, self.output_size, block, self.c_dim]),
          [1, 0, 3, 2, 4]),
      [1, self.output_size * block, self.output_size * block, self.c_dim])
  return image_r
Example #15
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def testBlockSizeSquaredNotDivisibleBatch(self):
  # The block size squared does not divide the batch.
  x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
  crops = np.zeros((2, 2), dtype=np.int32)
  block_size = 3
  with self.assertRaises(ValueError):
    _ = self.batch_to_space(x_np, crops, block_size)
Example #16
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def testBlockSizeLarger(self):
  # The block size is too large for this input.
  x_np = [[[[1], [2]], [[3], [4]]]]
  crops = np.zeros((2, 2), dtype=np.int32)
  block_size = 10
  with self.assertRaises(ValueError):
    out_tf = self.batch_to_space(x_np, crops, block_size)
    out_tf.eval()
Example #17
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def testBlockSizeOne(self):
  # The block size is 1. The block size needs to be > 1.
  x_np = [[[[1], [2]], [[3], [4]]]]
  crops = np.zeros((2, 2), dtype=np.int32)
  block_size = 1
  with self.assertRaises(ValueError):
    out_tf = self.batch_to_space(x_np, crops, block_size)
    out_tf.eval()
Example #18
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def testInputWrongDimMissingBatch(self):
  # The input is missing the first dimension ("batch").
  x_np = [[[1], [2]], [[3], [4]]]
  crops = np.zeros((2, 2), dtype=np.int32)
  block_size = 2
  with self.assertRaises(ValueError):
    _ = self.batch_to_space(x_np, crops, block_size)
Example #19
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def testDepthToSpaceTranspose(self):
  x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
  block_size = 2
  crops = np.zeros((2, 2), dtype=np.int32)
  y1 = self.batch_to_space(x, crops, block_size=block_size)
  y2 = tf.transpose(
      tf.depth_to_space(
          tf.transpose(x, [3, 1, 2, 0]), block_size=block_size),
      [3, 1, 2, 0])
  with self.test_session():
    self.assertAllEqual(y1.eval(), y2.eval())
Example #20
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def batch_to_space(*args, **kwargs):
  return gen_array_ops._batch_to_space(*args, **kwargs)
Example #21
Source File: batchtospace_op_test.py From deep_image_model with Apache License 2.0
def batch_to_space(*args, **kwargs):
  return tf.batch_to_space(*args, **kwargs)
Example #22
Source File: sliced_wasserstein.py From ashpy with Apache License 2.0
def batch_to_space(*args, **kwargs):
    """Call tf.batch_to_space using the correct arguments."""
    try:
        return tf.batch_to_space(*args, **kwargs)
    except TypeError:
        if "block_shape" in kwargs:
            kwargs["block_size"] = kwargs["block_shape"]
            del kwargs["block_shape"]
        return tf.batch_to_space(*args, **kwargs)
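A possible use of this wrapper (a hypothetical call, not from the project): callers always pass the 2.x-style block_shape keyword, and on a 1.x installation the TypeError branch retries with block_size.

img = tf.zeros([4, 3, 3, 1])
out = batch_to_space(img, crops=[[0, 0], [0, 0]], block_shape=2)
# out.shape == (1, 6, 6, 1) on both TF 1.x and TF 2.x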
Example #23
Source File: util.py From 3D-point-cloud-generation with MIT License
def imageSummary(opt, tag, image, H, W):
  blockSize = opt.visBlockSize
  imageOne = tf.batch_to_space(image[:blockSize**2], crops=[[0, 0], [0, 0]],
                               block_size=blockSize)
  imagePermute = tf.reshape(imageOne, [H, blockSize, W, blockSize, -1])
  imageTransp = tf.transpose(imagePermute, [1, 0, 3, 2, 4])
  imageBlocks = tf.reshape(imageTransp, [1, H * blockSize, W * blockSize, -1])
  summary = tf.summary.image(tag, imageBlocks)
  return summary

# restore model
Example #24
Source File: image_models.py From taco with GNU General Public License v3.0
def conv_layers_2(x, bs, max_seq_len, im_dim):
    # x = tf.cast(x, tf.float32)
    wid, hei, chan = im_dim[0], im_dim[1], im_dim[2]
    try:
        x = tf.reshape(x, [bs * max_seq_len, wid, hei, chan])
    except Exception:
        print('image dimensions not compatible')
        raise
    rate = 3
    # Pad so that width and height become multiples of rate * 2.
    rem_wid = rate * 2 - (wid % (rate * 2))
    rem_hei = rate * 2 - (hei % (rate * 2))
    pad = tf.constant([[np.floor(rem_hei / 2), np.ceil(rem_hei / 2)],
                       [np.floor(rem_wid / 2), np.ceil(rem_wid / 2)]])
    pad = tf.cast(pad, tf.int32)
    filters1 = tf.Variable(tf.random_normal([3, 3, 3, 5]), dtype=tf.float32)
    filters2 = tf.Variable(tf.random_normal([3, 3, 5, 5]), dtype=tf.float32)
    filters3 = tf.Variable(tf.random_normal([3, 3, 5, 5]), dtype=tf.float32)
    # Dilated convolutions via the space_to_batch / conv2d / batch_to_space trick.
    net = space_to_batch(x, paddings=pad, block_size=rate)
    net = tf.nn.conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME",
                       name="dil_conv_1")
    # net = tf.nn.conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME",
    #                    name="dil_conv_2")
    net = tf.nn.conv2d(net, filters3, strides=[1, 1, 1, 1], padding="SAME",
                       name="dil_conv_3")
    net = batch_to_space(net, crops=pad, block_size=rate)
    output = tf.layers.flatten(net)
    return output
Example #25
Source File: model_tmmd.py From opt-mmd with BSD 3-Clause "New" or "Revised" License
def imageRearrange(self, image, block=4):
  image = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
  x1 = tf.batch_to_space(image, [[0, 0], [0, 0]], block)
  image_r = tf.reshape(
      tf.transpose(
          tf.reshape(x1, [self.output_size, block, self.output_size, block, self.c_dim]),
          [1, 0, 3, 2, 4]),
      [1, self.output_size * block, self.output_size * block, self.c_dim])
  return image_r
Example #26
Source File: model_mmd_fm.py From opt-mmd with BSD 3-Clause "New" or "Revised" License
def imageRearrange(self, image, block=4):
  image = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
  x1 = tf.batch_to_space(image, [[0, 0], [0, 0]], block)
  image_r = tf.reshape(
      tf.transpose(
          tf.reshape(x1, [self.output_size, block, self.output_size, block, self.c_dim]),
          [1, 0, 3, 2, 4]),
      [1, self.output_size * block, self.output_size * block, self.c_dim])
  return image_r
Example #27
Source File: atrous_conv2d_test.py From deep_image_model with Apache License 2.0
def testAtrousSequence(self):
  """Tests optimization of sequence of atrous convolutions.

  Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
  parameters, 'SAME' `padding`, and `filters` with odd heights/widths:

      net = atrous_conv2d(net, filters1, rate, padding="SAME")
      net = atrous_conv2d(net, filters2, rate, padding="SAME")
      ...
      net = atrous_conv2d(net, filtersK, rate, padding="SAME")

  is equivalent to:

      pad = ...  # padding so that the input dims are multiples of rate
      net = space_to_batch(net, paddings=pad, block_size=rate)
      net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
      net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
      ...
      net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
      net = batch_to_space(net, crops=pad, block_size=rate)
  """
  padding = "SAME"  # The padding needs to be "SAME"
  np.random.seed(1)  # Make it reproducible.
  with self.test_session(use_gpu=True):
    # Input: [batch, height, width, input_depth]
    for height in range(15, 17):
      for width in range(15, 17):
        x_shape = [3, height, width, 2]
        x = np.random.random_sample(x_shape).astype(np.float32)
        for kernel in [1, 3, 5]:  # The kernel size needs to be odd.
          # Filter: [kernel_height, kernel_width, input_depth, output_depth]
          f_shape = [kernel, kernel, 2, 2]
          f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
          for rate in range(2, 4):
            # y1: three atrous_conv2d in a row.
            y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
            y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
            y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
            # y2: space_to_batch, three conv2d in a row, batch_to_space.
            pad_bottom = 0 if height % rate == 0 else rate - height % rate
            pad_right = 0 if width % rate == 0 else rate - width % rate
            pad = [[0, pad_bottom], [0, pad_right]]
            y2 = tf.space_to_batch(x, paddings=pad, block_size=rate)
            y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
            y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
            y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
            y2 = tf.batch_to_space(y2, crops=pad, block_size=rate)
            self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)
Example #28
Source File: sliced_wasserstein.py From ashpy with Apache License 2.0
def laplacian_pyramid(batch, num_levels):
    """Compute a Laplacian pyramid.

    Args:
        batch: (tensor) The batch of images (batch, height, width, channels).
        num_levels: (int) Desired number of hierarchical levels.

    Returns:
        List of tensors from the highest to lowest resolution.
    """
    gaussian_filter = tf.constant(_GAUSSIAN_FILTER)

    def spatial_conv(batch, gain):
        """Compute custom conv2d."""
        s = tf.shape(input=batch)
        padded = tf.pad(
            tensor=batch, paddings=[[0, 0], [2, 2], [2, 2], [0, 0]], mode="REFLECT"
        )
        xt = tf.transpose(a=padded, perm=[0, 3, 1, 2])
        xt = tf.reshape(xt, [s[0] * s[3], s[1] + 4, s[2] + 4, 1])
        conv_out = tf.nn.conv2d(
            input=xt, filters=gaussian_filter * gain, strides=[1] * 4, padding="VALID"
        )
        conv_xt = tf.reshape(conv_out, [s[0], s[3], s[1], s[2]])
        conv_xt = tf.transpose(a=conv_xt, perm=[0, 2, 3, 1])
        return conv_xt

    def pyr_down(batch):  # matches cv2.pyrDown()
        return spatial_conv(batch, 1)[:, ::2, ::2]

    def pyr_up(batch):  # matches cv2.pyrUp()
        s = tf.shape(input=batch)
        zeros = tf.zeros([3 * s[0], s[1], s[2], s[3]])
        res = tf.concat([batch, zeros], 0)
        res = batch_to_space(input=res, crops=[[0, 0], [0, 0]], block_shape=2)
        res = spatial_conv(res, 4)
        return res

    pyramid = [_to_float(batch)]
    for _ in range(1, num_levels):
        pyramid.append(pyr_down(pyramid[-1]))
        pyramid[-2] -= pyr_up(pyramid[-1])
    return pyramid
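How pyr_up uses batch_to_space here, shown on a tiny tensor (an illustrative sketch, not project code; TF 2.x argument names assumed): concatenating three zero copies after the real batch and applying batch_to_space with block 2 places the real values on the even (row, column) positions with zeros in between; the subsequent spatial_conv with gain 4 then interpolates the gaps, matching cv2.pyrUp.

import tensorflow as tf

b = tf.ones([1, 2, 2, 1])
res = tf.concat([b, tf.zeros([3, 2, 2, 1])], 0)  # [4, 2, 2, 1]
up = tf.batch_to_space(res, block_shape=[2, 2], crops=[[0, 0], [0, 0]])
print(tf.squeeze(up).numpy())
# [[1. 0. 1. 0.]
#  [0. 0. 0. 0.]
#  [1. 0. 1. 0.]
#  [0. 0. 0. 0.]]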