Python tensorflow.python.ops.array_ops.meshgrid() Examples
The following are 4 code examples of tensorflow.python.ops.array_ops.meshgrid().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.python.ops.array_ops, or try the search function.
Example #1
Source File: array_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def _compareDiff(self, x, y, use_gpu):
    """Check that array_ops.meshgrid agrees with np.meshgrid for two inputs.

    Runs both 'ij' (matrix) and 'xy' (Cartesian) indexing modes and
    asserts elementwise equality of every output grid.
    """
    for indexing in ('ij', 'xy'):
        expected = np.meshgrid(x, y, indexing=indexing)
        actual = array_ops.meshgrid(x, y, indexing=indexing)
        with self.test_session(use_gpu=use_gpu):
            for want, got in zip(expected, actual):
                self.assertAllEqual(want, got.eval())
Example #2
Source File: array_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def _compareDiffType(self, n, np_dtype, use_gpu):
    """Check array_ops.meshgrid against np.meshgrid for n inputs of np_dtype.

    Fix: `inputs` was previously built once outside the indexing loop, so
    the second ('xy') iteration fed 2*n accumulated arrays to meshgrid
    instead of n. The list is now reset for each indexing mode.
    """
    for index in ('ij', 'xy'):
        inputs = []
        for _ in range(n):
            x = np.linspace(-10, 10, 5).astype(np_dtype)
            # Give complex dtypes a nonzero imaginary part so the
            # comparison exercises the complex code path.
            if np_dtype in (np.complex64, np.complex128):
                x += 1j
            inputs.append(x)
        numpy_out = np.meshgrid(*inputs, indexing=index)
        with self.test_session(use_gpu=use_gpu):
            tf_out = array_ops.meshgrid(*inputs, indexing=index)
            for X, _X in zip(numpy_out, tf_out):
                self.assertAllEqual(X, _X.eval())
Example #3
Source File: dense_image_warp.py From WarpGAN with MIT License | 4 votes |
def dense_image_warp(image, flow, name='dense_image_warp'):
    """Image warping using per-pixel flow vectors.

    Applies a non-linear warp to `image`: the pixel value at
    output[b, j, i, c] is sampled from
    images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].
    Since those locations are generally fractional, the value is obtained
    by bilinear interpolation of the 4 nearest pixels; locations outside
    the image clamp to the nearest boundary pixel.

    Args:
      image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
      flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
      name: A name for the operation (optional).

    Note that image and flow can be of type tf.half, tf.float32, or
    tf.float64, and do not necessarily have to be the same type.

    Returns:
      A 4-D float `Tensor` with shape `[batch, height, width, channels]`
      and same type as input image.

    Raises:
      ValueError: if height < 2 or width < 2 or the inputs have the wrong
        number of dimensions.
    """
    with ops.name_scope(name):
        # Height/width/channels come from the static shape; batch size is
        # taken from the dynamic shape so an unknown batch dim still works.
        _, height, width, channels = image.get_shape().as_list()
        batch_size = tf.shape(image)[0]

        # Build the (row, col) coordinate grid of the image, then shift it
        # by -flow to obtain the sampling locations in the source image.
        cols, rows = array_ops.meshgrid(
            math_ops.range(width), math_ops.range(height))
        grid = math_ops.cast(
            array_ops.stack([rows, cols], axis=2), flow.dtype)
        query_points = array_ops.expand_dims(grid, axis=0) - flow
        flat_queries = array_ops.reshape(
            query_points, [batch_size, height * width, 2])

        # Sample the image at the query points and restore the grid shape.
        warped = _interpolate_bilinear(image, flat_queries)
        return array_ops.reshape(
            warped, [batch_size, height, width, channels])
Example #4
Source File: core_warp.py From unsupervised_detection with MIT License | 4 votes |
def dense_image_warp(image, flow, name='dense_image_warp'):
    """Image warping using per-pixel flow vectors.

    Applies a non-linear warp to `image`: the pixel value at
    output[b, j, i, c] is sampled from
    images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].
    Since those locations are generally fractional, the value is obtained
    by bilinear interpolation of the 4 nearest pixels; locations outside
    the image clamp to the nearest boundary pixel.

    Args:
      image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
      flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
      name: A name for the operation (optional).

    Note that image and flow can be of type tf.half, tf.float32, or
    tf.float64, and do not necessarily have to be the same type.

    Returns:
      A 4-D float `Tensor` with shape `[batch, height, width, channels]`
      and same type as input image.

    Raises:
      ValueError: if height < 2 or width < 2 or the inputs have the wrong
        number of dimensions.
    """
    with ops.name_scope(name):
        # All four dims come from the dynamic shape, so every dimension
        # (including height/width) may be unknown at graph-build time.
        batch_size, height, width, channels = array_ops.unstack(
            array_ops.shape(image))

        # Build the (row, col) coordinate grid of the image, then shift it
        # by -flow to obtain the sampling locations in the source image.
        cols, rows = array_ops.meshgrid(
            math_ops.range(width), math_ops.range(height))
        grid = math_ops.cast(
            array_ops.stack([rows, cols], axis=2), flow.dtype)
        query_points = array_ops.expand_dims(grid, axis=0) - flow
        flat_queries = array_ops.reshape(
            query_points, [batch_size, height * width, 2])

        # Sample the image at the query points and restore the grid shape.
        warped = _interpolate_bilinear(image, flat_queries)
        return array_ops.reshape(
            warped, [batch_size, height, width, channels])