Python cv2.BORDER_TRANSPARENT Examples
The following are 30 code examples of cv2.BORDER_TRANSPARENT, collected from open-source projects. Each example notes the source file it was taken from, the project it belongs to, and that project's license. You may also want to check out the other functions and classes available in the cv2 module.
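cv2.BORDER_TRANSPARENT tells OpenCV's warping and remapping functions to leave destination pixels that the transformed source does not cover unmodified, which is why most of the examples below pre-allocate a destination image and pass it in via the dst argument. The snippet below is a minimal illustrative sketch of that behavior; it is not taken from any of the projects listed here.

import cv2
import numpy as np

# Minimal sketch: paste a translated patch onto an existing canvas.
# With BORDER_TRANSPARENT, destination pixels that the warped source does
# not reach keep their original values, so the white background survives.
canvas = np.full((200, 200, 3), 255, dtype=np.uint8)   # pre-filled destination
patch = np.zeros((100, 100, 3), dtype=np.uint8)        # black 100x100 source
M = np.float32([[1, 0, 50],                            # translate the patch
                [0, 1, 50]])                           # by (50, 50) pixels

cv2.warpAffine(patch, M, (200, 200), dst=canvas,
               borderMode=cv2.BORDER_TRANSPARENT)
# canvas now shows the black square at (50, 50)-(149, 149); the rest stays white.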
Example #1
Source File: neural_style.py From neural-style-tf with GNU General Public License v3.0
def warp_image(src, flow):
    _, h, w = flow.shape
    flow_map = np.zeros(flow.shape, dtype=np.float32)
    for y in range(h):
        flow_map[1,y,:] = float(y) + flow[1,y,:]
    for x in range(w):
        flow_map[0,:,x] = float(x) + flow[0,:,x]
    # remap pixels to optical flow
    dst = cv2.remap(
        src, flow_map[0], flow_map[1],
        interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_TRANSPARENT)
    return dst
Example #2
Source File: TYY_MORPH_create_db.py From SSR-Net with Apache License 2.0
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #3
Source File: TYY_MORPH_create_db.py From MaskInsightface with Apache License 2.0
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #4
Source File: align.py From photo-a-day-aligner with MIT License
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #5
Source File: televisionfunctions.py From SaltwashAR with GNU General Public License v3.0
def add_substitute_quad(image, substitute_quad, dst):

    # dst (zeroed) and src points
    dst = _order_points(dst)

    (tl, tr, br, bl) = dst
    min_x = min(int(tl[0]), int(bl[0]))
    min_y = min(int(tl[1]), int(tr[1]))

    for point in dst:
        point[0] = point[0] - min_x
        point[1] = point[1] - min_y

    (max_width, max_height) = _max_width_height(dst)
    src = _topdown_points(max_width, max_height)

    # warp perspective (with white border)
    substitute_quad = cv2.resize(substitute_quad, (max_width, max_height))

    warped = np.zeros((max_height, max_width, 3), np.uint8)
    warped[:, :, :] = 255

    matrix = cv2.getPerspectiveTransform(src, dst)
    cv2.warpPerspective(substitute_quad, matrix, (max_width, max_height), warped,
                        borderMode=cv2.BORDER_TRANSPARENT)

    # add substitute quad
    image[min_y:min_y + max_height, min_x:min_x + max_width] = warped

    return image
Example #6
Source File: imagemaker.py From Trusty-cogs with MIT License
def rotoscope(self, dst, warp, properties: dict):
    if not properties["show"]:
        return dst
    corners = properties["corners"]
    wRows, wCols, wCh = warp.shape
    rows, cols, ch = dst.shape
    # Apply blur on warp
    kernel = np.ones((5, 5), np.float32) / 25
    warp = cv2.filter2D(warp, -1, kernel)
    # Prepare points to be matched on Affine Transformation
    pts1 = np.float32([[0, 0], [wCols, 0], [0, wRows]])
    pts2 = np.float32(corners) * 2
    # Enlarge image to multisample
    dst = cv2.resize(dst, (cols * 2, rows * 2))
    # Transform image with the Matrix
    M = cv2.getAffineTransform(pts1, pts2)
    cv2.warpAffine(
        warp,
        M,
        (cols * 2, rows * 2),
        dst,
        flags=cv2.INTER_AREA,
        borderMode=cv2.BORDER_TRANSPARENT,
    )
    # Sample back image size
    dst = cv2.resize(dst, (cols, rows))
    return dst
Example #7
Source File: morpher.py From face_merge_master with Apache License 2.0
def tran_matrix(src_img, src_points, dst_img, dst_points):
    h = cv2.findHomography(dst_points, src_points)
    output = cv2.warpAffine(dst_img, h[0][:2], (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)
    return output
Example #8
Source File: morpher.py From face_merge_master with Apache License 2.0
def transformation_points(src_img, src_points, dst_img, dst_points):
    src_points = src_points.astype(np.float64)
    dst_points = dst_points.astype(np.float64)

    c1 = np.mean(src_points, axis=0)
    c2 = np.mean(dst_points, axis=0)

    src_points -= c1
    dst_points -= c2

    s1 = np.std(src_points)
    s2 = np.std(dst_points)

    src_points /= s1
    dst_points /= s2

    u, s, vt = np.linalg.svd(src_points.T * dst_points)
    r = (u * vt).T

    m = np.vstack([np.hstack(((s2 / s1) * r, c2.T - (s2 / s1) * r * c1.T)),
                   np.matrix([0., 0., 1.])])

    output = cv2.warpAffine(dst_img, m[:2], (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)
    return output
Example #9
Source File: face_swap.py From average_portrait with MIT License
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #10
Source File: face_align.py From average_portrait with MIT License
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #11
Source File: faceswapper.py From FaceSwapper with Apache License 2.0
def warp_im(self, im, M, dshape):
    '''Affine-warp the face into position.'''
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #12
Source File: faceswap.py From mica-race-from-face with MIT License
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #13
Source File: faceswap.py From faceswap with MIT License
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #14
Source File: main.py From FaceSwap with MIT License
def warp_image(img, tM, shape):
    out = np.zeros(shape, dtype=img.dtype)
    # cv2.warpAffine(img,
    #                tM[:2],
    #                (shape[1], shape[0]),
    #                dst=out,
    #                borderMode=cv2.BORDER_TRANSPARENT,
    #                flags=cv2.WARP_INVERSE_MAP)
    cv2.warpPerspective(img,
                        tM,
                        (shape[1], shape[0]),
                        dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out

# TODO: Modify this method to get a better face contour mask
Example #15
Source File: morpher.py From yry with Apache License 2.0
def transformation_points(src_img, src_points, dst_img, dst_points):
    src_points = src_points.astype(np.float64)
    dst_points = dst_points.astype(np.float64)

    c1 = np.mean(src_points, axis=0)
    c2 = np.mean(dst_points, axis=0)

    src_points -= c1
    dst_points -= c2

    s1 = np.std(src_points)
    s2 = np.std(dst_points)

    src_points /= s1
    dst_points /= s2

    u, s, vt = np.linalg.svd(src_points.T * dst_points)
    r = (u * vt).T

    m = np.vstack([np.hstack(((s2 / s1) * r, c2.T - (s2 / s1) * r * c1.T)),
                   np.matrix([0., 0., 1.])])

    output = cv2.warpAffine(dst_img, m[:2], (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)
    return output
Example #16
Source File: baseline.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #17
Source File: morpher.py From yry with Apache License 2.0
def tran_matrix(src_img, src_points, dst_img, dst_points):
    h = cv2.findHomography(dst_points, src_points)
    output = cv2.warpAffine(dst_img, h[0][:2], (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)
    return output
Example #18
Source File: trump.py From Trusty-cogs-archive with MIT License
def rotoscope(self, dst, warp, properties):
    if properties['show'] == False:
        return dst

    corners = properties['corners']

    wRows, wCols, wCh = warp.shape
    rows, cols, ch = dst.shape

    # Apply blur on warp
    kernel = np.ones((5, 5), np.float32) / 25
    warp = cv2.filter2D(warp, -1, kernel)

    # Prepare points to be matched on Affine Transformation
    pts1 = np.float32([[0, 0], [wCols, 0], [0, wRows]])
    pts2 = np.float32(corners) * 2

    # Enlarge image to multisample
    dst = cv2.resize(dst, (cols * 2, rows * 2))

    # Transform image with the Matrix
    M = cv2.getAffineTransform(pts1, pts2)
    cv2.warpAffine(warp, M, (cols * 2, rows * 2), dst,
                   flags=cv2.INTER_AREA, borderMode=cv2.BORDER_TRANSPARENT)

    # Sample back image size
    dst = cv2.resize(dst, (cols, rows))

    return dst
Example #19
Source File: faceswap.py From faceai with MIT License
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(
        im,
        M[:2],
        (dshape[1], dshape[0]),
        dst=output_im,
        borderMode=cv2.BORDER_TRANSPARENT,
        flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #20
Source File: baseline3Pool.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #21
Source File: baselineSERes18Conc.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #22
Source File: baselineDiceFocalLoss.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #23
Source File: baselineDiceCrossEntropy.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #24
Source File: baselineRes18Conc.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #25
Source File: cacd_process.py From VisualizingNDF with MIT License
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Example #26
Source File: baseline4Pool.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #27
Source File: baseline2Pool.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #28
Source File: baselineSERes18Sum.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #29
Source File: AnatomyNet.py From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    # affine and deformation must be slice by slice and fixed for slices
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
    # [:,:,:,1] is the mask. The two channels undergo the same transformation.
    shape_size = shape[:-1]  # z y x
    # Random affine
    shape_size_aff = shape[1:-1]  # y x
    center_square = np.float32(shape_size_aff) // 2
    square_size = min(shape_size_aff) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    new_img = np.zeros_like(image)
    for i in range(shape[0]):
        new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1],
                                          borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
        for j in range(1, 10):
            new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1],
                                              flags=cv2.INTER_NEAREST,
                                              borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)

    dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    new_img2 = np.zeros_like(image)
    for i in range(shape[0]):
        new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1,
                                            mode='constant').reshape(shape[1:-1])
        for j in range(1, 10):
            new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0,
                                                mode='constant').reshape(shape[1:-1])
    return np.array(new_img2), new_img
Example #30
Source File: helpers.py From songoku with MIT License
def perspective_transform(img, transformation_matrix, original_shape=None):
    warped = img

    if original_shape is not None:
        if original_shape[0] > 0 and original_shape[1] > 0:
            warped = cv2.resize(warped, (original_shape[1], original_shape[0]),
                                interpolation=cv2.INTER_CUBIC)

    white_image = np.zeros((640, 480, 3), np.uint8)
    white_image[:, :, :] = 255

    # warped = cv2.warpPerspective(warped, transformation_matrix, (640, 480), borderMode=cv2.BORDER_TRANSPARENT)
    warped = cv2.warpPerspective(warped, transformation_matrix, (640, 480))

    return warped