Python cv2.copyMakeBorder() Examples
The following are 30 code examples of cv2.copyMakeBorder(). Each example notes the project and source file it was taken from, so you can look up the original context; a minimal standalone sketch of the call itself comes first.
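A minimal standalone sketch of the basic call, before the project examples. cv2.copyMakeBorder(src, top, bottom, left, right, borderType, value=...) returns a copy of src enlarged by the requested number of pixels on each side; the image contents and padding sizes below are illustrative.

import cv2
import numpy as np

# Synthetic 100x200 BGR image; values and sizes are illustrative.
img = np.full((100, 200, 3), 128, dtype=np.uint8)

# Pad 10 px top/bottom and 20 px left/right with a constant black border.
padded = cv2.copyMakeBorder(img, 10, 10, 20, 20,
                            cv2.BORDER_CONSTANT, value=(0, 0, 0))
print(padded.shape)  # (120, 240, 3)

# The same call with a mirrored border instead of a constant fill.
reflected = cv2.copyMakeBorder(img, 10, 10, 20, 20, cv2.BORDER_REFLECT_101)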
Example #1
Source File: image.py From ggcnn with BSD 3-Clause "New" or "Revised" License | 6 votes |
def inpaint(self, missing_value=0):
    """
    Inpaint missing values in depth image.
    :param missing_value: Value to fill in the depth image.
    """
    # cv2 inpainting doesn't handle the border properly
    # https://stackoverflow.com/questions/25974033/inpainting-depth-map-still-a-black-image-border
    self.img = cv2.copyMakeBorder(self.img, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
    mask = (self.img == missing_value).astype(np.uint8)

    # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
    scale = np.abs(self.img).max()
    self.img = self.img.astype(np.float32) / scale  # Has to be float32, 64 not supported.
    self.img = cv2.inpaint(self.img, mask, 1, cv2.INPAINT_NS)

    # Back to original size and value range.
    self.img = self.img[1:-1, 1:-1]
    self.img = self.img * scale
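The method above operates on class state (self.img). A rough standalone sketch of the same pad-inpaint-crop workaround, assuming a float32 depth map with zero-valued holes (the array sizes and values are illustrative):

import cv2
import numpy as np

depth = np.random.uniform(0.4, 1.2, (96, 96)).astype(np.float32)
depth[40:50, 40:50] = 0.0  # simulate missing readings

# Pad by one pixel so cv2.inpaint treats the image border correctly, then crop it back off.
padded = cv2.copyMakeBorder(depth, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
mask = (padded == 0.0).astype(np.uint8)
scale = np.abs(padded).max()           # keep values in a range OpenCV accepts
filled = cv2.inpaint(padded / scale, mask, 1, cv2.INPAINT_NS)
restored = filled[1:-1, 1:-1] * scale  # back to the original size and value range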
Example #2
Source File: datasets.py From pytorch-segmentation-toolbox with MIT License | 6 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    size = image.shape
    name = osp.splitext(osp.basename(datafiles["img"]))[0]
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w, _ = image.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                   cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
    image = image.transpose((2, 0, 1))
    return image, name, size
Example #3
Source File: datasets.py From pytorch-segmentation-toolbox with MIT License | 6 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
    size = image.shape
    name = datafiles["name"]
    if self.scale:
        image, label = self.generate_scale_label(image, label)
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w = label.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                     cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(self.ignore_label,))
    else:
        img_pad, label_pad = image, label

    img_h, img_w = label_pad.shape
    h_off = random.randint(0, img_h - self.crop_h)
    w_off = random.randint(0, img_w - self.crop_w)
    # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
    image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    # image = image[:, :, ::-1]  # change to BGR
    image = image.transpose((2, 0, 1))
    if self.is_mirror:
        flip = np.random.choice(2) * 2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
    return image.copy(), label.copy(), np.array(size), name
Example #4
Source File: retina_net.py From Rekognition with GNU General Public License v3.0 | 6 votes |
def _pad_input_image(img, max_steps):
    """pad image to suitable shape"""
    logger.info(msg="_pad_input_image called")
    img_h, img_w, _ = img.shape

    img_pad_h = 0
    if img_h % max_steps > 0:
        img_pad_h = max_steps - img_h % max_steps

    img_pad_w = 0
    if img_w % max_steps > 0:
        img_pad_w = max_steps - img_w % max_steps

    padd_val = np.mean(img, axis=(0, 1)).astype(np.uint8)
    img = cv2.copyMakeBorder(img, 0, img_pad_h, 0, img_pad_w,
                             cv2.BORDER_CONSTANT, value=padd_val.tolist())
    pad_params = [img_h, img_w, img_pad_h, img_pad_w]

    return img, pad_params
Example #5
Source File: datasets.py From pytorch-segmentation-toolbox with MIT License | 6 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    size = image.shape
    name = osp.splitext(osp.basename(datafiles["img"]))[0]
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w, _ = image.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                   cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
    image = image.transpose((2, 0, 1))
    return image, name, size
Example #6
Source File: tiles.py From argus-freesound with MIT License | 6 votes |
def cut_patch(self, image: np.ndarray, slice_index, border_type=cv2.BORDER_CONSTANT, value=0):
    assert image.shape[0] == self.image_height
    assert image.shape[1] == self.image_width

    orig_shape_len = len(image.shape)
    image = cv2.copyMakeBorder(image, self.margin_top, self.margin_bottom,
                               self.margin_left, self.margin_right,
                               borderType=border_type, value=value)

    # This check recovers possible lack of last dummy dimension for single-channel images
    if len(image.shape) != orig_shape_len:
        image = np.expand_dims(image, axis=-1)

    x, y, tile_width, tile_height = self.crops[slice_index]
    tile = image[y:y + tile_height, x:x + tile_width].copy()

    assert tile.shape[0] == self.tile_size[0]
    assert tile.shape[1] == self.tile_size[1]
    return tile
Example #7
Source File: datasets.py From pytorch-segmentation-toolbox with MIT License | 6 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
    label = self.id2trainId(label)
    size = image.shape
    name = datafiles["name"]
    if self.scale:
        image, label = self.generate_scale_label(image, label)
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w = label.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                     cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(self.ignore_label,))
    else:
        img_pad, label_pad = image, label

    img_h, img_w = label_pad.shape
    h_off = random.randint(0, img_h - self.crop_h)
    w_off = random.randint(0, img_w - self.crop_w)
    # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
    image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    # image = image[:, :, ::-1]  # change to BGR
    image = image.transpose((2, 0, 1))
    if self.is_mirror:
        flip = np.random.choice(2) * 2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
    return image.copy(), label.copy(), np.array(size), name
Example #8
Source File: opencv.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0):
    """Pad image border
    Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray

    Parameters
    ----------
    src : NDArray
        Image in (width, height, channels).
        Others are the same with cv2.copyMakeBorder

    Returns
    -------
    img : NDArray
        padded image
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVcopyMakeBorder(src.handle,
                                       ctypes.c_int(top), ctypes.c_int(bot),
                                       ctypes.c_int(left), ctypes.c_int(right),
                                       ctypes.c_int(border_type), ctypes.c_double(value),
                                       ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)
Example #9
Source File: tiles.py From argus-freesound with MIT License | 6 votes |
def split(self, image, border_type=cv2.BORDER_CONSTANT, value=0):
    assert image.shape[0] == self.image_height
    assert image.shape[1] == self.image_width

    orig_shape_len = len(image.shape)
    image = cv2.copyMakeBorder(image, self.margin_top, self.margin_bottom,
                               self.margin_left, self.margin_right,
                               borderType=border_type, value=value)

    # This check recovers possible lack of last dummy dimension for single-channel images
    if len(image.shape) != orig_shape_len:
        image = np.expand_dims(image, axis=-1)

    tiles = []
    for x, y, tile_width, tile_height in self.crops:
        tile = image[y:y + tile_height, x:x + tile_width].copy()

        assert tile.shape[0] == self.tile_size[0]
        assert tile.shape[1] == self.tile_size[1]
        tiles.append(tile)

    return tiles
Example #10
Source File: ScreenGrab.py From BiblioPixelAnimations with MIT License | 6 votes |
def step(self, amt=1):
    image = self._capFrame()

    if self.crop:
        image = image[self._cropY + self.yoff:self._ih - self._cropY + self.yoff,
                      self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
    else:
        t, b, l, r = self._pad
        image = cv2.copyMakeBorder(image, t, b, l, r,
                                   cv2.BORDER_CONSTANT, value=[0, 0, 0])

    resized = cv2.resize(image, (self.width, self.height),
                         interpolation=cv2.INTER_LINEAR)

    if self.mirror:
        resized = cv2.flip(resized, 1)

    for y in range(self.height):
        for x in range(self.width):
            self.layout.set(x, y, tuple(resized[y, x][0:3]))
Example #11
Source File: datasets.py From pytorch-segmentation-toolbox with MIT License | 6 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
    size = image.shape
    name = osp.splitext(osp.basename(datafiles["img"]))[0]
    image = np.asarray(image, np.float32)
    image = (image - image.min()) / (image.max() - image.min())

    img_h, img_w, _ = image.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                   cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
    image = image.transpose((2, 0, 1))
    return image, np.array(size), name
Example #12
Source File: write_tfrecord.py From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License | 6 votes |
def _resize_image(img):
    dst_width = CFG.ARCH.INPUT_SIZE[0]
    dst_height = CFG.ARCH.INPUT_SIZE[1]
    h_old, w_old, _ = img.shape
    height = dst_height
    width = int(w_old * height / h_old)
    if width < dst_width:
        left_padding = int((dst_width - width) / 2)
        right_padding = dst_width - width - left_padding
        resized_img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
        resized_img = cv2.copyMakeBorder(resized_img, 0, 0, left_padding, right_padding,
                                         cv2.BORDER_CONSTANT, value=[255, 255, 255])
    else:
        resized_img = cv2.resize(img, (dst_width, height), interpolation=cv2.INTER_CUBIC)
    return resized_img
Example #13
Source File: coco_hpe2_dataset.py From imgclsmob with MIT License | 6 votes |
def pad_width(img, stride, pad_value, min_dims):
    h, w, _ = img.shape
    h = min(min_dims[0], h)
    min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
    min_dims[1] = max(min_dims[1], w)
    min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
    top = int(math.floor((min_dims[0] - h) / 2.0))
    left = int(math.floor((min_dims[1] - w) / 2.0))
    bottom = int(min_dims[0] - h - top)
    right = int(min_dims[1] - w - left)
    pad = [top, left, bottom, right]
    padded_img = cv2.copyMakeBorder(
        src=img,
        top=top,
        bottom=bottom,
        left=left,
        right=right,
        borderType=cv2.BORDER_CONSTANT,
        value=pad_value)
    return padded_img, pad
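A hypothetical call to the helper above, assuming numpy, math and cv2 are imported and pad_width is in scope; the frame size, stride and min_dims values are illustrative.

# Pad a 100x150 BGR frame so both dimensions become multiples of stride=8
# and the height reaches at least 128 pixels.
frame = np.zeros((100, 150, 3), dtype=np.uint8)
padded, pad = pad_width(frame, stride=8, pad_value=(0, 0, 0), min_dims=[128, 128])
print(padded.shape, pad)  # (128, 152, 3) [14, 1, 14, 1]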
Example #14
Source File: coco_hpe2_dataset.py From imgclsmob with MIT License | 6 votes |
def pad_width(img, stride, pad_value, min_dims):
    h, w, _ = img.shape
    h = min(min_dims[0], h)
    min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
    min_dims[1] = max(min_dims[1], w)
    min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
    top = int(math.floor((min_dims[0] - h) / 2.0))
    left = int(math.floor((min_dims[1] - w) / 2.0))
    bottom = int(min_dims[0] - h - top)
    right = int(min_dims[1] - w - left)
    pad = [top, left, bottom, right]
    padded_img = cv2.copyMakeBorder(
        src=img,
        top=top,
        bottom=bottom,
        left=left,
        right=right,
        borderType=cv2.BORDER_CONSTANT,
        value=pad_value)
    return padded_img, pad
Example #15
Source File: coco_hpe2_dataset.py From imgclsmob with MIT License | 6 votes |
def pad_width(img, stride, pad_value, min_dims):
    h, w, _ = img.shape
    h = min(min_dims[0], h)
    min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
    min_dims[1] = max(min_dims[1], w)
    min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
    top = int(math.floor((min_dims[0] - h) / 2.0))
    left = int(math.floor((min_dims[1] - w) / 2.0))
    bottom = int(min_dims[0] - h - top)
    right = int(min_dims[1] - w - left)
    pad = [top, left, bottom, right]
    padded_img = cv2.copyMakeBorder(
        src=img,
        top=top,
        bottom=bottom,
        left=left,
        right=right,
        borderType=cv2.BORDER_CONSTANT,
        value=pad_value)
    return padded_img, pad
Example #16
Source File: coco_hpe2_dataset.py From imgclsmob with MIT License | 6 votes |
def pad_width(img, stride, pad_value, min_dims):
    h, w, _ = img.shape
    h = min(min_dims[0], h)
    min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
    min_dims[1] = max(min_dims[1], w)
    min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
    top = int(math.floor((min_dims[0] - h) / 2.0))
    left = int(math.floor((min_dims[1] - w) / 2.0))
    bottom = int(min_dims[0] - h - top)
    right = int(min_dims[1] - w - left)
    pad = [top, left, bottom, right]
    padded_img = cv2.copyMakeBorder(
        src=img,
        top=top,
        bottom=bottom,
        left=left,
        right=right,
        borderType=cv2.BORDER_CONSTANT,
        value=pad_value)
    return padded_img, pad
Example #17
Source File: cv_detection_right_hand.py From AI-Robot-Challenge-Lab with MIT License | 6 votes |
def __apply_template_matching(angle, template, image):
    # Rotate the template
    template_rotated = __rotate_image_size_corrected(template, angle)

    # Apply template matching
    image_templated = cv2.matchTemplate(image, template_rotated, cv2.TM_CCOEFF_NORMED)

    # Correct template matching image size difference
    template_rotated_height, template_rotated_width = template_rotated.shape
    template_half_height = template_rotated_height // 2
    template_half_width = template_rotated_width // 2

    image_templated_inrange_size_corrected = cv2.copyMakeBorder(
        image_templated,
        template_half_height, template_half_height,
        template_half_width, template_half_width,
        cv2.BORDER_CONSTANT, value=0)

    # Calculate maximum match coefficient
    max_match = numpy.max(image_templated_inrange_size_corrected)

    return (max_match, angle, template_rotated, image_templated_inrange_size_corrected)
Example #18
Source File: datasets.py From CCNet with MIT License | 6 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    size = image.shape
    name = osp.splitext(osp.basename(datafiles["img"]))[0]
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w, _ = image.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                   cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
    image = image.transpose((2, 0, 1))
    return image, np.array(size), name
Example #19
Source File: sal_data_layer.py From DSS with MIT License | 6 votes |
def load_label(self, idx):
    """
    Load label image as 1 x height x width integer array of label indices.
    The leading singleton dimension is required by the loss.
    """
    im = Image.open(self.data_root + self.label_lst[idx])
    label = np.array(im) / 255  # cv2.imread(self.data_root + self.label_lst[idx], 0) / 255
    # if self.scales != None:
    #     label = cv2.resize(label, None, None, fx=self.scales[self.scale_ind], fy=self.scales[self.scale_ind],
    #                        interpolation=cv2.INTER_NEAREST)
    # height, width = label.shape[:2]
    # h_off = self.crop_size - height
    # w_off = self.crop_size - width
    # label = cv2.copyMakeBorder(label, 0, max(0, h_off), 0, max(0, w_off), cv2.BORDER_CONSTANT, value=[-1,])
    # label = label[self.h_off:self.h_off+self.height, self.w_off:self.w_off+self.width]
    label = label[np.newaxis, ...]
    if self.flip == 1:
        label = label[:, :, ::-1]
    return label
Example #20
Source File: sal_data_layer.py From DSS with MIT License | 6 votes |
def load_region(self, idx):
    """
    Load label image as 1 x height x width integer array of label indices.
    The leading singleton dimension is required by the loss.
    """
    im = Image.open(self.data_root + self.region_lst[idx])
    region = np.array(im, dtype=np.float32) / 15.0
    # print np.unique(region)
    # if self.scales != None:
    #     label = cv2.resize(label, None, None, fx=self.scales[self.scale_ind], fy=self.scales[self.scale_ind],
    #                        interpolation=cv2.INTER_NEAREST)
    # height, width = label.shape[:2]
    # h_off = self.crop_size - height
    # w_off = self.crop_size - width
    # label = cv2.copyMakeBorder(label, 0, max(0, h_off), 0, max(0, w_off), cv2.BORDER_CONSTANT, value=[-1,])
    region = region[np.newaxis, ...]
    if self.flip == 1:
        region = region[:, :, ::-1]
    return region
Example #21
Source File: datasets.py From CCNet with MIT License | 6 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    size = image.shape
    name = osp.splitext(osp.basename(datafiles["img"]))[0]
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w, _ = image.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                   cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
    image = image.transpose((2, 0, 1))
    return image, name, size
Example #22
Source File: dilated.py From adversarial-attacks with MIT License | 6 votes |
def PreprocessImage(name, args):
    """Preprocess according to the original author's code."""
    image = cv2.imread(name, 1).astype(np.float32) - args.mean
    if args.resize_dims is not None:
        image = cv2.resize(image, dsize=(args.resize_dims[0], args.resize_dims[1]))
    im_height = image.shape[0]
    im_width = image.shape[1]

    label_margin = 186
    input_image = cv2.copyMakeBorder(image, label_margin, label_margin,
                                     label_margin, label_margin,
                                     cv2.BORDER_REFLECT_101)
    input_size = [args.pad_size[1], args.pad_size[0]]  # Order is H x W
    margin = [0, input_size[0] - input_image.shape[0],
              0, input_size[1] - input_image.shape[1]]
    input_image = cv2.copyMakeBorder(input_image, margin[0], margin[1],
                                     margin[2], margin[3],
                                     cv2.BORDER_REFLECT_101)
    input_image = input_image.transpose([2, 0, 1])  # To make it C x H x W
    return input_image, im_height, im_width, image
Example #23
Source File: utils.py From generative_adversary with GNU General Public License v3.0 | 6 votes |
def label_images(images, labels):
    font = cv.FONT_HERSHEY_SIMPLEX
    new_imgs = []
    for i, img in enumerate(images):
        new_img = ((img.copy() + 1.) * 127.5).astype(np.uint8)
        if new_img.shape[-1] == 3:
            new_img = new_img[..., ::-1]
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1, (255, 255, 255), 2, cv.LINE_AA)
            new_img = cv.copyMakeBorder(new_img, top=2, bottom=2, left=2, right=2,
                                        borderType=cv.BORDER_CONSTANT, value=(255, 255, 255))
        else:
            new_img = np.squeeze(new_img)
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1, (255), 2, cv.LINE_AA)
            new_img = new_img[..., None]
        new_img = (new_img / 127.5 - 1.0).astype(np.float32)
        new_imgs.append(new_img[..., ::-1])

    return np.stack(new_imgs, axis=0)
Example #24
Source File: opencv_video.py From BiblioPixelAnimations with MIT License | 5 votes |
def step(self, amt=1):
    ret, frame = self._vid.read()
    image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGRA)

    if self.crop:
        image = image[self._cropY + self.yoff:self._ih - self._cropY + self.yoff,
                      self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
    else:
        t, b, l, r = self._pad
        image = cv2.copyMakeBorder(image, t, b, l, r,
                                   cv2.BORDER_CONSTANT, value=[0, 0, 0])

    resized = cv2.resize(image, (self.width, self.height),
                         interpolation=cv2.INTER_CUBIC)

    if self.mirror:
        resized = cv2.flip(resized, 1)

    for y in range(self.height):
        for x in range(self.width):
            self.layout.set(x, y, tuple(resized[y, x][0:3]))

    if not isinstance(self.videoSource, int):
        self._frameCount += 1
        if self._frameCount >= self._frameTotal:
            self._vid.set(1, 0)  # CV_CAP_PROP_POS_FRAMES
            self._frameCount = 0
            self.animComplete = True
Example #25
Source File: datasets.py From CCNet with MIT License | 5 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
    size = image.shape
    name = datafiles["name"]
    if self.scale:
        image, label = self.generate_scale_label(image, label)
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w = label.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                     cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(self.ignore_label,))
    else:
        img_pad, label_pad = image, label

    img_h, img_w = label_pad.shape
    h_off = random.randint(0, img_h - self.crop_h)
    w_off = random.randint(0, img_w - self.crop_w)
    # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
    image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    # image = image[:, :, ::-1]  # change to BGR
    image = image.transpose((2, 0, 1))
    if self.is_mirror:
        flip = np.random.choice(2) * 2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
    return image.copy(), label.copy(), np.array(size), name
Example #26
Source File: SudokuExtractor.py From SolveSudoku with MIT License | 5 votes |
def scale_and_centre(img, size, margin=0, background=0):
    """Scales and centres an image onto a new background square."""
    h, w = img.shape[:2]

    def centre_pad(length):
        """Handles centering for a given length that may be odd or even."""
        if length % 2 == 0:
            side1 = int((size - length) / 2)
            side2 = side1
        else:
            side1 = int((size - length) / 2)
            side2 = side1 + 1
        return side1, side2

    def scale(r, x):
        return int(r * x)

    if h > w:
        t_pad = int(margin / 2)
        b_pad = t_pad
        ratio = (size - margin) / h
        w, h = scale(ratio, w), scale(ratio, h)
        l_pad, r_pad = centre_pad(w)
    else:
        l_pad = int(margin / 2)
        r_pad = l_pad
        ratio = (size - margin) / w
        w, h = scale(ratio, w), scale(ratio, h)
        t_pad, b_pad = centre_pad(h)

    img = cv2.resize(img, (w, h))
    img = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad, r_pad, cv2.BORDER_CONSTANT, None, background)
    return cv2.resize(img, (size, size))
Example #27
Source File: SudokuExtractor.py From SolveSudoku with MIT License | 5 votes |
def show_digits(digits, colour=255):
    """Shows list of 81 extracted digits in a grid format"""
    rows = []
    with_border = [cv2.copyMakeBorder(img.copy(), 1, 1, 1, 1, cv2.BORDER_CONSTANT, None, colour)
                   for img in digits]
    for i in range(9):
        row = np.concatenate(with_border[i * 9:((i + 1) * 9)], axis=1)
        rows.append(row)
    img = show_image(np.concatenate(rows))
    return img
Example #28
Source File: datasets.py From CCNet with MIT License | 5 votes |
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
    label = self.id2trainId(label)
    size = image.shape
    name = datafiles["name"]
    if self.scale:
        image, label = self.generate_scale_label(image, label)
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w = label.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                     cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(self.ignore_label,))
    else:
        img_pad, label_pad = image, label

    img_h, img_w = label_pad.shape
    h_off = random.randint(0, img_h - self.crop_h)
    w_off = random.randint(0, img_w - self.crop_w)
    # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
    image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    # image = image[:, :, ::-1]  # change to BGR
    image = image.transpose((2, 0, 1))
    if self.is_mirror:
        flip = np.random.choice(2) * 2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
    return image.copy(), label.copy(), np.array(size), name
Example #29
Source File: transforms.py From VS-ReID with BSD 2-Clause "Simplified" License | 5 votes |
def pad(img, offeset, value=0):
    h1, w1, h2, w2 = offeset
    img = cv2.copyMakeBorder(
        img, h1, h2, w1, w2, cv2.BORDER_CONSTANT, value=value)
    return img
Example #30
Source File: datasets.py From pytorch-yolov3 with GNU General Public License v3.0 | 5 votes |
def letterbox(img, height=416, color=(127.5, 127.5, 127.5)):
    # Resize a rectangular image to a padded square
    shape = img.shape[:2]  # shape = [height, width]
    ratio = float(height) / max(shape)  # ratio = old / new
    new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))
    dw = (height - new_shape[0]) / 2  # width padding
    dh = (height - new_shape[1]) / 2  # height padding
    top, bottom = round(dh - 0.1), round(dh + 0.1)
    left, right = round(dw - 0.1), round(dw + 0.1)
    img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA)  # resized, no border
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # padded square
    return img, ratio, dw, dh
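A hypothetical call to the letterbox helper above, assuming numpy and cv2 are imported and the function is in scope; the frame here is a synthetic stand-in for a video frame.

frame = np.zeros((360, 640, 3), dtype=np.uint8)
square, ratio, dw, dh = letterbox(frame, height=416)
print(square.shape)  # (416, 416, 3): resized to 416 wide, then padded top and bottom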