Python cv2.INTER_AREA Examples
The following are 30 code examples of cv2.INTER_AREA. You can go to the original project or source file by following the links above each example. You may also want to check out all the other available functions and classes of the cv2 module.
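Before diving into the examples, here is a minimal, self-contained sketch of how cv2.INTER_AREA is typically used: it is an interpolation flag passed to cv2.resize, and it is generally the recommended choice when shrinking an image. The file names below are placeholders for illustration only.

import cv2

# Read an image from disk (path is a placeholder).
img = cv2.imread("input.jpg")

# Downscale to a fixed size; INTER_AREA resamples using pixel-area relation,
# which tends to avoid moire artifacts when shrinking.
small = cv2.resize(img, (320, 240), interpolation=cv2.INTER_AREA)

# Downscale by a factor instead: pass (0, 0) as dsize and set fx/fy.
half = cv2.resize(img, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)

cv2.imwrite("small.jpg", small)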
Example #1
Source File: preprocessor.py From signature-recognition with MIT License
def prepare(input):
    # preprocessing the image input
    clean = cv2.fastNlMeansDenoising(input)
    ret, tresh = cv2.threshold(clean, 127, 1, cv2.THRESH_BINARY_INV)
    img = crop(tresh)

    # 40x10 image as a flatten array
    flatten_img = cv2.resize(img, (40, 10), interpolation=cv2.INTER_AREA).flatten()

    # resize to 400x100
    resized = cv2.resize(img, (400, 100), interpolation=cv2.INTER_AREA)
    columns = np.sum(resized, axis=0)  # sum of all columns
    lines = np.sum(resized, axis=1)  # sum of all lines

    h, w = img.shape
    aspect = w / h
    return [*flatten_img, *columns, *lines, aspect]
Example #2
Source File: detect_face.py From TNT with GNU General Public License v3.0
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)  # @UndefinedVariable
    return im_data

    # This method is kept for debugging purpose
    # h = img.shape[0]
    # w = img.shape[1]
    # hs, ws = sz
    # dx = float(w) / ws
    # dy = float(h) / hs
    # im_data = np.zeros((hs, ws, 3))
    # for a1 in range(0, hs):
    #     for a2 in range(0, ws):
    #         for a3 in range(0, 3):
    #             im_data[a1, a2, a3] = img[int(floor(a1*dy)), int(floor(a2*dx)), a3]
    # return im_data
Example #3
Source File: data_loader.py From DeblurGAN-tf with MIT License
def read_image_pair(pair_path, resize_or_crop=None, image_size=(256, 256)):
    image_blur = cv2.imread(pair_path[0], cv2.IMREAD_COLOR)
    image_blur = image_blur / 255.0 * 2.0 - 1.0
    image_real = cv2.imread(pair_path[1], cv2.IMREAD_COLOR)
    image_real = image_real / 255.0 * 2.0 - 1.0

    if resize_or_crop != None:
        assert image_size != None

    if resize_or_crop == 'resize':
        image_blur = cv2.resize(image_blur, image_size, interpolation=cv2.INTER_AREA)
        image_real = cv2.resize(image_real, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        image_blur = cv2.crop(image_blur, image_size)
        image_real = cv2.crop(image_real, image_size)
    else:
        raise

    if np.size(np.shape(image_blur)) == 3:
        image_blur = np.expand_dims(image_blur, axis=0)
    if np.size(np.shape(image_real)) == 3:
        image_real = np.expand_dims(image_real, axis=0)

    image_blur = np.array(image_blur, dtype=np.float32)
    image_real = np.array(image_real, dtype=np.float32)
    return image_blur, image_real
Example #4
Source File: data_feeder.py From tf-lcnn with GNU General Public License v3.0
def get_mnist_data(is_train, image_size, batchsize):
    ds = MNISTCh('train' if is_train else 'test', shuffle=True)

    if is_train:
        augs = [
            imgaug.RandomApplyAug(imgaug.RandomResize((0.8, 1.2), (0.8, 1.2)), 0.3),
            imgaug.RandomApplyAug(imgaug.RotationAndCropValid(15), 0.5),
            imgaug.RandomApplyAug(imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01), 0.25),
            imgaug.Resize((224, 224), cv2.INTER_AREA)
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 128 * 10, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 256, 4)
    else:
        # no augmentation, only resizing
        augs = [
            imgaug.Resize((image_size, image_size), cv2.INTER_CUBIC),
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 20, 2)
    return ds
Example #5
Source File: data_loader.py From DeblurGAN-tf with MIT License
def read_image(path, resize_or_crop=None, image_size=(256, 256)):
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    image = image / 255.0 * 2.0 - 1.0

    assert resize_or_crop != None
    assert image_size != None

    if resize_or_crop == 'resize':
        image = cv2.resize(image, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        image = cv2.crop(image, image_size)

    if np.size(np.shape(image)) == 3:
        image = np.expand_dims(image, axis=0)

    image = np.array(image, dtype=np.float32)
    return image
Example #6
Source File: imutils.py From PracticalPythonAndOpenCV_CaseStudies with GNU General Public License v3.0
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size
    (h, w) = image.shape[:2]

    # If both the width and height are None, then return the original image
    if width is None and height is None:
        return image

    # Check to see if the width is None
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)

    # Otherwise, the height is None
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Resize the image
    resized = cv2.resize(image, dim, interpolation=inter)

    # Return the resized image
    return resized
Example #7
Source File: imutils.py From PracticalPythonAndOpenCV_CaseStudies with GNU General Public License v3.0
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size
    (h, w) = image.shape[:2]

    # If both the width and height are None, then return the original image
    if width is None and height is None:
        return image

    # Check to see if the width is None
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)

    # Otherwise, the height is None
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Resize the image
    resized = cv2.resize(image, dim, interpolation=inter)

    # Return the resized image
    return resized
Example #8
Source File: pose_dataset.py From tf-pose with Apache License 2.0
def get_heatmap(self, target_size):
    heatmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width), dtype=np.float32)

    for joints in self.joint_list:
        for idx, point in enumerate(joints):
            if point[0] < 0 or point[1] < 0:
                continue
            CocoMetadata.put_heatmap(heatmap, idx, point, self.sigma)

    heatmap = heatmap.transpose((1, 2, 0))

    # background
    heatmap[:, :, -1] = np.clip(1 - np.amax(heatmap, axis=2), 0.0, 1.0)

    if target_size:
        heatmap = cv2.resize(heatmap, target_size, interpolation=cv2.INTER_AREA)

    return heatmap.astype(np.float16)
Example #9
Source File: new_algo.py From Photoroid with GNU General Public License v3.0
def custom_hashing(image, hash_size=8):
    image = cv2.resize(image, (hash_size + 1, hash_size), cv2.INTER_AREA)
    pixel = []
    [rows, cols] = image.shape
    for i in range(0, rows):
        for j in range(0, cols):
            pixel.append(image.item(i, j))
    pixels = list(pixel)
    difference = []
    for row in range(hash_size - 1):
        for col in range(hash_size - 1):
            pixel_left = image.item(row, col)
            pixel_right = image.item(row, col + 1)
            difference.append(pixel_left > pixel_right)
    decimal_value = 0
    hex_string = []
    for index, value in enumerate(difference):
        if value:
            decimal_value += 2 ** (index % 8)
        if (index % 8) == 7:
            hex_string.append(hex(decimal_value)[2:].rjust(2, "0"))
            decimal_value = 0
    return "".join(hex_string)
Example #10
Source File: label.py From derplearning with MIT License
def seek(self, frame_id=None):
    """Update the current frame to the given frame_id, otherwise advances by 1 frame"""
    if frame_id is None:
        frame_id = self.frame_id + 1
    if frame_id < 0:
        frame_id = 0
        self.paused = True
    if frame_id >= self.n_frames:
        frame_id = self.n_frames - 1
        self.paused = True
    self.update_quality(self.frame_id, frame_id, self.quality)
    self.frame = cv2.resize(
        derp.util.decode_jpg(self.topics["camera"][frame_id].jpg),
        None,
        fx=self.scale,
        fy=self.scale,
        interpolation=cv2.INTER_AREA,
    )
    self.frame_id = frame_id
    return True
Example #11
Source File: imutils.py From PracticalPythonAndOpenCV_CaseStudies with GNU General Public License v3.0
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size
    (h, w) = image.shape[:2]

    # If both the width and height are None, then return the original image
    if width is None and height is None:
        return image

    # Check to see if the width is None
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)

    # Otherwise, the height is None
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Resize the image
    resized = cv2.resize(image, dim, interpolation=inter)

    # Return the resized image
    return resized
Example #12
Source File: detect_face.py From insightface with MIT License
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)  # @UndefinedVariable
    return im_data

    # This method is kept for debugging purpose
    # h = img.shape[0]
    # w = img.shape[1]
    # hs, ws = sz
    # dx = float(w) / ws
    # dy = float(h) / hs
    # im_data = np.zeros((hs, ws, 3))
    # for a1 in range(0, hs):
    #     for a2 in range(0, ws):
    #         for a3 in range(0, 3):
    #             im_data[a1, a2, a3] = img[int(floor(a1*dy)), int(floor(a2*dx)), a3]
    # return im_data
Example #13
Source File: histogram_intersection.py From exposure with MIT License
def read_images(src, tag=None, set=None):
    files = os.listdir(src)
    images = []
    if set is not None:
        set = read_set(set)
    for f in files:
        if tag and f.find(tag) == -1:
            continue
        if set is not None:
            if int(f.split('.')[0]) not in set:
                continue
        image = (cv2.imread(os.path.join(src, f))[:, :, ::-1] / 255.0).astype(np.float32)
        longer_edge = min(image.shape[0], image.shape[1])
        for i in range(4):
            sx = random.randrange(0, image.shape[0] - longer_edge + 1)
            sy = random.randrange(0, image.shape[1] - longer_edge + 1)
            new_image = image[sx:sx + longer_edge, sy:sy + longer_edge]
            patch = cv2.resize(new_image, dsize=(80, 80), interpolation=cv2.INTER_AREA)
            for j in range(4):
                target_size = 64
                ssx = random.randrange(0, patch.shape[0] - target_size)
                ssy = random.randrange(0, patch.shape[1] - target_size)
                images.append(patch[ssx:ssx + target_size, ssy:ssy + target_size])
    return images
Example #14
Source File: topic_utils.py From visual_foresight with MIT License
def process_image(self, img):
    assert self._bot + self._top < img.shape[0], "Overcrop! bot + top crop >= image height!"
    assert self._right + self._left < img.shape[1], "Overcrop! right + left crop >= image width!"

    bot, right = self._bot, self._right
    if self._bot <= 0:
        bot = -(img.shape[0] + 10)
    if self._right <= 0:
        right = -(img.shape[1] + 10)
    img = img[self._top:-bot, self._left:-right]

    if self.flip:
        img = img[::-1, ::-1]

    if (self.height, self.width) != img.shape[:2]:
        return cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_AREA)
    return img
Example #15
Source File: gym_wrapper.py From ngraph-python with Apache License 2.0
def _modify_observation(self, observation):
    # convert color to grayscale using luma component
    observation = (
        observation[:, :, 0] * 0.299 +
        observation[:, :, 1] * 0.587 +
        observation[:, :, 2] * 0.114
    )

    observation = cv2.resize(
        observation, (84, 110), interpolation=cv2.INTER_AREA
    )
    observation = observation[18:102, :]
    assert observation.shape == (84, 84)

    # convert to values between 0 and 1
    observation = np.array(observation, dtype=np.uint8)

    return observation
Example #16
Source File: atari_wrapper.py From tf2rl with MIT License
def observation(self, obs):
    if self._key is None:
        frame = obs
    else:
        frame = obs[self._key]

    if self._grayscale:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    frame = cv2.resize(
        frame, (self._width, self._height), interpolation=cv2.INTER_AREA
    )
    if self._grayscale:
        frame = np.expand_dims(frame, -1)

    if self._key is None:
        obs = frame
    else:
        obs = obs.copy()
        obs[self._key] = frame
    return obs
Example #17
Source File: vizdoom_gym.py From MazeExplorer with MIT License
def _resize(img, shape):
    """Resize the specified image.

    :param img: image to resize
    :param shape: desired shape in the format (rows, columns)
    :return: resized image
    """
    if not (OPENCV_AVAILABLE or PILLOW_AVAILABLE):
        raise ValueError('No image library backend found. Install either '
                         'OpenCV or Pillow to support image processing.')

    if OPENCV_AVAILABLE:
        return cv2.resize(img, shape, interpolation=cv2.INTER_AREA)
    if PILLOW_AVAILABLE:
        return np.array(PIL.Image.fromarray(img).resize(shape))

    raise NotImplementedError
Example #18
Source File: im_utils.py From visual_foresight with MIT License
def resize_store(t, target_array, input_array):
    target_img_height, target_img_width = target_array.shape[2:4]

    if (target_img_height, target_img_width) == input_array.shape[1:3]:
        for i in range(input_array.shape[0]):
            target_array[t, i] = input_array[i]
    else:
        for i in range(input_array.shape[0]):
            target_array[t, i] = cv2.resize(input_array[i],
                                            (target_img_width, target_img_height),
                                            interpolation=cv2.INTER_AREA)
Example #19
Source File: place2.py From chainer-partial_convolution_image_inpainting with MIT License
def do_resize(self, img):
    img = cv2.resize(img, (280, 336), interpolation=cv2.INTER_AREA)
    return img
Example #20
Source File: head_pose_estimation.py From pyERA with MIT License
def return_yaw(self, image):
    """ Return the yaw angle associated with the input image.

    @param image It is a colour image. It must be >= 64 pixel
    """
    # Uncomment if you want to see the image
    # cv2.imshow('image', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    h, w, d = image.shape

    # check if the image has the right shape
    if(h == w and h == 64 and d == 3):
        image_normalised = np.add(image, -127)  # normalisation of the input
        feed_dict = {self.tf_yaw_input_vector: image_normalised}
        yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict)
        yaw_vector = np.multiply(yaw_raw, 100.0)
        # yaw = yaw_raw  # * 100  # cnn out is in range [-1, +1] --> [-100, +100]
        return yaw_vector

    # If the image is > 64 pixel then resize it
    if(h == w and h > 64 and d == 3):
        image_resized = cv2.resize(image, (64, 64), interpolation=cv2.INTER_AREA)
        image_normalised = np.add(image_resized, -127)  # normalisation of the input
        feed_dict = {self.tf_yaw_input_vector: image_normalised}
        yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict)
        yaw_vector = np.multiply(yaw_raw, 100.0)  # cnn-out is in range [-1, +1] --> [-100, +100]
        return yaw_vector

    # wrong shape
    if(h != w or w < 64 or h < 64):
        raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input has wrong shape. Height and Width must be >= 64 pixel')

    # wrong number of channels
    if(d != 3):
        raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input does not have 3 channels, this function accepts only colour images.')
Example #21
Source File: test_yolov3.py From object-detection with MIT License
def video_detection(sess, image, colors):
    resized_image = cv2.resize(image, (416, 416), interpolation=cv2.INTER_AREA)
    resized_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)
    image_data = np.array(resized_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={yolov3.input: image_data,
                                                             K.learning_phase(): 0})
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    return image
Example #22
Source File: yolo_utils.py From object-detection with MIT License
def preprocess_image(img_path, model_image_size):
    image = cv2.imread(img_path)
    resized_image = cv2.resize(image, tuple(reversed(model_image_size)), interpolation=cv2.INTER_AREA)  # images/dog.jpg use this is good
    # resized_image = cv2.resize(image, tuple(reversed(model_image_size)), interpolation=cv2.INTER_CUBIC)
    resized_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)
    image_data = np.array(resized_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

    return image, image_data
Example #23
Source File: ssd_mobilenet_utils.py From object-detection with MIT License
def preprocess_image(image, model_image_size=(300, 300)):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # image = cv2.resize(image, tuple(reversed(model_image_size)), interpolation=cv2.INTER_AREA)
    image = np.array(image, dtype='float32')
    image = np.expand_dims(image, 0)  # Add batch dimension.

    return image
Example #24
Source File: test_tiny_yolo.py From object-detection with MIT License
def video_detection(sess, image, colors):
    resized_image = cv2.resize(image, (416, 416), interpolation=cv2.INTER_AREA)
    resized_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)
    image_data = np.array(resized_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={yolo_model.input: image_data,
                                                             K.learning_phase(): 0})
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    return image
Example #25
Source File: helpers.py From hazymaze with Apache License 2.0
def resize_transparent_sprite(image, width=None, height=None, inter=cv2.INTER_AREA):
    # split image and alpha channel
    # resize them separately
    # join them
    other_channels = alpha_channel = image[:, :, :3]
    alpha_channel = image[:, :, 3]
    dim = None
    (h, w) = image.shape[:2]

    # if both the width and height are None, then return the
    # original image
    if width is None and height is None:
        return image

    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the
        # dimensions
        r = height / float(h)
        dim = (int(w * r), height)

    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the
        # dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    image_resized = cv2.resize(other_channels, dim, interpolation=inter)
    alpha_resized = cv2.resize(alpha_channel, dim, interpolation=inter)

    new_image = np.empty((dim[1], dim[0], 4))
    new_image[:, :, :3] = image_resized
    new_image[:, :, 3] = alpha_resized

    return new_image
Example #26
Source File: toolbox.py From findit with MIT License
def compress_frame(
        old: np.ndarray,
        compress_rate: float = None,
        target_size: typing.Tuple[int, int] = None,
        not_grey: bool = None,
        interpolation: int = None,
) -> np.ndarray:
    """
    Compress frame

    :param old: origin frame
    :param compress_rate: before_pic * compress_rate = after_pic. default to 1 (no compression)
        eg: 0.2 means 1/5 size of before_pic
    :param target_size: tuple. (100, 200) means compressing before_pic to 100x200
    :param not_grey: convert into grey if True
    :param interpolation:
    :return:
    """
    target = turn_grey(old) if not not_grey else old

    if not interpolation:
        interpolation = cv2.INTER_AREA

    # target size first
    if target_size:
        return cv2.resize(target, target_size, interpolation=interpolation)

    # else, use compress rate
    # default rate is 1 (no compression)
    if not compress_rate:
        return target

    return cv2.resize(
        target, (0, 0), fx=compress_rate, fy=compress_rate, interpolation=interpolation
    )
Example #27
Source File: dopamine_connector.py From tensor2tensor with Apache License 2.0
def observation(self, frames):
    if not cv2:
        return frames
    return np.array([cv2.resize(
        frame, (self.size, self.size), interpolation=cv2.INTER_AREA)
        for frame in frames])
Example #28
Source File: augmentations.py From ScanSSD with MIT License
def __call__(self, image, boxes=None, labels=None):
    # plt.imshow(image)
    # plt.savefig("eval/bface.png", dpi=600)
    # plt.close()
    image = cv2.resize(image, (self.size, self.size), interpolation=cv2.INTER_AREA)
    # plt.imshow(image)
    # plt.savefig("eval/face.png", dpi=600)
    # plt.close()
    return image, boxes, labels
Example #29
Source File: __init__.py From ScanSSD with MIT License
def base_transform(image, size, mean):
    # print('Image size ', image.shape)
    image = image.astype(np.float32)
    x = cv2.resize(image, (size, size), interpolation=cv2.INTER_AREA).astype(np.float32)
    x -= mean
    return x
Example #30
Source File: atari_wrapper.py From tf2rl with MIT License
def process(frame):
    if frame.size == 210 * 160 * 3:
        img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
    elif frame.size == 250 * 160 * 3:
        img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
    else:
        assert False, "Unknown resolution."
    img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
    resized_screen = cv2.resize(
        img, (84, 110), interpolation=cv2.INTER_AREA)
    x_t = resized_screen[18:102, :]
    x_t = np.reshape(x_t, [84, 84, 1])
    return x_t.astype(np.uint8)