Python imutils.resize() Examples
The following are 29 code examples of imutils.resize(), collected from open-source projects; the line above each example names its source file, project, and license.
You may also want to check out all available functions/classes of the module imutils, or try the search function.
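
Before the project examples, a minimal sketch of the call itself may help: imutils.resize() wraps cv2.resize() but preserves the input's aspect ratio, so you typically pass only one of width or height. The file name below is a placeholder.

import cv2
import imutils

# "example.jpg" is a hypothetical input path
image = cv2.imread("example.jpg")

# request a width of 400 px; the height is derived from the
# original aspect ratio, so the image is not distorted
resized = imutils.resize(image, width=400)

print(image.shape, "->", resized.shape)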
Example #1
Source File: Camera.py From Facial-Recognition-Attendance-System with MIT License

def start_capture(self, height=None, width=None, usingPiCamera=IS_RASPBERRY_PI):
    import imutils
    from imutils.video import VideoStream

    resolution = (self.height, self.width)
    if height:
        if width:
            resolution = (height, width)
    print("Camera Resolution:", resolution)

    cf = VideoStream(usePiCamera=usingPiCamera,
                     resolution=resolution,
                     framerate=30).start()
    self.current_frame = cf
    time.sleep(2)

    if not usingPiCamera:
        frame = imutils.resize(self.current_frame.read(),
                               width=resolution[0], height=resolution[1])
    # Stream started, call current_frame.read() to get current frame

Example #2
Source File: Camera.py From SmoothStream with GNU General Public License v3.0

def start_capture(self, height=None, width=None, usingPiCamera=IS_RASPBERRY_PI):
    import imutils
    from imutils.video import VideoStream

    resolution = (self.height, self.width)
    if height:
        if width:
            resolution = (height, width)

    cf = VideoStream(usePiCamera=usingPiCamera,
                     resolution=resolution,
                     framerate=32).start()
    self.current_frame = cf
    time.sleep(2)

    if not usingPiCamera:
        frame = imutils.resize(self.current_frame.read(), width=resolution[0])
    # Stream started, call current_frame.read() to get current frame

Example #3
Source File: pedestrian_detector.py From treasure-boxes with MIT License

def detect(self):
    vframe = None
    while vframe is None:
        vframe = self.cap.read()
    image = vframe.get_bgr()

    # Reference: https://www.pyimagesearch.com/2015/11/09/pedestrian-detection-opencv/
    # resize for (1) reducing run time, (2) improving accuracy
    image = imutils.resize(image, width=min(400, image.shape[1]))

    (rects, weights) = self.hog.detectMultiScale(
        image, winStride=(4, 4), padding=(8, 8), scale=1.05
    )

    # apply non-maxima suppression to the bounding boxes
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

    scorer.imshow(1, image)
    return len(pick), int(time.mktime(vframe.datetime.timetuple()))

Example #4
Source File: VidStab.py From python_video_stab with MIT License

def _resize_frame(self, frame):
    if self._processing_resize_kwargs == {}:
        if self.processing_max_dim:
            shape = frame.shape
            max_dim_size = max(shape)

            if max_dim_size <= self.processing_max_dim:
                self.processing_max_dim = max_dim_size
                self._processing_resize_kwargs = None
            else:
                max_dim_ind = shape.index(max_dim_size)
                max_dim_name = ['height', 'width'][max_dim_ind]
                self._processing_resize_kwargs = {max_dim_name: self.processing_max_dim}

    if self._processing_resize_kwargs is None:
        return frame

    resized = imutils.resize(frame, **self._processing_resize_kwargs)
    return resized

Example #5
Source File: mylib.py From anomaly-event-detection with MIT License

def resizeToMainWindowSize(image, winSize):
    '''
    ==================================================
    Resize the window size for larger than given image
    ==================================================

    Arguments:
        image: Image you want to resize
        winSize: Window size for the image

    Returns:
        Resized image of the given window size
    '''
    if type(winSize) == int:
        return cv2.resize(image, (winSize, winSize), interpolation=cv2.INTER_CUBIC)
    elif type(winSize) == list:
        return cv2.resize(image, (winSize[0], winSize[1]), interpolation=cv2.INTER_CUBIC)
    elif type(winSize) == tuple:
        return cv2.resize(image, winSize, interpolation=cv2.INTER_CUBIC)

Example #6
Source File: captchahelper.py From DL4CVStarterBundle with GNU General Public License v3.0

def preprocess(image, width, height):
    # Grab the dimensions of the image, then initialize the padding values
    (h, w) = image.shape[:2]

    # If the width is greater than the height then resize along the width
    if w > h:
        image = imutils.resize(image, width=width)
    # Otherwise, the height is greater than the width so resize along the height
    else:
        image = imutils.resize(image, height=height)

    # Determine the padding values for the width and height to obtain the target dimensions
    pad_w = int((width - image.shape[1]) / 2.0)
    pad_h = int((height - image.shape[0]) / 2.0)

    # Pad the image then apply one more resizing to handle any rounding issues
    image = cv2.copyMakeBorder(image, pad_h, pad_h, pad_w, pad_w, cv2.BORDER_REPLICATE)
    image = cv2.resize(image, (width, height))

    # Return the pre-processed image
    return image

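A short usage sketch for the helper above (the file name and the 28x28 target are hypothetical): it resizes along the larger dimension, pads the other, and always returns exactly width x height pixels.

import cv2

digit = cv2.imread("captcha_digit.png")  # placeholder path
fixed = preprocess(digit, 28, 28)
assert fixed.shape[:2] == (28, 28)
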
Example #7
Source File: VehicleMoniter.py From Traffic-Rule-Violation-Detection-System with MIT License

def getLicensePlateNumber(filer):
    try:
        js = api.recognize_file(filer, secret_key, country,
                                recognize_vehicle=recognize_vehicle,
                                state=state,
                                return_image=return_image,
                                topn=topn,
                                prewarp=prewarp)
        js = js.to_dict()
        # js = list(str(js))

        X1 = js['results'][0]['coordinates'][0]['x']
        Y1 = js['results'][0]['coordinates'][0]['y']
        X2 = js['results'][0]['coordinates'][2]['x']
        Y2 = js['results'][0]['coordinates'][2]['y']

        img = cv2.imread(filer)
        rimg = img[Y1:Y2, X1:X2]
        frame3 = rimg

        img3 = Image.fromarray(frame3)
        w, h = img3.size
        asprto = w / h
        frame3 = cv2.resize(frame3, (150, int(150 / asprto)))
        cv2image3 = cv2.cvtColor(frame3, cv2.COLOR_BGR2RGBA)
        img3 = Image.fromarray(cv2image3)
        imgtk3 = ImageTk.PhotoImage(image=img3)

        display4.imgtk = imgtk3  # shows frame for display 1
        display4.configure(image=imgtk3)
        display5.configure(text=js['results'][0]['plate'])
    except ApiException as e:
        print("Exception: \n", e)

Example #8
Source File: aspectawarepreprocessor.py From aiexamples with Apache License 2.0

def preprocess(self, image):
    # grab the dimensions of the image and then initialize
    # the deltas to use when cropping
    (h, w) = image.shape[:2]
    dW = 0
    dH = 0

    # if the width is smaller than the height, then resize
    # along the width (i.e., the smaller dimension) and then update
    # the deltas to crop the height to the desired dimension
    if w < h:
        image = imutils.resize(image, width=self.width, inter=self.inter)
        dH = int((image.shape[0] - self.height) / 2.0)
    else:
        image = imutils.resize(image, height=self.height, inter=self.inter)
        dW = int((image.shape[1] - self.width) / 2.0)

    # now that our images have been resized, we need to re-grab the width
    # and height, followed by performing the crop
    (h, w) = image.shape[:2]
    image = image[dH:h - dH, dW:w - dW]

    return cv2.resize(image, (self.width, self.height), interpolation=self.inter)

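A hedged usage sketch for the preprocessor above, assuming the enclosing class follows the source project's pattern of a constructor AspectAwarePreprocessor(width, height, inter=cv2.INTER_AREA); the import path and file name are hypothetical:

import cv2
from aspectawarepreprocessor import AspectAwarePreprocessor  # hypothetical import

aap = AspectAwarePreprocessor(256, 256)
image = cv2.imread("photo.jpg")  # placeholder path
output = aap.preprocess(image)
assert output.shape[:2] == (256, 256)
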
Example #9
Source File: aspectawarepreprocessor.py From Smile-Detector with MIT License

def preprocess(self, image):
    '''
    Grab the dimensions of the image and then initialize
    the deltas to use when cropping.
    '''
    (h, w) = image.shape[:2]
    dW = 0
    dH = 0

    # if the width is smaller than the height, then resize along the width
    # and then update the deltas to crop the height to the desired dimension
    if w < h:
        image = imutils.resize(image, width=self.width, inter=self.inter)
        dH = int((image.shape[0] - self.height) / 2.0)
    # otherwise the height is smaller than the width, so resize along the
    # height and then update the deltas to crop along the width
    else:
        image = imutils.resize(image, height=self.height, inter=self.inter)
        dW = int((image.shape[1] - self.width) / 2.0)

    # now that our image has been resized, we need to re-grab the width
    # and height, followed by performing the crop
    (h, w) = image.shape[:2]
    image = image[dH:h - dH, dW:w - dW]

    # finally, resize the image to the provided spatial dimensions to
    # ensure our output image is always a fixed size
    return cv2.resize(image, (self.width, self.height), interpolation=self.inter)

Example #10
Source File: tracker.py From telloCV with Apache License 2.0

def get_frame(vid_stream, stream):
    """grab the current video frame"""
    frame = vid_stream.read()

    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if stream else frame

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        return None
    else:
        frame = imutils.resize(frame, width=600)
        return frame

Example #11
Source File: frame_producer.py From eye_of_sauron with MIT License

def transform(frame, frame_num, object_key="original", camera=0, verbose=False):
    """Serialize a frame and create a JSON message with the serialized frame,
    camera number and timestamp.

    :param frame: numpy.ndarray, raw frame
    :param frame_num: frame number in the particular video/camera
    :param object_key: identifier for these objects
    :param camera: camera number the frame is from
    :param verbose: print out logs
    :return: a dict {"frame": string(base64encodedarray), "dtype": obj.dtype.str,
                     "shape": obj.shape, "timestamp": time.time(),
                     "camera": camera, "frame_num": frame_num}
    """
    frame = imutils.resize(frame, width=400)

    if verbose:
        # print raw frame size
        print("\nRAW ARRAY SIZE: ", sys.getsizeof(frame))

    # serialize frame
    frame_dict = np_to_json(frame.astype(np.uint8), prefix_name=object_key)

    # metadata for frame
    message = {"timestamp": time.time(), "camera": camera, "frame_num": frame_num}
    # add frame and metadata related to frame
    message.update(frame_dict)

    if verbose:
        # print message size
        print("\nMESSAGE SIZE: ", sys.getsizeof(message))

    return message

Example #12
Source File: image_utils.py From pytorch-widedeep with MIT License

def preprocess(self, image: np.ndarray) -> np.ndarray:
    (h, w) = image.shape[:2]
    dW = 0
    dH = 0

    if w < h:
        image = imutils.resize(image, width=self.width, inter=self.inter)
        dH = int((image.shape[0] - self.height) / 2.0)
    else:
        image = imutils.resize(image, height=self.height, inter=self.inter)
        dW = int((image.shape[1] - self.width) / 2.0)

    (h, w) = image.shape[:2]
    image = image[dH : h - dH, dW : w - dW]

    return cv2.resize(image, (self.width, self.height), interpolation=self.inter)

Example #13
Source File: align_face.py From glow with MIT License

def align_face(img):
    img = img[:, :, ::-1]  # convert from RGB to BGR format
    img = imutils.resize(img, width=800)

    # detect faces in the grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 2)

    if len(rects) > 0:
        # align the face using facial landmarks
        align_img = fa.align(img, gray, rects[0])[:, :, ::-1]
        align_img = np.array(Image.fromarray(align_img).convert('RGB'))
        return align_img, True
    else:
        # no face found
        return None, False

# Input: img_path
# Output: aligned_img if face_found, else None

Example #14
Source File: input_video_stream_paste_mask.py From face-detection-induction-course with MIT License

def start(self):
    """
    Start the program.
    :return:
    """
    self.console("Program started successfully.")
    self.init_mask()
    while self.listener:
        frame = self.read_data()
        frame = resize(frame, width=self.max_width)
        img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = self.detector(img_gray, 0)
        faces = self.orientation(rects, img_gray)
        draw_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if self.doing:
            self.drawing(draw_img, faces)
            self.animation_time += self.speed
            self.save_data(draw_img)
            if self.animation_time > self.duration:
                self.doing = False
                self.animation_time = 0
            else:
                frame = cv2.cvtColor(np.asarray(draw_img), cv2.COLOR_RGB2BGR)
        cv2.imshow("hello mask", frame)
        self.listener_keys()

Example #15
Source File: align_face.py From pix2pix-flow with MIT License

def align_face(img):
    img = img[:, :, ::-1]  # convert from RGB to BGR format
    img = imutils.resize(img, width=800)

    # detect faces in the grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 2)

    if len(rects) > 0:
        # align the face using facial landmarks
        align_img = fa.align(img, gray, rects[0])[:, :, ::-1]
        align_img = np.array(Image.fromarray(align_img).convert('RGB'))
        return align_img, True
    else:
        # no face found
        return None, False

# Input: img_path
# Output: aligned_img if face_found, else None

Example #16
Source File: alpr.py From zoneminder with GNU General Public License v2.0

def getscale(self):
    if g.config['resize'] != 'no':
        img = cv2.imread(self.filename)
        img_new = imutils.resize(img, width=min(int(g.config['resize']), img.shape[1]))
        oldh, oldw, _ = img.shape
        newh, neww, _ = img_new.shape
        rescale = True
        xfactor = neww / oldw
        yfactor = newh / oldh
        img = None
        img_new = None
        g.logger.debug(
            'ALPR will use {}x{} but Yolo uses {}x{} so ALPR boxes will be scaled {}x and {}y'
            .format(oldw, oldh, neww, newh, xfactor, yfactor), level=2)
    else:
        xfactor = 1
        yfactor = 1
    return (xfactor, yfactor)

Example #17
Source File: amplify_color.py From Heart-rate-measurement-using-camera with Apache License 2.0

def mainLoop(self):
    frame = self.webcam.get_frame()
    f1 = imutils.resize(frame, width=256)
    # crop_frame = frame[100:228, 200:328]
    self.data_buffer.append(f1)
    self.run_color()
    # print(frame)
    # if len(self.vidmag_frames) > 0:
    #     print(self.vidmag_frames[0])

    cv2.putText(frame, "FPS " + str(float("{:.2f}".format(self.fps))),
                (20, 420), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)

    # frame[100:228, 200:328] = cv2.convertScaleAbs(self.vidmag_frames[-1])
    cv2.imshow("Original", frame)

    # f2 = imutils.resize(cv2.convertScaleAbs(self.vidmag_frames[-1]), width=640)
    f2 = imutils.resize(cv2.convertScaleAbs(self.frame_out), width=640)
    cv2.imshow("Color amplification", f2)

    self.key_handler()  # if not called, the GUI can't show anything

Example #18
Source File: rule_30_and_game_of_life.py From rule-30-and-game-of-life with MIT License

def main():
    writer = video_writer.Writer(fps=FPS, high_quality=HIGH_QUALITY)
    animation = Rule30AndGameOfLife(STATE_WIDTH, STATE_HEIGHT)
    for _ in tqdm.trange(NUM_FRAMES):
        small_frame = animation.rgb
        enlarged_frame = imutils.resize(small_frame, VIDEO_WIDTH, VIDEO_HEIGHT,
                                        cv2.INTER_NEAREST)
        writer.add_frame(enlarged_frame)
        animation.step()
    writer.write(OUTPUT_PATH)

Example #19
Source File: digital_display_ocr.py From display_ocr with GNU General Public License v2.0

def cnvt_edged_image(img_arr, should_save=False):
    # ratio = img_arr.shape[0] / 300.0
    image = imutils.resize(img_arr, height=300)
    gray_image = cv2.bilateralFilter(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 11, 17, 17)
    edged_image = cv2.Canny(gray_image, 30, 200)

    if should_save:
        cv2.imwrite('cntr_ocr.jpg', edged_image)

    return edged_image

Example #20
Source File: ContinuousGesturePredictor.py From Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network with MIT License

def resizeImage(imageName):
    basewidth = 100
    img = Image.open(imageName)
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * float(wpercent))
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    img.save(imageName)

Example #21
Source File: ImageStitching.py From ImageProcessingProjects with MIT License

def main():
    ap = argparse.ArgumentParser("Image Stitching")
    ap.add_argument("-i1", "--image1", required=True,
                    help="Path to first image to be stitched")
    ap.add_argument("-i2", "--image2", required=True,
                    help="Path to second image to be stitched")
    args = vars(ap.parse_args())

    imageA = cv2.imread(args["image1"])
    imageB = cv2.imread(args["image2"])
    imageA = imutils.resize(imageA, width=400)
    imageB = imutils.resize(imageB, width=400)

    # stitch the images together to create a panorama
    # SIFT stitcher
    stitcher = ImageStitcher()
    result = stitcher.stitch([imageA, imageB], showMatches=False)
    # cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)

    # SURF stitcher
    # stitcher = SurfStitcher(imageA)
    # stitcher.stitch(imageB)
    # stitcher.saveImage()
    # cv2.imshow("Result", stitcher.leftImage)

    # show the images
    cv2.imshow("Image A", imageA)
    cv2.imshow("Image B", imageB)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite("results.jpg", result)

Example #22
Source File: read_frames_fast.py From imutils with MIT License

def filterFrame(frame):
    frame = imutils.resize(frame, width=450)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = np.dstack([frame, frame, frame])
    return frame

# construct the argument parse and parse the arguments

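In its source project this filter is handed to imutils' threaded file reader; a brief sketch under the assumption of a recent imutils whose FileVideoStream accepts a transform callable (the video path is a placeholder):

import time
from imutils.video import FileVideoStream

# each queued frame passes through filterFrame before being buffered
fvs = FileVideoStream("video.mp4", transform=filterFrame).start()
time.sleep(1.0)  # let the buffer fill

while fvs.more():
    frame = fvs.read()  # already resized and grayscale-stacked
    # ... process or display frame ...

fvs.stop()
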
Example #23
Source File: track_human_withHOG_SVM.py From Human-detection-and-Tracking with Apache License 2.0

def draw_Head_shoulders(frame):
    # cascade_path = "HS.xml"
    cascade_path = "haarcascade_profileface.xml"
    cascade = cv2.CascadeClassifier(cascade_path)
    frame = imutils.resize(frame, height=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    start = time.time()
    faces = cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(10, 10), flags=0
    )
    end = time.time()
    total_time_taken = end - start
    print(total_time_taken)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("face and Shoulders", frame)
    key = cv2.waitKey(1)

# construct the argument parse and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--images", required=True, help="path to images directory")
# args = vars(ap.parse_args())

# initialize the HOG descriptor/person detector

Example #24
Source File: predict-from-video.py From facial-expression-recognition-using-cnn with GNU General Public License v3.0

def predict_emotion(self, image):
    image.resize([NETWORK.input_size, NETWORK.input_size], refcheck=False)
    emotion, confidence = predict(image, self.model, self.shape_predictor)
    return emotion, confidence

Example #25
Source File: create_face_model.py From Human-detection-and-Tracking with Apache License 2.0

def get_images_and_labels(path):
    """
    Convert images to matrices and assign a label to every image
    according to the person; this data is used to train the recognizer.

    Args:
        path: path to images directory
    Returns:
        matrix of images, labels
    """
    i = 0
    image_paths = [os.path.join(path, f) for f in os.listdir(path)
                   if not f.endswith('.sad')]
    images = []
    labels = []
    for image_path in image_paths:
        image_pil = Image.open(image_path).convert('L')
        image = np.array(image_pil, 'uint8')
        image = imutils.resize(image, width=min(500, image.shape[1]))
        nbr = int(os.path.split(image_path)[1].split(".")[0].replace("subject", ""))
        faces = faceCascade.detectMultiScale(image)
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            # cv2.imwrite("subject02." + str(i) + ".jpg", image[y: y + h, x: x + w])
            # i = i + 1
            labels.append(nbr)
            cv2.imshow("Adding faces to training set", image[y: y + h, x: x + w])
            cv2.imshow('win', image[y: y + h, x: x + w])
            cv2.waitKey(50)
    return images, labels

Example #26
Source File: CameraController.py From NaturewatchCameraServer with GNU General Public License v3.0

def run(self):
    while not self.is_stopped():
        try:
            if picamera_exists:
                try:
                    # Get image from Pi camera
                    self.picamera_md_output.truncate(0)
                    self.picamera_md_output.seek(0)
                    self.picamera_md_stream.__next__()
                    self.image = self.picamera_md_output.array
                    if self.image is None:
                        self.logger.warning("CameraController: got empty image.")
                    time.sleep(0.01)
                except Exception as e:
                    self.logger.error("CameraController: picamera update error.")
                    self.logger.exception(e)
                    self.initialise_picamera()
                    time.sleep(0.02)
            else:
                # Get image from webcam
                if self.use_splitter_port:
                    ret, self.splitter_image = self.capture.read()
                    if self.splitter_image is not None:
                        self.image = imutils.resize(self.splitter_image,
                                                    width=self.width, height=self.height)
                else:
                    ret, self.image = self.capture.read()
                if self.image is None:
                    self.logger.warning("CameraController: got empty image.")
        except KeyboardInterrupt:
            self.logger.info("CameraController: received KeyboardInterrupt, shutting down ...")
            self.stop()

# Stop thread

Example #27
Source File: face_utilities.py From Heart-rate-measurement-using-camera with Apache License 2.0

def get_landmarks(self, frame, type):
    '''
    Get all facial landmarks in a face.

    Args:
        frame (cv2 image): the original frame, in RGB format
        type (str): 5 or 68 facial landmarks

    Outputs:
        shape (array): facial landmarks' coords as (x, y) tuples
    '''
    if self.predictor is None:
        print("[INFO] load " + type + " facial landmarks model ...")
        self.predictor = dlib.shape_predictor("../shape_predictor_" + type + "_face_landmarks.dat")
        print("[INFO] Load model - DONE!")

    if frame is None:
        return None, None

    # all faces will be resized to a fixed size, e.g. width = 200
    # face = imutils.resize(face, width=200)

    # face must be gray
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = self.face_detection(frame)

    if len(rects) == 0:
        return None, None

    shape = self.predictor(gray, rects[0])
    shape = face_utils.shape_to_np(shape)

    # shape holds 68 (x, y) pairs with the coords of the 68 points;
    # to draw the landmarks, use:
    #     for (x, y) in shape:
    #         cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
    return shape, rects

Example #28
Source File: mylib.py From anomaly-event-detection with MIT License

def pyramid(image, scale=1.5, minSize=(15, 15)):
    '''
    ========================
    Returns an image pyramid
    ========================

    Arguments:
        image: Image file or object
        scale: Decreasing with a specified ratio
        minSize: Minimum image size

    Return:
        image: Sliced image
    '''
    # yield the original image
    yield image

    # keep looping over the pyramid
    while True:
        # compute the new dimensions of the image and resize it
        w = int(image.shape[1] / scale)
        image = imutils.resize(image, width=w)

        # if the resized image does not meet the supplied minimum
        # size, then stop constructing the pyramid
        if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
            break

        # yield the next image in the pyramid
        yield image

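A brief usage sketch for the generator above (the input path is a placeholder): iterating yields progressively smaller layers until the minimum size is reached.

import cv2

image = cv2.imread("scene.jpg")  # hypothetical input
for i, layer in enumerate(pyramid(image, scale=1.5, minSize=(30, 30))):
    print("layer", i, layer.shape)
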
Example #29
Source File: general_utils.py From python_video_stab with MIT License

def playback_video(display_frame, playback_flag, delay, max_display_width=750):
    if not playback_flag:
        return False

    if display_frame.shape[1] > max_display_width:
        display_frame = imutils.resize(display_frame, width=max_display_width)

    cv2.imshow('VidStab Playback ({} frame delay if using live video;'
               ' press Q or ESC to quit)'.format(delay),
               display_frame)
    key = cv2.waitKey(1)

    if key == ord("q") or key == 27:
        return True