Python cv2.boundingRect() Examples
The following are 30 code examples of cv2.boundingRect(), collected from open-source projects. The project, source file, and license are listed above each example. The snippets are excerpts and keep the imports of their original files; at minimum they assume import cv2 (several also use numpy as np, and a few alias the module as cv). You may also want to check out all other available functions and classes of the cv2 module.
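Before the project examples, here is a minimal, self-contained sketch of the usual pattern: threshold an image, find contours, and draw the axis-aligned bounding rectangle of each contour. The file name 'shapes.png' and the threshold value 127 are placeholders, and the two-value findContours unpacking assumes OpenCV 4 (or 2.4):

import cv2

img = cv2.imread('shapes.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    x, y, w, h = cv2.boundingRect(c)  # top-left corner plus width and height
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('bounding rects', img)
cv2.waitKey(0)
cv2.destroyAllWindows()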
Example #1
Source File: pycv2.py From vrequest with MIT License

def laplacian(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Laplacian(s, cv2.CV_16S, ksize=3)
    s = cv2.convertScaleAbs(s)
    cv2.imshow('nier', s)
    return s

    # ret, binary = cv2.threshold(s, 40, 255, cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x, y, w, h = cv2.boundingRect(c)
    #     if w > 5 and h > 10:
    #         cv2.rectangle(v, (x, y), (x + w, y + h), (155, 155, 0), 1)
    # cv2.imshow('nier2', v)
    # cv2.waitKey()
    # cv2.destroyAllWindows()
Example #2
Source File: generate_coco_json.py From coco-json-converter with GNU General Public License v3.0

def __get_annotation__(self, mask, image=None):
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    segmentation = []
    for contour in contours:
        # Valid polygons have >= 6 coordinates (3 points)
        if contour.size >= 6:
            segmentation.append(contour.flatten().tolist())
    RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
    RLE = cocomask.merge(RLEs)
    # RLE = cocomask.encode(np.asfortranarray(mask))
    area = cocomask.area(RLE)
    [x, y, w, h] = cv2.boundingRect(mask)

    if image is not None:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow("", image)
        cv2.waitKey(1)

    return segmentation, [x, y, w, h], area
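Note that the examples on this page unpack cv2.findContours() differently: OpenCV 3.x returns (image, contours, hierarchy), while 2.4 and 4.x return (contours, hierarchy). If you need one snippet to run across versions, a small compatibility helper such as this sketch (not part of any project above) works:

def find_contours_compat(*args, **kwargs):
    # The contours are always the second-to-last element of the result,
    # regardless of the OpenCV version; the hierarchy is always last.
    result = cv2.findContours(*args, **kwargs)
    return result[-2], result[-1]

imutils.grab_contours(), used in Example #17 below, does the same thing.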
Example #3
Source File: size_detector.py From gaps with MIT License

def _find_size_candidates(self, image):
    binary_image = self._filter_image(image)

    _, contours, _ = cv2.findContours(binary_image,
                                      cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)

    size_candidates = []
    for contour in contours:
        bounding_rect = cv2.boundingRect(contour)
        contour_area = cv2.contourArea(contour)
        if self._is_valid_contour(contour_area, bounding_rect):
            candidate = (bounding_rect[2] + bounding_rect[3]) / 2
            size_candidates.append(candidate)

    return size_candidates
Example #4
Source File: pycv2.py From vrequest with MIT License

def canny(filepathname, left=70, right=140):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Canny(s, left, right)
    cv2.imshow('nier', s)
    return s

    # Box each contour with the smallest upright rectangle. After Canny the
    # edges are all white lines, so a threshold range of 127-255 is enough.
    # ret, binary = cv2.threshold(s, 127, 255, cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x, y, w, h = cv2.boundingRect(c)
    #     if w > 5 and h > 10:  # only draw boxes that pass the size constraint
    #         cv2.rectangle(v, (x, y), (x + w, y + h), (155, 155, 0), 1)
    # # cv2.drawContours(s, contours, -1, (0, 0, 255), 3)  # draw all contours
    # cv2.imshow('nier2', v)
    # cv2.waitKey()
    # cv2.destroyAllWindows()
Example #5
Source File: tracking.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License

def _append_boxes_from_saliency(self, proto_objects_map, box_all):
    """Adds to the list all bounding boxes found with the saliency map

    A saliency map is used to find objects worth tracking in each frame.
    This information is combined with a mean-shift tracker to find objects
    of relevance that move, and to discard everything else.

    :param proto_objects_map: proto-objects map of the current frame
    :param box_all: append bounding boxes from saliency to this list
    :returns: new list of all collected bounding boxes
    """
    # find all bounding boxes in new saliency map
    box_sal = []
    cnt_sal, _ = cv2.findContours(proto_objects_map, 1, 2)  # 1 = cv2.RETR_LIST, 2 = cv2.CHAIN_APPROX_SIMPLE
    for cnt in cnt_sal:
        # discard small contours
        if cv2.contourArea(cnt) < self.min_cnt_area:
            continue

        # otherwise add to list of boxes found from saliency map
        box = cv2.boundingRect(cnt)
        box_all.append(box)

    return box_all
Example #6
Source File: AffineInvariantFeatures.py From DoNotSnap with GNU General Public License v3.0

def affine_skew(self, tilt, phi, img, mask=None):
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c, -s], [s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32(np.dot(corners, A.T))
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1, -1, 2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8 * np.sqrt(tilt * tilt - 1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai
Example #7
Source File: load_saved_model.py From document-ocr with Apache License 2.0

def mask_to_bbox(mask, image, num_class, area_threshold=0, out_path=None, out_file_name=None):
    bbox_list = []
    im = copy.copy(image)
    mask = mask.astype(np.uint8)
    for i in range(1, num_class, 1):
        c_bbox_list = []
        c_mask = np.zeros_like(mask)
        c_mask[np.where(mask == i)] = 255
        bimg, contours, hier = cv2.findContours(c_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area < area_threshold:
                continue
            epsilon = 0.005 * cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)
            (x, y, w, h) = cv2.boundingRect(approx)
            c_bbox_list.append([x, y, x + w, y + h])
            if out_path is not None:
                color = COLOR_LIST[i - 1]
                im = cv2.rectangle(im, pt1=(x, y), pt2=(x + w, y + h), color=color, thickness=2)
        bbox_list.append(c_bbox_list)
    if out_path is not None:
        outf = os.path.join(out_path, out_file_name)
        cv2.imwrite(outf, im)
    return bbox_list
Example #8
Source File: crop_morphology.py From oldnyc with Apache License 2.0

def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)  # use cv2.boxPoints(r) on OpenCV >= 3
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        # boundingRect returns (x, y, w, h), not two corner points
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 255, -1)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 0, 4)

    return np.minimum(c_im, ary)
Example #9
Source File: dataset.py From DenseFusion with MIT License

def mask_to_bbox(mask):
    mask = mask.astype(np.uint8)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    x = 0
    y = 0
    w = 0
    h = 0
    for contour in contours:
        tmp_x, tmp_y, tmp_w, tmp_h = cv2.boundingRect(contour)
        if tmp_w * tmp_h > w * h:  # keep the box with the largest area
            x = tmp_x
            y = tmp_y
            w = tmp_w
            h = tmp_h
    return [x, y, w, h]
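As a quick, hypothetical check of mask_to_bbox() on a synthetic single-blob mask (sizes made up; this assumes the OpenCV 4 findContours signature used above):

import numpy as np

mask = np.zeros((480, 640), dtype=np.uint8)
mask[100:200, 50:300] = 255       # one rectangular foreground blob
print(mask_to_bbox(mask))         # -> [50, 100, 250, 100]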
Example #10
Source File: CThermal.py From Thermal_Image_Analysis with MIT License

def get_roi(thermal_image, thermal_np, raw_thermal_np, Contours, index, area_rect=None):
    raw_roi_values = []
    thermal_roi_values = []
    indices = []

    if area_rect is None:
        img2 = np.zeros((thermal_image.shape[0], thermal_image.shape[1], 1), np.uint8)
        cv.drawContours(img2, Contours, index, 255, -1)
        x, y, w, h = cv.boundingRect(Contours[index])
        indices = np.arange(w * h)
        ind = np.where(img2[:, :, 0] == 255)
        indices = indices[np.where(img2[y:y + h, x:x + w, 0].flatten() == 255)]
        raw_roi_values = raw_thermal_np[ind]
        thermal_roi_values = thermal_np[ind]
    else:
        x, y, w, h = area_rect
        raw_roi_values = raw_thermal_np[y:y + h, x:x + w]
        thermal_roi_values = thermal_np[y:y + h, x:x + w]

    return raw_roi_values, thermal_roi_values, indices
Example #11
Source File: motion.py From object-detection with MIT License

def filter_prediction(self, output, image):
    if len(output) < 2:
        return pd.DataFrame()
    else:
        df = pd.DataFrame(output)
        df = df.assign(
            area=lambda x: df[0].apply(lambda x: cv2.contourArea(x)),
            bounding=lambda x: df[0].apply(lambda x: cv2.boundingRect(x))
        )
        df = df[df['area'] > MIN_AREA]
        df_filtered = pd.DataFrame(
            df['bounding'].values.tolist(),
            columns=['x1', 'y1', 'w', 'h'])
        df_filtered = df_filtered.assign(
            x1=lambda x: x['x1'].clip(0),
            y1=lambda x: x['y1'].clip(0),
            x2=lambda x: (x['x1'] + x['w']),
            y2=lambda x: (x['y1'] + x['h']),
            label=lambda x: x.index.astype(str),
            class_name=lambda x: x.index.astype(str),
        )
        return df_filtered
Example #12
Source File: morpher.py From yry with Apache License 2.0

def merge_img(src_img, dst_img, dst_matrix, dst_points, blur_detail_x=None, blur_detail_y=None, mat_multiple=None):
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)

    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))
    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))

    if blur_detail_x and blur_detail_y:
        face_mask = cv2.blur(face_mask, (blur_detail_x, blur_detail_y), center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE)
Example #13
Source File: detect_tables.py From namsel with MIT License

def find_boxes(tiff_fl, blur=False):
    im = Image.open(tiff_fl).convert('L')
    a = np.asarray(im)
    if blur:
        a = cv.GaussianBlur(a, (5, 5), 0)
    contours, hierarchy = cv.findContours(a.copy(), mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_SIMPLE)
    border_boxes = []
    # n = np.ones_like(a)
    for j, cnt in enumerate(contours):
        cnt_len = cv.arcLength(cnt, True)
        orig_cnt = cnt.copy()
        cnt = cv.approxPolyDP(cnt, 0.02 * cnt_len, True)
        if len(cnt) == 4 and ((a.shape[0] - 3) * (a.shape[1] - 3)) > cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):
            cnt = cnt.reshape(-1, 2)
            max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])  # xrange in the Python 2 original
            if max_cos < 0.1:
                b = cv.boundingRect(orig_cnt)
                x, y, w, h = b
                border_boxes.append(b)
                # cv.rectangle(n, (x, y), (x + w, y + h), 0)
                # cv.drawContours(n, [cnt], -1, 0, thickness=5)
    # Image.fromarray(n*255).show()
    return border_boxes
Example #14
Source File: Functions.py From LPEX with GNU General Public License v3.0

def __init__(self, cntr):
    self.contour = cntr

    self.boundingRect = cv2.boundingRect(self.contour)

    [x, y, w, h] = self.boundingRect
    self.boundingRectX = x
    self.boundingRectY = y
    self.boundingRectWidth = w
    self.boundingRectHeight = h

    self.boundingRectArea = self.boundingRectWidth * self.boundingRectHeight

    self.centerX = (self.boundingRectX + self.boundingRectX + self.boundingRectWidth) / 2
    self.centerY = (self.boundingRectY + self.boundingRectY + self.boundingRectHeight) / 2

    self.diagonalSize = math.sqrt((self.boundingRectWidth ** 2) + (self.boundingRectHeight ** 2))

    self.aspectRatio = float(self.boundingRectWidth) / float(self.boundingRectHeight)
Example #15
Source File: HandRecognition.py From hand-gesture-recognition-opencv with MIT License

def mark_hand_center(frame_in, cont):
    max_d = 0
    pt = (0, 0)
    x, y, w, h = cv2.boundingRect(cont)
    for ind_y in range(int(y + 0.3 * h), int(y + 0.8 * h)):  # around 0.25 to 0.6 region of height (faster calculation with ok results)
        for ind_x in range(int(x + 0.3 * w), int(x + 0.6 * w)):  # around 0.3 to 0.6 region of width (faster calculation with ok results)
            dist = cv2.pointPolygonTest(cont, (ind_x, ind_y), True)
            if dist > max_d:
                max_d = dist
                pt = (ind_x, ind_y)
    if max_d > radius_thresh * frame_in.shape[1]:
        thresh_score = True
        cv2.circle(frame_in, pt, int(max_d), (255, 0, 0), 2)
    else:
        thresh_score = False
    return frame_in, pt, max_d, thresh_score

# 6. Find and display gesture
Example #16
Source File: find_contour_character.py From SpikeFlow with MIT License

def sort_contours(cnts, method="left-to-right"):
    # initialize the reverse flag and sort index
    reverse = False
    i = 0

    # handle if we need to sort in reverse
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True

    # handle if we are sorting against the y-coordinate rather than
    # the x-coordinate of the bounding box
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1

    # construct the list of bounding boxes and sort them from top to
    # bottom
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))

    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)
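A hypothetical usage of sort_contours(), drawing three blobs right to left and recovering them in left-to-right order (the canvas size and blob positions are made up):

import cv2
import numpy as np

canvas = np.zeros((100, 300), dtype=np.uint8)
for x in (220, 120, 20):                       # drawn right to left on purpose
    cv2.rectangle(canvas, (x, 30), (x + 40, 70), 255, -1)
contours, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts, boxes = sort_contours(contours, method="left-to-right")
print([b[0] for b in boxes])                   # -> [20, 120, 220]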
Example #17
Source File: image.py From uiautomator2 with MIT License

def compare_ssim_debug(image_a, image_b, color=(255, 0, 0)):
    """
    Args:
        image_a, image_b: opencv image or PIL.Image
        color: (r, g, b) eg: (255, 0, 0) for red

    Refs:
        https://www.pyimagesearch.com/2017/06/19/image-difference-with-opencv-and-python/
    """
    ima, imb = conv2cv(image_a), conv2cv(image_b)
    score, diff = compare_ssim(ima, imb, full=True)
    diff = (diff * 255).astype('uint8')
    _, thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    cv2color = tuple(reversed(color))
    im = ima.copy()
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(im, (x, y), (x + w, y + h), cv2color, 2)
    # todo: show image
    cv2pil(im).show()
    return im
Example #18
Source File: getFeatures.py From multi-object-tracking with GNU General Public License v3.0

def getFeatures(img, bbox, use_shi=False):
    n_object = np.shape(bbox)[0]
    N = 0
    temp = np.empty((n_object,), dtype=np.ndarray)  # temporary storage of x,y coordinates
    for i in range(n_object):
        (xmin, ymin, boxw, boxh) = cv2.boundingRect(bbox[i, :, :].astype(int))
        roi = img[ymin:ymin + boxh, xmin:xmin + boxw]
        # cv2.imshow('roi', roi)
        if use_shi:
            corner_response = corner_shi_tomasi(roi)
        else:
            corner_response = corner_harris(roi)
        coordinates = peak_local_max(corner_response, num_peaks=20, exclude_border=2)
        coordinates[:, 1] += xmin
        coordinates[:, 0] += ymin
        temp[i] = coordinates
        if coordinates.shape[0] > N:
            N = coordinates.shape[0]
    x = np.full((N, n_object), -1)
    y = np.full((N, n_object), -1)
    for i in range(n_object):
        n_feature = temp[i].shape[0]
        x[0:n_feature, i] = temp[i][:, 1]
        y[0:n_feature, i] = temp[i][:, 0]
    return x, y
Example #19
Source File: data_preprocessing_autoencoder.py From AVSR-Deep-Speech with GNU General Public License v2.0

def crop_and_store(frame, mouth_coordinates, name):
    """
    Args:
        1. frame: The frame which has to be cropped.
        2. mouth_coordinates: The coordinates which help in deciding which region is to be cropped.
        3. name: The path name to be used for storing the cropped image.
    """
    # Find bounding rectangle for mouth coordinates
    x, y, w, h = cv2.boundingRect(mouth_coordinates)

    mouth_roi = frame[y:y + h, x:x + w]

    h, w, channels = mouth_roi.shape
    # If the cropped region is very small, ignore this case.
    if h < 10 or w < 10:
        return

    resized = resize(mouth_roi, 32, 32)
    cv2.imwrite(name, resized)
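cv2.boundingRect() also accepts a bare 2D point set rather than a contour, which is what crop_and_store() relies on; mouth_coordinates would typically be facial-landmark points. A sketch with made-up points:

import numpy as np

points = np.array([[120, 200], [140, 195], [160, 198],
                   [150, 215], [130, 214]], dtype=np.int32)
x, y, w, h = cv2.boundingRect(points)
print(x, y, w, h)  # -> 120 195 41 21 (width/height are max - min + 1)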
Example #20
Source File: PossibleChar.py From ALPR-Indonesia with MIT License

def __init__(self, _contour):
    self.contour = _contour

    self.boundingRect = cv2.boundingRect(self.contour)

    [intX, intY, intWidth, intHeight] = self.boundingRect
    self.intBoundingRectX = intX
    self.intBoundingRectY = intY
    self.intBoundingRectWidth = intWidth
    self.intBoundingRectHeight = intHeight

    self.intBoundingRectArea = self.intBoundingRectWidth * self.intBoundingRectHeight

    self.intCenterX = (self.intBoundingRectX + self.intBoundingRectX + self.intBoundingRectWidth) / 2
    self.intCenterY = (self.intBoundingRectY + self.intBoundingRectY + self.intBoundingRectHeight) / 2

    self.fltDiagonalSize = math.sqrt((self.intBoundingRectWidth ** 2) + (self.intBoundingRectHeight ** 2))

    self.fltAspectRatio = float(self.intBoundingRectWidth) / float(self.intBoundingRectHeight)
# end constructor
# end class
Example #21
Source File: plate_locate.py From EasyPR-python with Apache License 2.0

def calcSafeRect(self, roi, src):
    '''
    return [x, y, w, h]
    '''
    box = cv2.boxPoints(roi)
    x, y, w, h = cv2.boundingRect(box)

    src_h, src_w, _ = src.shape

    tl_x = x if x > 0 else 0
    tl_y = y if y > 0 else 0
    br_x = x + w - 1 if x + w - 1 < src_w else src_w - 1
    br_y = y + h - 1 if y + h - 1 < src_h else src_h - 1

    roi_w = br_x - tl_x
    roi_h = br_y - tl_y
    if roi_w <= 0 or roi_h <= 0:
        return [tl_x, tl_y, roi_w, roi_h], False

    return [tl_x, tl_y, roi_w, roi_h], True
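The boxPoints-then-boundingRect step above converts a rotated rectangle into the axis-aligned rectangle that encloses it; calcSafeRect() then clamps that rectangle to the image. A small illustration with a made-up RotatedRect tuple (center, size, angle):

rect = ((100.0, 80.0), (60.0, 20.0), 30.0)   # center, (width, height), angle in degrees
box = cv2.boxPoints(rect)                    # the four corners of the rotated rect
x, y, w, h = cv2.boundingRect(box)           # axis-aligned rect enclosing the corners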
Example #22
Source File: pycv2.py From vrequest with MIT License

def sobel(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    x, y = cv2.Sobel(s, cv2.CV_16S, 1, 0), cv2.Sobel(s, cv2.CV_16S, 0, 1)
    s = cv2.convertScaleAbs(cv2.subtract(x, y))
    s = cv2.blur(s, (9, 9))
    cv2.imshow('nier', s)
    return s

    # ret, binary = cv2.threshold(s, 40, 255, cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x, y, w, h = cv2.boundingRect(c)
    #     if w > 5 and h > 10:
    #         cv2.rectangle(v, (x, y), (x + w, y + h), (155, 155, 0), 1)
    # cv2.imshow('nier2', v)
    # cv2.waitKey()
    # cv2.destroyAllWindows()
Example #23
Source File: Predict_Digits_RealTime.py From MNIST-Deep-Learning with GNU General Public License v3.0

def Get_MaxROI(contours, imgW, imgH):
    minDist = 1000000
    imgCX, imgCY = imgW / 2, imgH / 2
    idx = 0
    aId = -1
    for cnt in contours:
        idx += 1
        x, y, w, h = cv2.boundingRect(cnt)
        # roi = image[y:y+h, x:x+w]
        # cv2.imwrite(str(idx) + '.jpg', roi)
        # cv2.rectangle(image, (x, y), (x+w, y+h), (200, 200, 0), 2)
        minWH = min(w, h)
        maxWH = max(w, h)
        cx, cy = (x + w / 2), (y + h / 2)
        dist = (cx - imgCX) ** 2 + (cy - imgCY) ** 2
        if (maxWH > minSize) and (maxWH < maxSize) and (dist < minDist):
            minDist = dist
            # minA = w*h
            aId = idx - 1
    return aId
Example #24
Source File: Grouping.py From CSGNet with MIT License

def tightboundingbox(self, image):
    ret, thresh = cv2.threshold(np.array(image, dtype=np.uint8), 0, 255, 0)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    bb = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        # pad the box by 1 px on every side to encapsulate the entire figure
        w += 2
        h += 2
        x -= 1
        y -= 1
        x = np.max([0, x])
        y = np.max([0, y])
        bb.append([y, x, w, h])
    bb = self.nms(bb)
    return bb
Example #25
Source File: crop_morphology.py From Python-Code with MIT License

def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)  # use cv2.boxPoints(r) on OpenCV >= 3
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        # boundingRect returns (x, y, w, h), not two corner points
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 255, -1)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 0, 4)

    return np.minimum(c_im, ary)
Example #26
Source File: auxiliary_function.py From roc with MIT License

def find_bounding_box(pane, bounding_box_lower_thresholds, bounding_box_upper_thresholds, sort=True):
    # thresholds: tuple
    # dimension_resized: tuple
    segmented_pictures = []
    rect_coordinates = []
    width_lower_threshold, height_lower_threshold = bounding_box_lower_thresholds
    width_upper_threshold, height_upper_threshold = bounding_box_upper_thresholds

    contours, hierarchy = cv2.findContours(pane, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        if h > height_lower_threshold and w > width_lower_threshold and h <= height_upper_threshold and w <= width_upper_threshold:
            rect_coordinates.append((x, y, w, h))
        else:
            continue
    if sort:
        x_coordinates = [x for (x, y, w, h) in rect_coordinates]
        rect_coordinates = [e for _, e in sorted(zip(x_coordinates, rect_coordinates))]
    return rect_coordinates
Example #27
Source File: object_detection_using_color.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License

def locate_object(frame, object_hist):
    # convert to HSV
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # apply back projection to image using object_hist as
    # the model histogram
    object_segment = cv2.calcBackProject(
        [hsv_frame], [0, 1], object_hist, [0, 180, 0, 256], 1)

    # find the contours
    img, contours, _ = cv2.findContours(
        object_segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    flag = None
    max_area = 0

    # find the contour with the greatest area
    for (i, c) in enumerate(contours):
        area = cv2.contourArea(c)
        if area > max_area:
            max_area = area
            flag = i

    # get the rectangle
    if flag is not None and max_area > 1000:
        cnt = contours[flag]
        coords = cv2.boundingRect(cnt)
        return coords

    return None

# compute the color histogram
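locate_object() assumes object_hist is a 2D hue-saturation histogram. A hedged sketch of how such a histogram might be built from a sample BGR patch of the object (patch is a placeholder, and the bin counts 12 and 15 are arbitrary choices):

hsv_patch = cv2.cvtColor(patch, cv2.COLOR_BGR2HSV)
object_hist = cv2.calcHist([hsv_patch], [0, 1], None, [12, 15], [0, 180, 0, 256])
cv2.normalize(object_hist, object_hist, 0, 255, cv2.NORM_MINMAX)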
Example #28
Source File: test.py From object-localization with MIT License

def main():
    model = create_model()
    model.load_weights(WEIGHTS_FILE)

    for filename in glob.glob(IMAGES):
        unscaled = cv2.imread(filename)
        image = cv2.resize(unscaled, (IMAGE_SIZE, IMAGE_SIZE))
        feat_scaled = preprocess_input(np.array(image, dtype=np.float32))

        region = np.squeeze(model.predict(feat_scaled[np.newaxis, :]))

        output = np.zeros(region.shape, dtype=np.uint8)
        output[region > 0.5] = 1

        contours, _ = cv2.findContours(output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            approx = cv2.approxPolyDP(cnt, EPSILON * cv2.arcLength(cnt, True), True)
            x, y, w, h = cv2.boundingRect(approx)

            x0 = np.rint(x * unscaled.shape[1] / output.shape[1]).astype(int)
            x1 = np.rint((x + w) * unscaled.shape[1] / output.shape[1]).astype(int)
            y0 = np.rint(y * unscaled.shape[0] / output.shape[0]).astype(int)
            y1 = np.rint((y + h) * unscaled.shape[0] / output.shape[0]).astype(int)
            cv2.rectangle(unscaled, (x0, y0), (x1, y1), (0, 255, 0), 1)

        cv2.imshow("image", unscaled)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #29
Source File: preprocess.py From filmstrip with MIT License

def extractROI(sourcePath, destPath, points, verbose=False):
    info = getInfo(sourcePath)

    # x, y, width, height = cv2.boundingRect(points)
    # print(x, y, width, height)
    x = points[0][0]
    y = points[0][1]
    width = points[1][0] - points[0][0]
    height = points[1][1] - points[0][1]

    cap = cv2.VideoCapture(sourcePath)

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    out = cv2.VideoWriter(destPath, fourcc, info["fps"], (width, height))
    ret = True
    while (cap.isOpened() and ret):
        ret, frame = cap.read()
        if frame is None:
            break
        roi = frame[y:y + height, x:x + width]
        out.write(roi)
        if verbose:
            cv2.imshow('frame', roi)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    cap.release()
    out.release()
    cv2.destroyAllWindows()

# Groups a list in n sized tuples
Example #30
Source File: handdetector.py From deep-prior with GNU General Public License v3.0

def estimateHandsize(self, contours, com, cube=(250, 250, 250), tol=0):
    """
    Estimate hand size from depth image
    :param contours: contours of hand
    :param com: center of mass
    :param cube: default cube
    :param tol: tolerance to be added to all sides
    :return: metric cube for cropping (x, y, z)
    """
    x, y, w, h = cv2.boundingRect(contours)

    # drawing = numpy.zeros((480, 640), dtype=float)
    # cv2.drawContours(drawing, [contours], 0, (255, 0, 244), 1, 8)
    # cv2.rectangle(drawing, (x, y), (x+w, y+h), (244, 0, 233), 2, 8, 0)
    # cv2.imshow("contour", drawing)

    # convert to cube
    xstart = (com[0] - w / 2.) * com[2] / self.fx
    xend = (com[0] + w / 2.) * com[2] / self.fx
    ystart = (com[1] - h / 2.) * com[2] / self.fy
    yend = (com[1] + h / 2.) * com[2] / self.fy

    szx = xend - xstart
    szy = yend - ystart
    sz = (szx + szy) / 2.
    cube = (sz + tol, sz + tol, sz + tol)
    return cube