Python cv2.polylines() Examples
The following are 30 code examples of cv2.polylines(), taken from open-source projects. The originating project, source file, and license are noted above each example.
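For orientation before the examples: cv2.polylines(img, pts, isClosed, color[, thickness[, lineType]]) draws one or more polylines in place, and expects pts to be a list of int32 point arrays, conventionally of shape (-1, 1, 2). The snippet below is a minimal standalone sketch with made-up coordinates, not taken from any of the projects:

import cv2
import numpy as np

# Blank BGR canvas, 200x200 pixels
img = np.zeros((200, 200, 3), dtype=np.uint8)

# Points must be int32; the (-1, 1, 2) shape matches what most examples below use
pts = np.array([[20, 20], [180, 30], [160, 170], [40, 150]], np.int32).reshape(-1, 1, 2)

# Closed green polygon with anti-aliased 2-pixel edges
cv2.polylines(img, [pts], isClosed=True, color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)

# The same points again as an open white polyline
cv2.polylines(img, [pts], isClosed=False, color=(255, 255, 255), thickness=1)

cv2.imshow('polylines demo', img)
cv2.waitKey(0)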
Example #1
Source File: tgc_visualizer.py From TGC-Designer-Tools with Apache License 2.0

def drawSplinesOnImage(splines, color, im, pc, image_scale):
    for s in splines:
        # Get the shape of this spline and draw it on the image
        nds = []
        for wp in s["waypoints"]:
            nds.append(pc.tgcToCV2(wp["waypoint"]["x"], wp["waypoint"]["y"], image_scale))

        # Don't try to draw malformed splines
        if len(nds) == 0:
            continue

        # Uses points and not image pixels, so flip the x and y
        nds = np.array(nds)
        nds[:, [0, 1]] = nds[:, [1, 0]]
        nds = np.int32([nds])  # Bug with fillPoly, needs explicit cast to 32bit

        thickness = int(s["width"])
        if thickness < image_scale:
            thickness = int(image_scale)

        if s["isFilled"]:
            cv2.fillPoly(im, nds, color, lineType=cv2.LINE_AA)
        else:
            cv2.polylines(im, nds, s["isClosed"], color, thickness, lineType=cv2.LINE_AA)
Example #2
Source File: video.py From pysot with Apache License 2.0

def draw_box(self, roi, img, linewidth, color, name=None):
    """
    roi: rectangle or polygon
    img: numpy array img
    linewidth: line width of the bbox
    """
    if len(roi) > 6 and len(roi) % 2 == 0:
        pts = np.array(roi, np.int32).reshape(-1, 1, 2)
        color = tuple(map(int, color))
        img = cv2.polylines(img, [pts], True, color, linewidth)
        pt = (pts[0, 0, 0], pts[0, 0, 1] - 5)
        if name:
            img = cv2.putText(img, name, pt, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
    elif len(roi) == 4:
        if not np.isnan(roi[0]):
            roi = list(map(int, roi))
            color = tuple(map(int, color))
            img = cv2.rectangle(img, (roi[0], roi[1]), (roi[0] + roi[2], roi[1] + roi[3]),
                                color, linewidth)
            if name:
                img = cv2.putText(img, name, (roi[0], roi[1] - 5),
                                  cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
    return img
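A hedged usage sketch (hypothetical values; video stands for whatever object exposes this method, and frame is a BGR numpy image):

# Rectangle ROI given as [x, y, w, h]
frame = video.draw_box([50, 60, 120, 80], frame, linewidth=2, color=(0, 255, 0), name='target')

# Polygon ROI given as a flat list of x, y pairs (length > 6 and even)
frame = video.draw_box([10, 10, 90, 15, 95, 70, 12, 66], frame, linewidth=2, color=(0, 0, 255))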
Example #3
Source File: annotate.py From faceswap with GNU General Public License v3.0

def draw_landmarks_mesh(self, color_id=4, thickness=1):
    """ Draw the facial landmarks """
    color = self.colors[color_id]
    facial_landmarks_idxs = OrderedDict([("mouth", (48, 68)),
                                         ("right_eyebrow", (17, 22)),
                                         ("left_eyebrow", (22, 27)),
                                         ("right_eye", (36, 42)),
                                         ("left_eye", (42, 48)),
                                         ("nose", (27, 36)),
                                         ("jaw", (0, 17)),
                                         ("chin", (8, 11))])
    for alignment in self.alignments:
        landmarks = alignment["landmarks_xy"]
        logger.trace("Drawing Landmarks Mesh: (landmarks: %s, color: %s, thickness: %s)",
                     landmarks, color, thickness)
        for key, val in facial_landmarks_idxs.items():
            points = np.array([landmarks[val[0]:val[1]]], np.int32)
            fill_poly = bool(key in ("right_eye", "left_eye", "mouth"))
            cv2.polylines(self.image, points, fill_poly, color, thickness)
Example #4
Source File: drawing.py From adience_align with MIT License

def draw_rect(img, r, angle=0, color=(255, 255, 255), thickness=4, alpha=0.5):
    '''
    accepts:
    1. a (x,y,dx,dy) list
    4. a [(x,y,dx,dy),score] list, as returned by cv2.CascadeClassifier.detectMultiScaleWithScores()
    5. a CascadeResult object
    '''
    if type(r) == CascadeResult:
        color = tuple(list(color) + [alpha])
        cv2.polylines(img, pts=[r.points_int], isClosed=True, color=color, thickness=thickness)
        return
    elif len(r) == 4 or len(r) == 2:  # [x,y,dx,dy]
        if len(r) == 2:
            if len(r[0]) == 4:
                r = r[0]
            else:
                raise Exception("bad input to draw_rect...")
        pt1 = int(round(r[0])), int(round(r[1]))
        pt2 = int(round(r[0] + r[2])), int(round(r[1] + r[3]))
        color = tuple(list(color) + [alpha])
        cv2.rectangle(img, pt1, pt2, color, thickness=thickness)
    else:
        raise Exception("bad input to draw_rect...")
    return
Example #5
Source File: visualize.py From ExtremeNet with BSD 3-Clause "New" or "Revised" License

def vis_octagon(img, extreme_points, col, border_thick=2):
    """Visualizes a single binary mask."""

    img = img.astype(np.uint8)
    # COL = (col).astype(np.uint8).tolist()
    # print('col', COL)
    # octagon = get_octagon(extreme_points)
    # octagon = np.array(octagon).reshape(8, 1, 2).astype(np.int32)
    # cv2.polylines(img, [octagon],
    #               True, COL, border_thick)
    mask = extreme_point_to_octagon_mask(
        extreme_points, img.shape[0], img.shape[1])

    img = vis_mask(img, mask, col)
    return img.astype(np.uint8)
Example #6
Source File: utils.py From PIXOR with MIT License

def get_bev(velo_array, label_list=None, scores=None):
    map_height = velo_array.shape[0]
    intensity = np.zeros((velo_array.shape[0], velo_array.shape[1], 3), dtype=np.uint8)
    # val = 1 - velo_array[::-1, :, -1]
    val = (1 - velo_array[::-1, :, :-1].max(axis=2)) * 255
    intensity[:, :, 0] = val
    intensity[:, :, 1] = val
    intensity[:, :, 2] = val
    # Flip in the x direction

    if label_list is not None:
        for corners in label_list:
            plot_corners = corners / 0.1
            plot_corners[:, 1] += int(map_height // 2)
            plot_corners[:, 1] = map_height - plot_corners[:, 1]
            plot_corners = plot_corners.astype(int).reshape((-1, 1, 2))
            cv2.polylines(intensity, [plot_corners], True, (255, 0, 0), 2)
            cv2.line(intensity, tuple(plot_corners[2, 0]), tuple(plot_corners[3, 0]),
                     (0, 0, 255), 3)

    return intensity
Example #7
Source File: pose_estimation.py From OpenCV-3-x-with-Python-By-Example with MIT License

def start(self):
    paused = False
    while True:
        if not paused or self.frame is None:
            ret, frame = self.cap.read()
            scaling_factor = self.scaling_factor
            frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor,
                               interpolation=cv2.INTER_AREA)
            if not ret:
                break

            self.frame = frame.copy()

        img = self.frame.copy()
        if not paused and self.rect is not None:
            tracked = self.pose_tracker.track_target(self.frame)
            for item in tracked:
                cv2.polylines(img, [np.int32(item.quad)], True, (255, 255, 255), 2)
                for (x, y) in np.int32(item.points_cur):
                    cv2.circle(img, (x, y), 2, (255, 255, 255))

        self.roi_selector.draw_rect(img, self.rect)
        cv2.imshow(self.win_name, img)
        ch = cv2.waitKey(1)
        if ch == ord(' '):
            paused = not paused
        if ch == ord('c'):
            self.pose_tracker.clear_targets()
        if ch == 27:
            break
Example #8
Source File: utils.py From posenet-python with Apache License 2.0

def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue

        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)

        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))

    out_img = cv2.drawKeypoints(
        out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
    return out_img
Example #9
Source File: annotate.py From faceswap with GNU General Public License v3.0

def draw_extract_box(self, color_id=2, thickness=1):
    """ Draw the extracted face box """
    if not self.roi:
        return
    color = self.colors[color_id]
    for idx, roi in enumerate(self.roi):
        logger.trace("Drawing Extract Box: (idx: %s, roi: %s)", idx, roi)
        top_left = [point for point in roi.squeeze()[0]]
        top_left = (top_left[0], top_left[1] - 10)
        cv2.putText(self.image, str(idx), top_left, cv2.FONT_HERSHEY_DUPLEX, 1.0,
                    color, thickness)
        cv2.polylines(self.image, [roi], True, color, thickness)
Example #10
Source File: lk_track.py From OpenCV-Python-Tutorial with MIT License

def run(self):
    while True:
        ret, frame = self.cam.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        vis = frame.copy()

        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, frame_gray
            p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)
                cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
            self.tracks = new_tracks
            cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
            draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

        if self.frame_idx % self.detect_interval == 0:
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (x, y), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])

        self.frame_idx += 1
        self.prev_gray = frame_gray
        cv2.imshow('lk_track', vis)

        ch = cv2.waitKey(1)
        if ch == 27:
            break
Example #11
Source File: xmlPAGE.py From P2PaLA with GNU General Public License v3.0

def build_baseline_mask(self, out_size, color, line_width):
    """
    Builds an "image" mask of the baselines on the XML-PAGE
    """
    size = self.get_size()[::-1]
    # --- Although NLLLoss requires a Long tensor (np.int -> torch.LongTensor),
    # --- it is better to keep the mask as np.uint8 to save disk space, then change it
    # --- to np.int @ dataloader only if NLLLoss is going to be used.
    mask = np.zeros((out_size[0], out_size[1]), np.uint8)
    scale_factor = out_size / size
    for element in self.root.findall("".join([".//", self.base, "Baseline"])):
        # --- get element coords
        str_coords = element.attrib.get("points").split()
        coords = np.array([i.split(",") for i in str_coords]).astype(np.int32)
        coords = (coords * np.flip(scale_factor, 0)).astype(np.int32)
        cv2.polylines(mask, [coords.reshape(-1, 1, 2)], False, color, line_width)
    if not mask.any():
        self.logger.warning(
            "File {} does not contain baselines".format(self.name)
        )
    return mask
Example #12
Source File: debugger.py From mxnet-centernet with MIT License

def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'):
    bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
    for cat in dets:
        cl = (self.colors[cat - 1, 0, 0]).tolist()
        lc = (250, 152, 12)
        for i in range(len(dets[cat])):
            if dets[cat][i, -1] > center_thresh:
                dim = dets[cat][i, 5:8]
                loc = dets[cat][i, 8:11]
                rot_y = dets[cat][i, 11]
                rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
                for k in range(4):
                    rect[k] = self.project_3d_to_bird(rect[k])
                    # cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1)
                cv2.polylines(
                    bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)],
                    True, lc, 2, lineType=cv2.LINE_AA)
                for e in [[0, 1]]:
                    t = 4 if e == [0, 1] else 1
                    cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
                             (rect[e[1]][0], rect[e[1]][1]), lc, t,
                             lineType=cv2.LINE_AA)
    self.imgs[img_id] = bird_view
Example #13
Source File: frame.py From twitchslam with MIT License

def annotate(self, img):
    # paint annotations on the image
    for i1 in range(len(self.kpus)):
        u1, v1 = int(round(self.kpus[i1][0])), int(round(self.kpus[i1][1]))
        if self.pts[i1] is not None:
            if len(self.pts[i1].frames) >= 5:
                cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
            else:
                cv2.circle(img, (u1, v1), color=(0, 128, 0), radius=3)
            # draw the trail
            pts = []
            lfid = None
            for f, idx in zip(self.pts[i1].frames[-9:][::-1], self.pts[i1].idxs[-9:][::-1]):
                if lfid is not None and lfid - 1 != f.id:
                    break
                pts.append(tuple(map(lambda x: int(round(x)), f.kpus[idx])))
                lfid = f.id
            if len(pts) >= 2:
                cv2.polylines(img, np.array([pts], dtype=np.int32), False,
                              myjet[len(pts)] * 255, thickness=1, lineType=16)
        else:
            cv2.circle(img, (u1, v1), color=(0, 0, 0), radius=3)
    return img

# inverse of intrinsics matrix
Example #14
Source File: train_imgnet.py From 3DOD_thesis with MIT License

def draw_3dbbox_from_keypoints(img, keypoints):
    img = np.copy(img)

    color = [190, 0, 255]  # (BGR)
    front_color = [255, 230, 0]  # (BGR)
    # (0 -> 3 -> 7 -> 4 -> 0, 1 -> 2 -> 6 -> 5 -> 1, etc.)
    lines = [[0, 3, 7, 4, 0], [1, 2, 6, 5, 1], [0, 1], [2, 3], [6, 7], [4, 5]]
    colors = [front_color, color, color, color, color, color]

    for n, line in enumerate(lines):
        bg = colors[n]
        cv2.polylines(img, np.int32([keypoints[line]]), False, bg,
                      lineType=cv2.LINE_AA, thickness=2)

    return img

# NOTE! NOTE! change this to not overwrite all log data when you train the model:
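A hedged usage sketch: assuming keypoints is an (8, 2) array of projected box-corner pixel coordinates in the ordering the lines list above expects (corners 0-3 on one face, 4-7 on the other), with hypothetical values:

import numpy as np

# Hypothetical image and projected corners of a 3D box, shape (8, 2)
img = np.zeros((300, 300, 3), dtype=np.uint8)
keypoints = np.array([[100, 200], [180, 200], [180, 120], [100, 120],
                      [120, 190], [200, 190], [200, 110], [120, 110]], dtype=np.float32)
img_with_box = draw_3dbbox_from_keypoints(img, keypoints)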
Example #15
Source File: video.py From pyCFTrackers with MIT License

def draw_box(self, roi, img, linewidth, color, name=None):
    """
    roi: rectangle or polygon
    img: numpy array img
    linewidth: line width of the bbox
    """
    if len(roi) > 6 and len(roi) % 2 == 0:
        pts = np.array(roi, np.int32).reshape(-1, 1, 2)
        color = tuple(map(int, color))
        img = cv2.polylines(img, [pts], True, color, linewidth)
        pt = (pts[0, 0, 0], pts[0, 0, 1] - 5)
        if name:
            img = cv2.putText(img, name, pt, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
    elif len(roi) == 4:
        if not np.isnan(roi[0]):
            roi = list(map(int, roi))
            color = tuple(map(int, color))
            img = cv2.rectangle(img, (roi[0], roi[1]), (roi[0] + roi[2], roi[1] + roi[3]),
                                color, linewidth)
            if name:
                img = cv2.putText(img, name, (roi[0], roi[1] - 5),
                                  cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
    return img
Example #16
Source File: tracking.py From pi-tracking-telescope with MIT License

def getFrame(self):
    if self.frame is None:
        return None

    self.mutex.acquire()
    try:
        vis_frame = self.frame.copy()
    finally:
        self.mutex.release()

    rows, cols = vis_frame.shape[:2]
    #M = cv2.getRotationMatrix2D((cols/2,rows/2),45,1)
    #vis_frame = cv2.warpAffine(vis_frame,M,(cols,rows))

    #draw_str(vis_frame, (20, 20), 'track count: %d' % len(tracker.tracks))
    if self.draw_trails:
        cv2.polylines(vis_frame,
                      [np.int32(tr) for tr in [[t[:2] for t in tr] for tr in self.tracks]],
                      False, (0, 255, 0))

    for tr in self.tracks:
        cv2.circle(vis_frame, (tr[-1][0], tr[-1][1]), 2, (0, 255, 0), -1)

    return vis_frame
Example #17
Source File: OSMTGC.py From TGC-Designer-Tools with Apache License 2.0

def drawWayOnImage(way, color, im, pc, image_scale, thickness=-1, x_offset=0.0, y_offset=0.0):
    # Get the shape of this way and draw it as a poly
    nds = []
    # Allow automatically resolving missing nodes, but this is VERY slow with the
    # API requests, try to request them above instead
    for node in way.get_nodes(resolve_missing=True):
        nds.append(pc.latlonToCV2(node.lat, node.lon, image_scale, x_offset, y_offset))
    # Uses points and not image pixels, so flip the x and y
    nds = np.array(nds)
    nds[:, [0, 1]] = nds[:, [1, 0]]
    nds = np.int32([nds])  # Bug with fillPoly, needs explicit cast to 32bit
    cv2.fillPoly(im, nds, color)

    # Add option to draw shape again, but with thick line
    # Use this to automatically expand some shapes, for example water
    # For better masking
    if thickness > 0:
        # Need to draw again since fillPoly has no line thickness options that I've found
        cv2.polylines(im, nds, True, color, thickness, lineType=cv2.LINE_AA)
Example #18
Source File: run_demo_server.py From ICPR_TextDection with GNU General Public License v3.0

def draw_illu(illu, rst):
    for t in rst['text_lines']:
        d = np.array([t['x0'], t['y0'], t['x1'], t['y1'], t['x2'],
                      t['y2'], t['x3'], t['y3']], dtype='int32')
        d = d.reshape(-1, 2)
        cv2.polylines(illu, [d], isClosed=True, color=(255, 255, 0))
    return illu
Example #19
Source File: tools.py From ICPR_TextDection with GNU General Public License v3.0

def vis_img_bbox(img_file, gt_file):
    img = cv2.imread(img_file)[:, :, ::-1]
    gtbboxes = np.asarray(read_from_gt(gt_file)[0])
    print(np.shape(gtbboxes))
    for box in gtbboxes:
        cv2.polylines(img[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))],
                      True, color=(255, 255, 0), thickness=1)
    show_image_from_array(img)
Example #20
Source File: page_dewarp.py From page_dewarp with MIT License

def visualize_span_points(name, small, span_points, corners):
    display = small.copy()

    for i, points in enumerate(span_points):
        points = norm2pix(small.shape, points, False)

        mean, small_evec = cv2.PCACompute(points.reshape((-1, 2)),
                                          None, maxComponents=1)

        dps = np.dot(points.reshape((-1, 2)), small_evec.reshape((2, 1)))
        dpm = np.dot(mean.flatten(), small_evec.flatten())

        point0 = mean + small_evec * (dps.min() - dpm)
        point1 = mean + small_evec * (dps.max() - dpm)

        for point in points:
            cv2.circle(display, fltp(point), 3,
                       CCOLORS[i % len(CCOLORS)], -1, cv2.LINE_AA)

        cv2.line(display, fltp(point0), fltp(point1),
                 (255, 255, 255), 1, cv2.LINE_AA)

    cv2.polylines(display, [norm2pix(small.shape, corners, True)],
                  True, (255, 255, 255))

    debug_show(name, 3, 'span points', display)
Example #21
Source File: blob_clustering.py From aggregation with Apache License 2.0

def __find_positive_regions__(self, user_ids, markings, dimensions):
    """
    given a set of polygon markings made by people, determine the area(s) in the
    image which were outlined by enough people. "positive" => true positive as
    opposed to noise or false positive
    """
    unique_users = set(user_ids)

    aggregate_polygon_list = []

    for i in unique_users:
        user_polygons = [markings[j] for j, u in enumerate(user_ids) if u == i]

        template = np.zeros(dimensions, np.uint8)

        # start by drawing the outline of the area
        cv2.polylines(template, user_polygons, True, 255)

        # now take the EXTERNAL contour
        # the docker image has an older version of opencv where findContours only returns 2 values
        if cv2.__version__ == '2.4.8':
            contours, hierarchy = cv2.findContours(template, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
        else:
            im2, contours, hierarchy = cv2.findContours(template, cv2.RETR_EXTERNAL,
                                                        cv2.CHAIN_APPROX_SIMPLE)

        template2 = np.zeros(dimensions, np.uint8)
        cv2.drawContours(template2, contours, -1, 1, -1)

        aggregate_polygon_list.append(template2)

    aggregate_polygon = np.sum(aggregate_polygon_list, axis=0, dtype=np.uint8)

    # the threshold determines the minimum number of people who have outlined an area
    threshold = int(len(set(user_ids)) / 2)
    ret, thresh1 = cv2.threshold(aggregate_polygon, threshold, 255, cv2.THRESH_BINARY)

    return thresh1
Example #22
Source File: trans_utils.py From ocr.pytorch with MIT License

def rgb2gray(filename):
    im = Image.open(filename).convert('L')
    im.show()
    new_image = Image.new("L", (im.width + 6, im.height + 6), 0)
    out_image = Image.new("L", (im.width + 6, im.height + 6), 0)
    new_image.paste(im, (3, 3, im.width + 3, im.height + 3))
    im = getcvimage(im)
    new_image = getcvimage(new_image)
    out_image = getcvimage(out_image)
    _, thresh = cv2.threshold(new_image, 0, 255, cv2.THRESH_OTSU)
    pshowone(thresh)
    image, contours, hierarchy = cv2.findContours(thresh, 3, 2)
    # cnt = contours[0]
    # hull = cv2.convexHull(cnt)
    # image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    print(len(contours))
    cv2.polylines(out_image, contours, True, 255)
    # cv2.fillPoly(image, [cnt], 255)
    image = getpilimage(out_image)
    im = getpilimage(im)
    image = image.crop((3, 3, im.width + 3, im.height + 3))
    # char_color = image.crop((3,3,char_image.width + 3, char_image.height + 3))
    image.show()
    return
Example #23
Source File: head_pose_estimation.py From PINTO_model_zoo with MIT License

def draw_annotation_box(image, rotation_vector, translation_vector, camera_matrix,
                        dist_coeefs, color=(255, 255, 255), line_width=2):
    """Draw a 3D box as annotation of pose"""
    point_3d = []
    rear_size = 75
    rear_depth = 0
    point_3d.append((-rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, -rear_size, rear_depth))

    front_size = 100
    front_depth = 100
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d.append((-front_size, front_size, front_depth))
    point_3d.append((front_size, front_size, front_depth))
    point_3d.append((front_size, -front_size, front_depth))
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)

    # Map to 2d image points
    (point_2d, _) = cv2.projectPoints(point_3d,
                                      rotation_vector,
                                      translation_vector,
                                      camera_matrix,
                                      dist_coeefs)
    point_2d = np.int32(point_2d.reshape(-1, 2))

    # Draw all the lines
    cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[1]), tuple(point_2d[6]), color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[2]), tuple(point_2d[7]), color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[3]), tuple(point_2d[8]), color, line_width, cv2.LINE_AA)
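A hedged usage sketch with a synthetic pinhole camera (hypothetical focal length and principal point; zero rotation, and the box pushed 1000 units along z so it lands inside the frame):

import cv2
import numpy as np

focal, cx, cy = 500.0, 320.0, 240.0
camera_matrix = np.array([[focal, 0, cx], [0, focal, cy], [0, 0, 1]], dtype=np.float64)
dist_coeefs = np.zeros((4, 1))          # no lens distortion
rvec = np.zeros((3, 1))                 # no rotation
tvec = np.array([[0.0], [0.0], [1000.0]])

frame = np.zeros((480, 640, 3), dtype=np.uint8)
draw_annotation_box(frame, rvec, tvec, camera_matrix, dist_coeefs)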
Example #24
Source File: pose_estimator.py From head-pose-estimation with MIT License

def draw_annotation_box(self, image, rotation_vector, translation_vector,
                        color=(255, 255, 255), line_width=2):
    """Draw a 3D box as annotation of pose"""
    point_3d = []
    rear_size = 75
    rear_depth = 0
    point_3d.append((-rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, -rear_size, rear_depth))

    front_size = 100
    front_depth = 100
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d.append((-front_size, front_size, front_depth))
    point_3d.append((front_size, front_size, front_depth))
    point_3d.append((front_size, -front_size, front_depth))
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)

    # Map to 2d image points
    (point_2d, _) = cv2.projectPoints(point_3d,
                                      rotation_vector,
                                      translation_vector,
                                      self.camera_matrix,
                                      self.dist_coeefs)
    point_2d = np.int32(point_2d.reshape(-1, 2))

    # Draw all the lines
    cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[1]), tuple(
        point_2d[6]), color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[2]), tuple(
        point_2d[7]), color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[3]), tuple(
        point_2d[8]), color, line_width, cv2.LINE_AA)
Example #25
Source File: optical_flow_tracker.py From head-pose-estimation with MIT License

def draw_track(self, image):
    """Draw track lines on image."""
    cv2.polylines(image, [np.int32(track) for track in self.tracks],
                  False, (0, 255, 0))
Example #26
Source File: tools.py From keras-ocr with MIT License

def drawBoxes(image, boxes, color=(255, 0, 0), thickness=5, boxes_format='boxes'):
    """Draw boxes onto an image.

    Args:
        image: The image on which to draw the boxes.
        boxes: The boxes to draw.
        color: The color for each box.
        thickness: The thickness for each box.
        boxes_format: The format used for providing the boxes. Options are
            "boxes" (an array with shape (N, 4, 2), where N is the number of
            boxes and each box is a list of four points) as provided by
            `keras_ocr.detection.Detector.detect`; "lines" (a list of lines,
            where each line is itself a list of (box, character) tuples) as
            provided by `keras_ocr.data_generation.get_image_generator`; or
            "predictions", where boxes is itself a list of (word, box) tuples
            as provided by `keras_ocr.pipeline.Pipeline.recognize` or
            `keras_ocr.recognition.Recognizer.recognize_from_boxes`.
    """
    if len(boxes) == 0:
        return image
    canvas = image.copy()
    if boxes_format == 'lines':
        revised_boxes = []
        for line in boxes:
            for box, _ in line:
                revised_boxes.append(box)
        boxes = revised_boxes
    if boxes_format == 'predictions':
        revised_boxes = []
        for _, box in boxes:
            revised_boxes.append(box)
        boxes = revised_boxes
    for box in boxes:
        cv2.polylines(img=canvas,
                      pts=box[np.newaxis].astype('int32'),
                      color=color,
                      thickness=thickness,
                      isClosed=True)
    return canvas
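As a follow-up, a hedged usage sketch for the default "boxes" format described in the docstring above (hypothetical coordinates on a blank canvas):

import numpy as np

# One hypothetical quadrilateral in the (N, 4, 2) "boxes" format
image = np.zeros((100, 150, 3), dtype=np.uint8)
boxes = np.array([[[10, 10], [140, 10], [140, 60], [10, 60]]], dtype='float32')
annotated = drawBoxes(image, boxes, color=(255, 0, 0), thickness=2)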
Example #27
Source File: kalman_1.py From Python-Code with MIT License

def drawLines(img, points, r, g, b):
    '''Draws lines'''
    cv2.polylines(img, [np.int32(points)], isClosed=False, color=(r, g, b))
Example #28
Source File: annotation.py From imantics with MIT License

def draw(self, image, color=None, thickness=3):
    """
    Draws the polygons to the image array of shape (width, height, 3)

    *This function modifies the image array*

    :param color: RGB color representation
    :type color: tuple, list
    :param thickness: pixel thickness of box
    :type thickness: int
    """
    color = Color.create(color).rgb
    image_copy = image.copy()
    cv2.polylines(image_copy, self.points, True, color, thickness)
    return image_copy
Example #29
Source File: debugger.py From mxnet-centernet with MIT License

def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'):
    alpha = 0.5
    bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
    for ii, (dets, lc, cc) in enumerate(
            [(dets_gt, (12, 49, 250), (0, 0, 255)),
             (dets_dt, (250, 152, 12), (255, 0, 0))]):
        # cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3)
        for cat in dets:
            cl = (self.colors[cat - 1, 0, 0]).tolist()
            for i in range(len(dets[cat])):
                if dets[cat][i, -1] > center_thresh:
                    dim = dets[cat][i, 5:8]
                    loc = dets[cat][i, 8:11]
                    rot_y = dets[cat][i, 11]
                    rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
                    for k in range(4):
                        rect[k] = self.project_3d_to_bird(rect[k])
                    if ii == 0:
                        cv2.fillPoly(
                            bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)],
                            lc, lineType=cv2.LINE_AA)
                    else:
                        cv2.polylines(
                            bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)],
                            True, lc, 2, lineType=cv2.LINE_AA)
                    # for e in [[0, 1], [1, 2], [2, 3], [3, 0]]:
                    for e in [[0, 1]]:
                        t = 4 if e == [0, 1] else 1
                        cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
                                 (rect[e[1]][0], rect[e[1]][1]), lc, t,
                                 lineType=cv2.LINE_AA)
    self.imgs[img_id] = bird_view
Example #30
Source File: datasets_imgnet.py From 3DOD_thesis with MIT License

def draw_3dbbox_from_keypoints(img, keypoints):
    img = np.copy(img)

    color = [190, 0, 255]  # (BGR)
    front_color = [255, 230, 0]  # (BGR)
    # (0 -> 3 -> 7 -> 4 -> 0, 1 -> 2 -> 6 -> 5 -> 1, etc.)
    lines = [[0, 3, 7, 4, 0], [1, 2, 6, 5, 1], [0, 1], [2, 3], [6, 7], [4, 5]]
    colors = [front_color, color, color, color, color, color]

    for n, line in enumerate(lines):
        bg = colors[n]
        cv2.polylines(img, np.int32([keypoints[line]]), False, bg,
                      lineType=cv2.LINE_AA, thickness=2)

    return img