Python cv2.EVENT_MOUSEMOVE Examples
The following are 30 code examples of cv2.EVENT_MOUSEMOVE. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
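Every example below is a mouse callback; none of them shows how the callback gets attached to a window. As a reminder of the surrounding setup, here is a minimal, self-contained sketch (not taken from any of the projects listed below) that registers a callback with cv2.setMouseCallback and draws while the left button is held down:

import cv2
import numpy as np

# Blank canvas to draw on: height x width x 3 channels, 8-bit.
canvas = np.zeros((480, 640, 3), np.uint8)
drawing = False  # True while the left mouse button is held down


def on_mouse(event, x, y, flags, param):
    """Draw small circles while the left mouse button is held down."""
    global drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        cv2.circle(canvas, (x, y), 3, (0, 255, 0), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False


cv2.namedWindow("canvas")
cv2.setMouseCallback("canvas", on_mouse)
while True:
    cv2.imshow("canvas", canvas)
    if cv2.waitKey(20) & 0xFF == 27:  # Esc quits
        break
cv2.destroyAllWindows()

The callbacks in the examples that follow all use this same (event, x, y, flags, param) signature.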
Example #1
Source File: mouse_drawing.py From Mastering-OpenCV-4-with-Python with MIT License | 8 votes |
def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDBLCLK:
        print("event: EVENT_LBUTTONDBLCLK")
        cv2.circle(image, (x, y), 10, colors['magenta'], -1)
    if event == cv2.EVENT_MOUSEMOVE:
        print("event: EVENT_MOUSEMOVE")
    if event == cv2.EVENT_LBUTTONUP:
        print("event: EVENT_LBUTTONUP")
    if event == cv2.EVENT_LBUTTONDOWN:
        print("event: EVENT_LBUTTONDOWN")

# We create the canvas to draw: 600 x 600 pixels, 3 channels, uint8 (8-bit unsigned integers)
# We set the background to black using np.zeros():
Example #2
Source File: 04_interacting_video.py From OpenCV-3-x-with-Python-By-Example with MIT License | 7 votes |
def draw_rectangle(event, x, y, flags, params):
    global x_init, y_init, drawing

    def update_pts():
        params["top_left_pt"] = (min(x_init, x), min(y_init, y))
        params["bottom_right_pt"] = (max(x_init, x), max(y_init, y))
        img[y_init:y, x_init:x] = 255 - img[y_init:y, x_init:x]

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        x_init, y_init = x, y
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        update_pts()
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        update_pts()
Example #3
Source File: DrawShape.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License | 7 votes |
def drawShape(event, x, y, flags, params):
    global mode, drawing, xi, yi

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        xi, yi = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.rectangle(image, (xi, yi), (x, y), (0, 255, 0), -1)
            else:
                cv2.circle(image, (x, y), 5, (255, 0, 0), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(image, (xi, yi), (x, y), (0, 255, 0), -1)
        else:
            cv2.circle(image, (x, y), 5, (255, 0, 0), -1)
Example #4
Source File: webcam_demo.py From THOR with MIT License | 7 votes |
def on_mouse(event, x, y, flags, params):
    global mousedown, mouseupdown, drawnBox, boxToDraw, initialize, boxToDraw_xywh
    if event == cv2.EVENT_LBUTTONDOWN:
        drawnBox[[0, 2]] = x
        drawnBox[[1, 3]] = y
        mousedown = True
        mouseupdown = False
    elif mousedown and event == cv2.EVENT_MOUSEMOVE:
        drawnBox[2] = x
        drawnBox[3] = y
    elif event == cv2.EVENT_LBUTTONUP:
        drawnBox[2] = x
        drawnBox[3] = y
        mousedown = False
        mouseupdown = True
        initialize = True
        boxToDraw = drawnBox.copy()
        boxToDraw[[0, 2]] = np.sort(boxToDraw[[0, 2]])
        boxToDraw[[1, 3]] = np.sort(boxToDraw[[1, 3]])
        boxToDraw_xywh = xyxy_to_xywh(boxToDraw)
Example #5
Source File: drawer.py From Handwriting-Recognition with Apache License 2.0 | 7 votes |
def mouse_callback(self, event, x, y, flags, params):
    """
    Callback method for drawing circles on an image
    """
    # left mouse button is pressed
    if event == cv2.EVENT_LBUTTONDOWN:
        self.mouse_pressed = True

    # mouse pointer has moved over the window
    elif event == cv2.EVENT_MOUSEMOVE:
        if self.mouse_pressed:
            cv2.circle(img=self.img, center=(x, y), radius=20, color=self.char_color, thickness=-1)

    # left mouse button is released
    elif event == cv2.EVENT_LBUTTONUP:
        self.mouse_pressed = False
        cv2.circle(img=self.img, center=(x, y), radius=20, color=self.char_color, thickness=-1)
Example #6
Source File: draw.py From QuickDraw with MIT License | 7 votes |
def paint_draw(event, x, y, flags, param):
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            cv2.line(image, (ix, iy), (x, y), (255, 255, 255), 5)
            ix = x
            iy = y
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        cv2.line(image, (ix, iy), (x, y), (255, 255, 255), 5)
        ix = x
        iy = y
    return x, y
Example #7
Source File: opencv_draw.py From Python-Code with MIT License | 7 votes |
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
            else:
                cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
        else:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
Example #8
Source File: pose_estimation.py From OpenCV-3-x-with-Python-By-Example with MIT License | 7 votes |
def mouse_event(self, event, x, y, flags, param):
    x, y = np.int16([x, y])

    # Detecting the mouse button down event
    if event == cv2.EVENT_LBUTTONDOWN:
        self.drag_start = (x, y)
        self.tracking_state = 0

    if self.drag_start:
        if event == cv2.EVENT_MOUSEMOVE:
            h, w = param["frame"].shape[:2]
            xo, yo = self.drag_start
            x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
            x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
            self.selected_rect = None
            if x1 - x0 > 0 and y1 - y0 > 0:
                self.selected_rect = (x0, y0, x1, y1)
        elif event == cv2.EVENT_LBUTTONUP:
            self.drag_start = None
            if self.selected_rect is not None:
                self.callback_func(self.selected_rect)
                self.selected_rect = None
                self.tracking_state = 1
Example #9
Source File: object_tracker.py From OpenCV-3-x-with-Python-By-Example with MIT License | 7 votes |
def mouse_event(self, event, x, y, flags, param):
    x, y = np.int16([x, y])

    # Detecting the mouse button down event
    if event == cv2.EVENT_LBUTTONDOWN:
        self.drag_start = (x, y)
        self.tracking_state = 0

    if self.drag_start:
        if event == cv2.EVENT_MOUSEMOVE:
            h, w = self.frame.shape[:2]
            xo, yo = self.drag_start
            x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
            x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
            self.selection = None
            if x1 - x0 > 0 and y1 - y0 > 0:
                self.selection = (x0, y0, x1, y1)
        elif event == cv2.EVENT_LBUTTONUP:
            self.drag_start = None
            if self.selection is not None:
                self.tracking_state = 1

# Method to start tracking the object
Example #10
Source File: bbox_labeling.py From dlcv_for_beginners with BSD 3-Clause "New" or "Revised" License | 7 votes |
def _mouse_ops(self, event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        self._drawing = True
        self._pt0 = (x, y)
    elif event == cv2.EVENT_LBUTTONUP:
        self._drawing = False
        self._pt1 = (x, y)
        self._bboxes.append((self._cur_label, (self._pt0, self._pt1)))
    elif event == cv2.EVENT_MOUSEMOVE:
        self._pt1 = (x, y)
    elif event == cv2.EVENT_RBUTTONUP:
        if self._bboxes:
            self._bboxes.pop()
Example #11
Source File: py3_raspberry_pi.py From display_ocr with GNU General Public License v2.0 | 7 votes |
def draw_shape(event, x, y, flags, param):
    global ix, iy, drawing, mode, rectangle
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            rectangle = define_rectangle(iy, ix, y, x)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if not (ix == x and iy == y):
            rectangle = define_rectangle(iy, ix, y, x)

# GUI INPUTS
Example #12
Source File: OCR.py From display_ocr with GNU General Public License v2.0 | 7 votes |
def draw_rectangle(event, x, y, flags, param):
    global start_x, start_y, end_x, end_y, drawing, expected_value
    if event == cv2.EVENT_LBUTTONDOWN:
        # menu position
        if y < 40:
            # menu map
            if x > 8 and x < 148:
                SaveImage(event)
            if x > 153 and x < 190:
                OnClose(event)
            if x > 195 and x < 252:
                # The original file targets Python 2 ("print ..."); written here as a Python 3 call.
                print("OpenSource Development: https://github.com/arturaugusto/display_ocr.\nBased on examples availables at https://code.google.com/p/python-tesseract/.\nGPLv2 License")
        else:
            drawing = True
            start_x, start_y = x, y
            end_x, end_y = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        #start_x,start_y = -1,-1
        #end_x,end_y = -1,-1
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        if y < 40:
            end_x, end_y = x, 41
        else:
            end_x, end_y = x, y
Example #13
Source File: annotate.py From event-Python with MIT License | 7 votes |
def annotate(event, x, y, flags, param):
    """Callback for function 'annotate_tracks'.
    Tracks cursor and detects if mouse position is to be saved as
    a trackpoint. Track points are saved once per frame if the left
    mouse button is held down.
    """
    global is_read
    global px, py

    if event == cv2.EVENT_MOUSEMOVE:
        px, py = x, y

    if event == cv2.EVENT_LBUTTONDOWN:
        is_read = 1

    if event == cv2.EVENT_LBUTTONUP:
        is_read = 0
Example #14
Source File: webcam_demo.py From Re3 with GNU General Public License v3.0 | 7 votes |
def on_mouse(event, x, y, flags, params):
    global mousedown, mouseupdown, drawnBox, boxToDraw, initialize
    if event == cv2.EVENT_LBUTTONDOWN:
        drawnBox[[0, 2]] = x
        drawnBox[[1, 3]] = y
        mousedown = True
        mouseupdown = False
    elif mousedown and event == cv2.EVENT_MOUSEMOVE:
        drawnBox[2] = x
        drawnBox[3] = y
    elif event == cv2.EVENT_LBUTTONUP:
        drawnBox[2] = x
        drawnBox[3] = y
        mousedown = False
        mouseupdown = True
        initialize = True
        boxToDraw = drawnBox.copy()
        boxToDraw[[0, 2]] = np.sort(boxToDraw[[0, 2]])
        boxToDraw[[1, 3]] = np.sort(boxToDraw[[1, 3]])
Example #15
Source File: mouse_painting.py From PyIntroduction with MIT License | 7 votes |
def _callBack(self, event, x, y, flags, param):
    # Handle left mouse button press
    if event == cv2.EVENT_LBUTTONDOWN:
        self._doEvent(self._press_func, x, y)
        self._is_drag = True

    # Handle left-button drag
    elif event == cv2.EVENT_MOUSEMOVE:
        if self._is_drag:
            self._doEvent(self._drag_func, x, y)

    # Handle left mouse button release
    elif event == cv2.EVENT_LBUTTONUP:
        self._doEvent(self._release_func, x, y)
        self._is_drag = False

# Create an empty image for drawing
Example #16
Source File: enter_camera_data.py From Multi-Camera-Object-Tracking with GNU General Public License v3.0 | 5 votes |
def mouse_evt(event, x, y, flags, param):
    global cons, cams, selected_cam
    if(event == cv2.EVENT_LBUTTONDOWN):
        if(placing):
            if(len(cams) < max_cams):
                cams.append((x, y))
                # Note: np.bool is removed in recent NumPy releases; use plain bool there.
                cons = np.concatenate((cons, np.zeros((1, len(cons)), dtype=np.bool)), axis=0)
                cons = np.concatenate((cons, np.zeros((len(cons), 1), dtype=np.bool)), axis=1)
                update_cameras_img()
                cv2.imshow("Floorplan", cam_img)
        elif(not placing):
            indx = closest_cam(x, y)
            if(indx != None):
                if(selected_cam == None):
                    selected_cam = indx
                elif(selected_cam != None):
                    if(indx != selected_cam):
                        cons[indx][selected_cam] = not cons[indx][selected_cam]
                        cons[selected_cam][indx] = not cons[selected_cam][indx]
                        selected_cam = None
                        update_connections_img()
                        cv2.imshow("Floorplan", conn_img)
                    elif(indx == selected_cam):
                        selected_cam = None
    elif(event == cv2.EVENT_MOUSEMOVE and placing == False and selected_cam != None):
        cur_img = conn_img.copy()
        cv2.line(cur_img, (cams[selected_cam][0], cams[selected_cam][1]), (x, y), (0, 0, 0), 4)
        cv2.imshow("Floorplan", cur_img)
    elif(event == cv2.EVENT_MBUTTONDOWN):
        if(placing):
            if(len(cams) > 0):
                indx = closest_cam(x, y)
                if(indx != None):
                    cons = np.delete(cons, indx, axis=0)
                    cons = np.delete(cons, indx, axis=1)
                    del cams[indx]
                    update_cameras_img()
                    cv2.imshow("Floorplan", cam_img)
Example #17
Source File: ex_particle_filter_mouse_tracking.py From deepgaze with MIT License | 5 votes |
def draw_circle(event, x, y, flags, param):
    #if event == cv2.EVENT_MOUSEMOVE:
    if event == cv2.EVENT_LBUTTONDOWN:
        #Predict the position of the pointer
        my_particle.predict(x_velocity=0, y_velocity=0, std=std)
        #Estimate the next position using the internal model
        x_estimated, y_estimated, _, _ = my_particle.estimate()
        #Update the position of the particles based on the measurement.
        #Adding some noise to the measurement.
        noise_coefficient = np.random.uniform(low=0.0, high=10.0)
        x_measured = x + np.random.randn() * noise_coefficient
        y_measured = y + np.random.randn() * noise_coefficient
        my_particle.update(x_measured, y_measured)
        #Drawing the circles for the mouse position the
        #estimation and the particles.
        for i in range(0, tot_particles):
            x_particle, y_particle = my_particle.returnParticlesCoordinates(i)
            cv2.circle(img, (x_particle, y_particle), 2, (0, 0, 255), -1)  #RED: Particles
        cv2.circle(img, (x, y), 2, (0, 255, 0), -1)  #GREEN: Mouse position
        cv2.circle(img, (x_estimated, y_estimated), 2, (255, 0, 0), -1)  #BLUE: Filter estimation
        #Print general information
        print("Total Particles: " + str(tot_particles))
        print("Effective N: " + str(my_particle.returnParticlesContribution()))
        print("Measurement Noise: " + str(noise_coefficient) + "/10")
        print("x=" + str(x) + "; y=" + str(y) + " | " +
              "x_measured=" + str(int(x_measured)) + "; y_measured=" + str(int(y_measured)) + " | " +
              "x_estimated=" + str(int(x_estimated)) + "; y_estimated=" + str(int(y_estimated)))
        #print(my_particle.weights) #uncomment to print the weights
        #print(my_particle.particles) #uncomment to print the particle position
        print("")
        #if(my_particle.returnParticlesContribution() < 8):
        my_particle.resample()
Example #18
Source File: simple-ide.py From ATX with Apache License 2.0 | 5 votes |
def make_mouse_callback(imgs, ref_pt):
    # initialize the list of reference points and boolean indicating
    # whether cropping is being performed or not
    cropping = [False]
    clone = imgs[0]

    def _click_and_crop(event, x, y, flags, param):
        # grab references to the global variables
        # global ref_pt, cropping

        # if the left mouse button was clicked, record the starting
        # (x, y) coordinates and indicate that cropping is being
        # performed
        if event == cv2.EVENT_LBUTTONDOWN:
            ref_pt[0] = (x, y)
            cropping[0] = True

        # check to see if the left mouse button was released
        elif event == cv2.EVENT_LBUTTONUP:
            # record the ending (x, y) coordinates and indicate that
            # the cropping operation is finished
            ref_pt[1] = (x, y)
            cropping[0] = False

            # draw a rectangle around the region of interest
            imgs[1] = image = clone.copy()
            cv2.rectangle(image, ref_pt[0], ref_pt[1], (0, 255, 0), 2)
            cv2.imshow("image", image)
        elif event == cv2.EVENT_MOUSEMOVE and cropping[0]:
            img2 = clone.copy()
            cv2.rectangle(img2, ref_pt[0], (x, y), (0, 255, 0), 2)
            imgs[1] = image = img2
            cv2.imshow("image", image)
    return _click_and_crop
Example #19
Source File: draw.py From open_model_zoo with Apache License 2.0 | 5 votes |
def mouse_callback(event, x, y, flags, params):
    global previous_position, theta, phi, should_rotate, scale_dx, scale_dy
    if event == cv2.EVENT_LBUTTONDOWN:
        previous_position = [x, y]
        should_rotate = True
    if event == cv2.EVENT_MOUSEMOVE and should_rotate:
        theta += (x - previous_position[0]) / scale_dx * 2 * math.pi
        phi -= (y - previous_position[1]) / scale_dy * 2 * math.pi * 2
        phi = max(min(math.pi / 2, phi), -math.pi / 2)
        previous_position = [x, y]
    if event == cv2.EVENT_LBUTTONUP:
        should_rotate = False
Example #20
Source File: annotation.py From Walk-Assistant with GNU General Public License v3.0 | 5 votes |
def click(event, x, y, flags, param):
    global clicks, l_down, r_down

    if event == cv2.EVENT_LBUTTONDOWN or (event == cv2.EVENT_MOUSEMOVE and l_down):
        l_down = True
        x = max(0, min(x, WIDTH - 1))
        y = max(0, min(y, HEIGHT - 1))
        r = y // KERNEL
        c = x // KERNEL
        grid[r][c] = 1
    elif event == cv2.EVENT_LBUTTONUP:
        l_down = False

    if event == cv2.EVENT_RBUTTONDOWN or (event == cv2.EVENT_MOUSEMOVE and r_down):
        r_down = True
        r = y // KERNEL
        c = x // KERNEL
        grid[r][c] = 0
    elif event == cv2.EVENT_RBUTTONUP:
        r_down = False
Example #21
Source File: run.py From KCFpy with MIT License | 5 votes |
def draw_boundingbox(event, x, y, flags, param):
    global selectingObject, initTracking, onTracking, ix, iy, cx, cy, w, h

    if event == cv2.EVENT_LBUTTONDOWN:
        selectingObject = True
        onTracking = False
        ix, iy = x, y
        cx, cy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        cx, cy = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        selectingObject = False
        if(abs(x - ix) > 10 and abs(y - iy) > 10):
            w, h = abs(x - ix), abs(y - iy)
            ix, iy = min(x, ix), min(y, iy)
            initTracking = True
        else:
            onTracking = False
    elif event == cv2.EVENT_RBUTTONDOWN:
        onTracking = False
        if(w > 0):
            ix, iy = x - w / 2, y - h / 2
            initTracking = True
Example #22
Source File: track.py From animal-tracking with Creative Commons Zero v1.0 Universal | 5 votes |
def drawFloorCrop(event, x, y, flags, params):
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h, 0]
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4, 2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0, 0], [0, h], [h, h], [h, 0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0, 0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x, y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x, y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        params['croppingPolygons'][name][-1] = [x, y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(params['croppingPolygons'][name], (len(params['croppingPolygons'][name]), 2))],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon)
Example #23
Source File: jobs_manual.py From faceswap with GNU General Public License v3.0 | 5 votes |
def on_event(self, event, x, y, flags, param):  # pylint: disable=unused-argument,invalid-name
    """ Handle the mouse events """
    # pylint: disable=no-member
    if self.interface.get_edit_mode() != "Edit":
        return
    logger.trace("Mouse event: (event: %s, x: %s, y: %s, flags: %s, param: %s",
                 event, x, y, flags, param)
    if not self.mouse_state and event not in (cv2.EVENT_LBUTTONDOWN, cv2.EVENT_MBUTTONDOWN):
        return

    self.initialize()

    if event in (cv2.EVENT_LBUTTONUP, cv2.EVENT_MBUTTONUP):
        self.mouse_state = None
        self.last_move = None
    elif event == cv2.EVENT_LBUTTONDOWN:
        self.mouse_state = "left"
        self.set_bounding_box(x, y)
    elif event == cv2.EVENT_MBUTTONDOWN:
        self.mouse_state = "middle"
        self.set_bounding_box(x, y)
    elif event == cv2.EVENT_MOUSEMOVE:
        if self.mouse_state == "left":
            self.move_bounding_box(x, y)
        elif self.mouse_state == "middle":
            self.resize_bounding_box(x, y)
Example #24
Source File: run.py From KCFnb with MIT License | 5 votes |
def draw_boundingbox(event, x, y, flags, param):
    global selectingObject, initTracking, onTracking, ix, iy, cx, cy, w, h

    if event == cv2.EVENT_LBUTTONDOWN:
        selectingObject = True
        onTracking = False
        ix, iy = x, y
        cx, cy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        cx, cy = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        selectingObject = False
        if(abs(x - ix) > 10 and abs(y - iy) > 10):
            w, h = abs(x - ix), abs(y - iy)
            ix, iy = min(x, ix), min(y, iy)
            initTracking = True
        else:
            onTracking = False
    elif event == cv2.EVENT_RBUTTONDOWN:
        onTracking = False
        if(w > 0):
            ix, iy = x - w / 2, y - h / 2
            initTracking = True
Example #25
Source File: mnist_calc.py From ncappzoo with MIT License | 5 votes |
def _mouse_event(self, event, x, y, flags, param):
    """Event listener for mouse events."""
    if event == cv2.EVENT_LBUTTONDOWN:
        if self._equals_sign.contains_point(x, y):
            # Equal sign was clicked
            self.submit()
            self._draw_results()
        elif self._clear_all_button.contains_point(x, y):
            # Clear was clicked
            self._clear_ui()
            self._draw_ui()
        elif self._operator.contains_point(x, y):
            # The operator was clicked, swap to the next operator
            self._operator.clear()
            if self._operator is self._plus_sign:
                self._operator = self._minus_sign
            elif self._operator is self._minus_sign:
                self._operator = self._multiplication_sign
            elif self._operator is self._multiplication_sign:
                self._operator = self._division_sign
            elif self._operator is self._division_sign:
                self._operator = self._plus_sign
            self._operator.draw()
        else:
            self._drawing = True
    elif event == cv2.EVENT_MOUSEMOVE and self._drawing:
        if self._operand1.contains_point(x, y) or self._operand2.contains_point(x, y):
            # Draw if this is inside an operand rectangle
            if self._last_point:
                cv2.line(self._canvas, self._last_point, (x, y), (0, 0, 0), 30)
                self._last_point = (x, y)
            else:
                self._last_point = (x, y)
        else:
            # Drawing outside the boundaries, forget last point so line won't connect when re-entering boundary
            self._last_point = None
    elif event == cv2.EVENT_LBUTTONUP:
        self._drawing = False
        self._last_point = None
Example #26
Source File: mouse_drawing_circles_and_text.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def draw_circle(event, x, y, flags, param):
    """Mouse callback function"""
    global circles
    if event == cv2.EVENT_LBUTTONDBLCLK:
        # Add the circle with coordinates x,y
        print("event: EVENT_LBUTTONDBLCLK")
        circles.append((x, y))

    if event == cv2.EVENT_RBUTTONDBLCLK:
        # Delete all circles (clean the screen)
        print("event: EVENT_RBUTTONDBLCLK")
        circles[:] = []

    elif event == cv2.EVENT_RBUTTONDOWN:
        # Delete last added circle
        print("event: EVENT_RBUTTONDOWN")
        try:
            circles.pop()
        except (IndexError):
            print("no circles to delete")

    if event == cv2.EVENT_MOUSEMOVE:
        print("event: EVENT_MOUSEMOVE")

    if event == cv2.EVENT_LBUTTONUP:
        print("event: EVENT_LBUTTONUP")

    if event == cv2.EVENT_LBUTTONDOWN:
        print("event: EVENT_LBUTTONDOWN")

# Structure to hold the created circles:
Example #27
Source File: draw_mask.py From DeepMosaics with GNU General Public License v3.0 | 5 votes |
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, brushsize
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            cv2.circle(img_drawn, (x, y), brushsize, (0, 255, 0), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        cv2.circle(img_drawn, (x, y), brushsize, (0, 255, 0), -1)
Example #28
Source File: draw.py From lightweight-human-pose-estimation-3d-demo.pytorch with Apache License 2.0 | 5 votes |
def mouse_callback(event, x, y, flags, params):
    global previous_position, theta, phi, should_rotate, scale_dx, scale_dy
    if event == cv2.EVENT_LBUTTONDOWN:
        previous_position = [x, y]
        should_rotate = True
    if event == cv2.EVENT_MOUSEMOVE and should_rotate:
        theta += (x - previous_position[0]) / scale_dx * 6.2831  # 360 deg
        phi -= (y - previous_position[1]) / scale_dy * 6.2831 * 2  # 360 deg
        phi = max(min(3.1415 / 2, phi), -3.1415 / 2)
        previous_position = [x, y]
    if event == cv2.EVENT_LBUTTONUP:
        should_rotate = False
Example #29
Source File: demo.py From DeepGrabCut-PyTorch with MIT License | 5 votes |
def interactive_drawing(event, x, y, flag, params):
    global xs, ys, ix, iy, drawing, image, output, left, right, up, down

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
        xs, ys = x, y
        left = min(left, x)
        right = max(right, x)
        up = min(up, y)
        down = max(down, y)
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing is True:
            cv2.line(image, (ix, iy), (x, y), (0, 0, 255), 2)
            cv2.line(output, (ix, iy), (x, y), (255, 255, 255), 1)
            ix = x
            iy = y
            left = min(left, x)
            right = max(right, x)
            up = min(up, y)
            down = max(down, y)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        cv2.line(image, (ix, iy), (x, y), (0, 0, 255), 2)
        cv2.line(output, (ix, iy), (x, y), (255, 255, 255), 1)
        ix = x
        iy = y
        cv2.line(image, (ix, iy), (xs, ys), (0, 0, 255), 2)
        cv2.line(output, (ix, iy), (xs, ys), (255, 255, 255), 1)
    return x, y
Example #30
Source File: CThermal.py From Thermal_Image_Analysis with MIT License | 5 votes |
def draw_spots(event, x, y, flags, params):
    # "cv" here refers to cv2 (imported in the source file under that alias).
    point = params[0]
    flag = params[1]
    point.clear()
    if event == cv.EVENT_MOUSEMOVE:
        if CFlir.drawing == True:
            point.append(x)
            point.append(y)
    elif event == cv.EVENT_LBUTTONDOWN:
        CFlir.drawing = False  # the original line reads "CFlir.drawing == False", a comparison with no effect
        point.append(x)
        point.append(y)
        flag[0] = False