Python cv2.minEnclosingCircle() Examples

The following are 21 code examples of cv2.minEnclosingCircle(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
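Before the project examples, here is a minimal self-contained sketch of what cv2.minEnclosingCircle() does: given an array of points (typically a contour), it returns the floating-point center and radius of the smallest circle that contains all of them. The synthetic mask below is illustrative, not taken from any of the projects.

import cv2
import numpy as np

# Draw a filled rectangular "blob" on a blank mask.
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(mask, (60, 80), (140, 120), 255, -1)

# findContours returns (contours, hierarchy) in OpenCV 4.x; the [-2] index also
# works with the 3-tuple returned by OpenCV 3.x, which is why many of the
# examples below use it.
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

(x, y), radius = cv2.minEnclosingCircle(contours[0])
print((x, y), radius)   # center near (100.0, 100.0), radius near 44.7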
Example #1
Source File: helpers.py    From Color-Tracker with MIT License
def get_contour_centers(contours: np.ndarray) -> np.ndarray:
    """
    Calculate the centers of the contours
    :param contours: Contours detected with find_contours
    :return: object centers as numpy array
    """

    if len(contours) == 0:
        return np.array([])

    # ((x, y), radius) = cv2.minEnclosingCircle(c)
    centers = np.zeros((len(contours), 2), dtype=np.int16)
    for i, c in enumerate(contours):
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        centers[i] = center
    return centers 
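The commented-out line above hints at an alternative: using the center of each contour's minimum enclosing circle instead of the centroid from image moments. A hedged sketch of that variant (the function name get_circle_centers is ours, not from the project):

import cv2
import numpy as np

def get_circle_centers(contours):
    """Like get_contour_centers, but uses the min-enclosing-circle center."""
    if len(contours) == 0:
        return np.array([])
    centers = np.zeros((len(contours), 2), dtype=np.int16)
    for i, c in enumerate(contours):
        (x, y), _radius = cv2.minEnclosingCircle(c)
        centers[i] = (int(x), int(y))
    return centers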
Example #2
Source File: camera_opencv.py    From Adeept_RaspTank with MIT License
def findColor(self, frame_image):
        hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, colorLower, colorUpper)#1
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None
        if len(cnts) > 0:
            self.findColorDetection = 1
            c = max(cnts, key=cv2.contourArea)
            ((self.box_x, self.box_y), self.radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            X = int(self.box_x)
            Y = int(self.box_y)
            error_Y = 240 - Y
            error_X = 320 - X
            # CVThread.servoMove(CVThread.P_servo, CVThread.P_direction, error_X)
            CVThread.servoMove(CVThread.T_servo, CVThread.T_direction, error_Y)

            # if CVThread.X_lock == 1 and CVThread.Y_lock == 1:
            if CVThread.Y_lock == 1:
                led.setColor(255,78,0)
                # switch.switch(1,1)
                # switch.switch(2,1)
                # switch.switch(3,1)
            else:
                led.setColor(0,78,255)
                # switch.switch(1,0)
                # switch.switch(2,0)
                # switch.switch(3,0)
        else:
            self.findColorDetection = 0
            move.motorStop()
        self.pause() 
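The error_X / error_Y values above compare the circle center against the middle of a 640x480 frame so the servos can re-center the target. A self-contained sketch of the same idea as a standalone helper (the function name and default frame size are illustrative assumptions):

import cv2

def center_error(contour, frame_size=(640, 480)):
    """Return (error_x, error_y) between the frame center and the center
    of the contour's minimum enclosing circle."""
    (x, y), _radius = cv2.minEnclosingCircle(contour)
    return frame_size[0] // 2 - int(x), frame_size[1] // 2 - int(y)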
Example #3
Source File: hsv_track.py    From DroneSimLab with MIT License
def find_red(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv,(130,130,180),(255,255,255))
    mask = cv2.erode(mask, np.ones((2,1)) , iterations=1)
    mask = cv2.dilate(mask, None, iterations=3)
    cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
    frame=img.copy()    
    ###based on example from  http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 3:
            cv2.circle(frame, (int(x), int(y)), 12,(0, 255, 255), 2)
    return frame 
Example #4
Source File: mask_analysis.py    From deepgaze with MIT License
def returnMaxAreaCircle(self, mask):
        """it returns the circle sorrounding the contour with the largest area.
 
        @param mask the binary image to use in the function
        @return get the center (x, y) and the radius of the circle
        """
        if(mask is None): return (None, None, None)
        mask = np.copy(mask)
        if(len(mask.shape) == 3):
            mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        contours, hierarchy = cv2.findContours(mask, 1, 2)
        area_array = np.zeros(len(contours)) #contains the area of the contours
        counter = 0
        for cnt in contours:   
                area_array[counter] = cv2.contourArea(cnt)
                counter += 1
        if(area_array.size==0): return (None, None, None) #the array is empty
        max_area_index = np.argmax(area_array) #return the index of the max_area element
        cnt = contours[max_area_index]
        (x,y),radius = cv2.minEnclosingCircle(cnt)
        return (int(x),int(y), int(radius)) 
Example #5
Source File: L2_track_target.py    From SCUTTLE with MIT License
def colorTarget(color_range=((0, 0, 0), (255, 255, 255))):

    image = cam.newImage()
    if filter == 'RGB':
        frame_to_thresh = image.copy()
    else:
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)                            # convert image to hsv colorspace RENAME THIS TO IMAGE_HSV

    thresh = cv2.inRange(frame_to_thresh, color_range[0], color_range[1])

    # clean up the threshold image with morphological opening and closing
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)                                 # opening removes small speckles
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)                                  # closing fills small holes

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]    # find contours (contiguous regions of "1" pixels)
    if len(cnts) > 0:                                                                       # begin processing if there are "1" pixels discovered
        c = max(cnts, key=cv2.contourArea)                                                  # return the largest target area
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        return np.array([round(x, 1), round(y, 1), round(radius, 1)])
    else:
        return np.array([None, None, 0]) 
Example #6
Source File: L2_color_target.py    From SCUTTLE with MIT License
def colorTarget(color_range=((0, 0, 0), (255, 255, 255))):

    image = cam.newImage()
    if filter == 'RGB':
        frame_to_thresh = image.copy()
    else:
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)                # convert image to hsv colorspace RENAME THIS TO IMAGE_HSV

    thresh = cv2.inRange(frame_to_thresh, color_range[0], color_range[1])
    mask = thresh

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]    # find contours (contiguous regions of "1" pixels)
    if len(cnts) == 0:                                                                      # begin processing if there are "1" pixels discovered
        return np.array([None, None, 0])
    else:
        c = max(cnts, key=cv2.contourArea)                                                  # return the largest target area
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        if radius > 4:
            return np.array([round(x, 1), round(y, 1), round(radius, 1)]) 
Example #7
Source File: preprocess.py    From jama16-retina-replication with MIT License
def _find_contours(image):
    """
    Helper function for finding contours of image.

    Returns coordinates of contours.
    """
    # Increase contrast in the image to improve the chances of finding
    # contours.
    processed = _increase_contrast(image)

    # Get the gray-scale of the image.
    gray = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)

    # Detect contour(s) in the image.
    cnts = cv2.findContours(
        gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # At least ensure that some contours were found.
    if len(cnts) > 0:
        # Find the largest contour in the mask.
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)

        # Assume the radius is of a certain size.
        if radius > 100:
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            return (center, radius) 
Example #8
Source File: create_dataset.py    From keras-autoencoder with GNU General Public License v3.0
def detect_ball(frame):
	blurred = cv2.GaussianBlur(frame, (11, 11), 0)
	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

	mask = cv2.inRange(hsv, greenLower, greenUpper)
	mask = cv2.erode(mask, None, iterations=2)
	mask = cv2.dilate(mask, None, iterations=2)

	cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
	center = None
 
	# only proceed if at least one contour was found
	if len(cnts) == 0:
		return

	# find the largest contour in the mask, then use
	# it to compute the minimum enclosing circle and
	# centroid
	c = max(cnts, key=cv2.contourArea)
	((x, y), radius) = cv2.minEnclosingCircle(c)
	M = cv2.moments(c)
	center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

	if radius < 10:
		print('Too small')
		return

	return center, radius 
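greenLower and greenUpper are HSV bounds defined at module level in the project. For reference only, a commonly used green-ball range (an assumption on our part, not taken from this repository):

# Illustrative HSV bounds for a green ball; the project defines its own values.
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)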
Example #9
Source File: Features.py    From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def boundingCircle(self):
        """
        **SUMMARY**

        This function calculates the minimum bounding circle of the blob in the image
        as an (x,y,r) tuple

        **RETURNS**

        An (x,y,r) tuple where (x,y) is the center of the circle and r is the radius

        **EXAMPLE**

        >>> img = Image("RatMask.png")
        >>> blobs = img.findBlobs()
        >>> print blobs[-1].boundingCircle()

        """

        try:
            import cv2
        except:
            logger.warning("Unable to import cv2")
            return None

        # contour of the blob in image
        contour = self.contour()

        points = []
        # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()
        for pair in contour:
            points.append([[pair[0], pair[1]]])

        points = np.array(points)

        (cen, rad) = cv2.minEnclosingCircle(points)

        return (cen[0], cen[1], rad)


#--------------------------------------------- 
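The loop above only reshapes the contour into the point layout cv2.minEnclosingCircle() expects. A minimal standalone sketch of that conversion (the sample points are illustrative):

import cv2
import numpy as np

contour_points = [(10, 10), (50, 12), (30, 40)]                            # illustrative (x, y) pairs
points = np.array([[[x, y]] for x, y in contour_points], dtype=np.int32)   # shape N x 1 x 2
(cx, cy), radius = cv2.minEnclosingCircle(points)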
Example #10
Source File: ColoredObjectDetector.py    From robot-camera-platform with GNU General Public License v3.0
def process(self, image):
        self.detected = False
        hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if len(contours) == 0:
            return
        largest_contour = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
        M = cv2.moments(largest_contour)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        self.circle_coordonates = (center, int(radius))
        self.detected = True 
Example #11
Source File: judge_color_center.py    From Python-Code with MIT License
def getColor(frame):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    maxsum = 0
    color = None
    center = None   # initialised so the return value is defined even if no color matches
    color_dict = colorList.getColorList()

    # test every color range in the dictionary
    for d in color_dict:
        # build a mask from the threshold range for this color
        mask = cv2.inRange(hsv, color_dict[d][0], color_dict[d][1])
        # erosion
        mask = cv2.erode(mask, None, iterations=2)
        # dilation; erosion followed by dilation is an opening, which removes noise
        mask = cv2.dilate(mask, None, iterations=2)
        img, cnts, hiera = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # only continue if contours were found
        if len(cnts) > 0:
            # total area of the detected regions
            sum = 0
            for c in cnts:
                sum += cv2.contourArea(c)

            # keep the color with the largest total area and locate its centroid
            if sum > maxsum:
                maxsum = sum
                if maxsum != 0:
                    color = d
                else:
                    color = None
                # find the contour with the largest area
                c = max(cnts, key=cv2.contourArea)
                # its minimum enclosing circle
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                # compute the contour moments
                M = cv2.moments(c)
                # compute the centroid
                center = (int(M["m10"]/M["m00"]), int(M["m01"]/M["m00"]))

    return color, center
Example #12
Source File: navbox.py    From katarina with MIT License
def detectRoundel( frame, debug=False ):
    global g_mser
    global THRESHOLD_FRACTION
    if g_mser is None:
        g_mser = cv2.MSER( _delta = 10, _min_area=100, _max_area=300*50*2 )
    gray = cv2.cvtColor( frame, cv2.COLOR_BGR2GRAY )
    contours = g_mser.detect(gray, None)
    rectangles = []
    circles = []
    for cnt in contours:
        rect = cv2.minAreaRect(cnt)
        area = len(cnt) # MSER returns all points within area, not boundary points
        rectangleArea = float(rect[1][0]*rect[1][1])
        rectangleAspect = max(rect[1][0], rect[1][1]) / float(min(rect[1][0], rect[1][1]))
        if area/rectangleArea > 0.70 and rectangleAspect > 3.0:
            (x,y),(w,h),angle = rect
            rectangles.append( ((int(x+0.5),int(y+0.5)), (int(w+0.5),int(h+0.5)), int(angle)) )
        cir = cv2.minEnclosingCircle(cnt)
        (x,y),radius = cir
        circleArea = math.pi*radius*radius
        if area/circleArea > 0.64:
            circles.append( ((int(x+0.5),int(y+0.5)),int(radius+0.5)) )
    rectangles = removeDuplicities( rectangles )
    result = matchCircRect( circles=circles, rectangles=rectangles )
    if debug:
        for rect in rectangles:
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
            cv2.drawContours( frame,[box],0,(255,0,0),2)
        for cir in circles:
            (x,y),radius = cir
            center = (int(x),int(y))
            radius = int(radius)
            cv2.circle(frame, center, radius, (0,255,0), 2)
        if result:
            (x1,y1),(x2,y2) = result
            cv2.line(frame, (int(x1),int(y1)), (int(x2),int(y2)), (0,0,255), 3)
    return result 
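The area/circleArea > 0.64 test above is a fill-ratio (circularity) check; note that area here counts the MSER region's pixels, since MSER returns all points inside the region rather than a boundary. For boundary contours from cv2.findContours(), a hedged equivalent would use cv2.contourArea():

import math
import cv2

def circularity(contour):
    """Ratio of the contour's area to the area of its minimum enclosing circle;
    close to 1.0 for round blobs, smaller for elongated ones."""
    (_x, _y), radius = cv2.minEnclosingCircle(contour)
    circle_area = math.pi * radius * radius
    return cv2.contourArea(contour) / circle_area if circle_area > 0 else 0.0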
Example #13
Source File: markers_detection.py    From niryo_one_ros with GNU General Public License v3.0
def find_markers_from_img_thresh(img_thresh, max_dist_between_centers=3, min_radius_circle=4,
                                 max_radius_circle=35, min_radius_marker=7):
    contours = cv2.findContours(img_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
    list_potential_markers = []
    for cnt in contours:
        (x, y), radius = cv2.minEnclosingCircle(cnt)
        if not min_radius_circle < radius < max_radius_circle:
            continue
        center = (int(round(x)), int(round(y)))
        radius = int(radius)
        list_potential_markers.append(PotentialMarker(center, radius, cnt))

    list_potential_markers = sorted(list_potential_markers, key=lambda m: m.x)
    list_good_candidates = []

    for i, potential_marker in enumerate(list_potential_markers):
        if potential_marker.is_merged:
            continue
        marker1 = Marker(potential_marker)
        center_marker = marker1.get_center()

        for potential_marker2 in list_potential_markers[i + 1:]:
            if potential_marker2.is_merged:
                continue
            center_potential = potential_marker2.get_center()
            if center_potential[0] - center_marker[0] > max_dist_between_centers:
                break
            dist = euclidean_dist_2_pts(center_marker, center_potential)
            if dist <= max_dist_between_centers:
                marker1.add_circle(potential_marker2)
                center_marker = marker1.get_center()

        if marker1.nb_circles() > 2 and marker1.radius >= min_radius_marker:
            list_good_candidates.append(marker1)
            marker1.get_id_from_slice(img_thresh)

    return list_good_candidates 
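euclidean_dist_2_pts() is defined elsewhere in the project; judging by its name and how it is used above, a minimal equivalent would be the following (an assumption, shown only so the example reads standalone):

import math

def euclidean_dist_2_pts(p1, p2):
    # straight-line distance between two (x, y) points
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])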
Example #14
Source File: tests_single.py    From mbac with MIT License
def test_inner_contour():
    """ displays the inner contour and the minimum circled enclosed on it
    """
    
    #testing inner contour
    
    isExit = False
    
    for frame in frames_list:
        # open the frame in grayscale
        img = cv.imread(path + frame, 0)
        
        #call detect_container from tools_single.py
        inner_cnt = tools.detect_container(img)
        
        # convert the frame to BGR
        img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
        
        # get the minimum enclosing circle and draw it
        # along with the inner contour
        (x,y),radius = cv.minEnclosingCircle(inner_cnt)
        center = (int(x),int(y))
        radius = int(radius)
        cv.circle(img,center,radius,(0,255,0),1)
        cv.drawContours(img, [inner_cnt], -1, (200,0,0), 1)
        
        #display frame
        cv.imshow('image',img)
        
        # delay and 'q' key press to exit the animation
        if cv.waitKey(50) & 0xFF == ord('q'):
            isExit = True
            break
        
    while(not isExit):
        # prevent exit the display window till the user presses 'q'
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    cv.destroyAllWindows()
    
    return None 
Example #15
Source File: tools_single.py    From mbac with MIT License
def detect_container(img):
    """ detects the inner boundary of the petridish in an image
    input
        img: grayscale image as numpy array
    output:
        cnt: an opencv contour object representing the inner border 
              of the petridish
    """
    # edge detection
    edges = cv.Canny(img,18,32)
    
    # dilate edges
    # to close small openings
    kernel = np.ones((2,2),np.uint8)
    edges = cv.dilate(edges,kernel,iterations = 2)
    
    #find contours
    im2, contours, hierarchy = cv.findContours(edges, cv.RETR_TREE, 
                                               cv.CHAIN_APPROX_NONE)
    
    #detect the biggest contour
    outer_cntIndex = np.argmax([cv.contourArea(cnt) for cnt in contours])
    outer_cnt = contours[outer_cntIndex]
    
    #filter contours that have area > 0.6*Area of max contour
    filt_cnts = [cnt for cnt in contours if cv.contourArea(cnt)>0.6*cv.contourArea(outer_cnt)]
    
    # get the minimum-area contour of the filtered ones
    inner_cntIndex = np.argmin([cv.contourArea(cnt) for cnt in filt_cnts])
    inner_cnt= filt_cnts[inner_cntIndex]
    
    # get the minimum enclosing circle for the inner contour
    # to get a perfect circular shape
    # (x,y),radius = cv.minEnclosingCircle(inner_cnt)
    # center = (int(x),int(y))
    # radius = int(radius)
    # cv.circle(img,center,radius,(255,0,255),1)
    
    return inner_cnt 
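The commented-out block above sketches replacing the detected inner contour with its minimum enclosing circle to get a clean circular dish boundary. A hedged follow-up in the same spirit (the helper name container_circle_mask is ours, not from the project):

import cv2 as cv
import numpy as np

def container_circle_mask(img, inner_cnt):
    """Binary mask of the dish interior, built from the minimum enclosing
    circle of the inner contour returned by detect_container()."""
    (x, y), radius = cv.minEnclosingCircle(inner_cnt)
    mask = np.zeros(img.shape[:2], dtype=np.uint8)
    cv.circle(mask, (int(x), int(y)), int(radius), 255, -1)
    return mask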
Example #16
Source File: process_image.py    From RealTime-DigitRecognition with GNU General Public License v3.0
def get_output_image(path):
  
    img = cv2.imread(path,2)
    img_org =  cv2.imread(path)

    ret,thresh = cv2.threshold(img,127,255,0)
    im2,contours,hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

    for j,cnt in enumerate(contours):
        epsilon = 0.01*cv2.arcLength(cnt,True)
        approx = cv2.approxPolyDP(cnt,epsilon,True)
        
        hull = cv2.convexHull(cnt)
        k = cv2.isContourConvex(cnt)
        x,y,w,h = cv2.boundingRect(cnt)
        
        if(hierarchy[0][j][3]!=-1 and w>10 and h>10):
            #putting boundary on each digit
            cv2.rectangle(img_org,(x,y),(x+w,y+h),(0,255,0),2)
            
            #cropping each image and process
            roi = img[y:y+h, x:x+w]
            roi = cv2.bitwise_not(roi)
            roi = image_refiner(roi)
            th,fnl = cv2.threshold(roi,127,255,cv2.THRESH_BINARY)

            # getting prediction of cropped image
            pred = predict_digit(roi)
            print(pred)
            
            # placing label on each digit
            (x,y),radius = cv2.minEnclosingCircle(cnt)
            img_org = put_label(img_org,pred,x,y)

    return img_org 
Example #17
Source File: QuickDrawApp.py    From QuickDraw with MIT License
def main():
    emojis = get_QD_emojis()
    cap = cv2.VideoCapture(0)
    Lower_green = np.array([110, 50, 50])
    Upper_green = np.array([130, 255, 255])
    pts = deque(maxlen=512)
    blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
    digit = np.zeros((200, 200, 3), dtype=np.uint8)
    pred_class = 0

    while (cap.isOpened()):
        ret, img = cap.read()
        img = cv2.flip(img, 1)
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.inRange(hsv, Lower_green, Upper_green)
        mask = cv2.erode(mask, kernel, iterations=2)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        # mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
        mask = cv2.dilate(mask, kernel, iterations=1)
        res = cv2.bitwise_and(img, img, mask=mask)
        cnts, heir = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        center = None

        if len(cnts) >= 1:
            cnt = max(cnts, key=cv2.contourArea)
            if cv2.contourArea(cnt) > 200:
                ((x, y), radius) = cv2.minEnclosingCircle(cnt)
                # compute the centroid before drawing so `center` is defined
                M = cv2.moments(cnt)
                center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
                cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                cv2.circle(img, center, 5, (0, 0, 255), -1)
                pts.appendleft(center)
                for i in range(1, len(pts)):
                    if pts[i - 1] is None or pts[i] is None:
                        continue
                    cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 7)
                    cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 2)
        elif len(cnts) == 0:
            if len(pts) != 0:
                blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
                blur1 = cv2.medianBlur(blackboard_gray, 15)
                blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
                thresh1 = cv2.threshold(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
                blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
                if len(blackboard_cnts) >= 1:
                    cnt = max(blackboard_cnts, key=cv2.contourArea)
                    print(cv2.contourArea(cnt))
                    if cv2.contourArea(cnt) > 2000:
                        x, y, w, h = cv2.boundingRect(cnt)
                        digit = blackboard_gray[y:y + h, x:x + w]
                        pred_probab, pred_class = keras_predict(model, digit)
                        print(pred_class, pred_probab)

            pts = deque(maxlen=512)
            blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
            img = overlay(img, emojis[pred_class], 400, 250, 100, 100)
        cv2.imshow("Frame", img)
        k = cv2.waitKey(10)
        if k == 27:
            break 
Example #18
Source File: detect_crazyflie.py    From ROS-Robotics-By-Example with MIT License
def image_callback(self, msg):

      # convert ROS image to OpenCV image
      try:
         image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
      except CvBridgeError as e:
         print(e)

      # create hsv image of scene
      hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

      # find green objects in the image
      lower_green = numpy.array([50, 50, 177], numpy.uint8)      # fluffy green ball
      upper_green = numpy.array([84, 150, 255], numpy.uint8)
      mask = cv2.inRange(hsv, lower_green, upper_green)

      # dilate and erode (morphological close) with kernel size 11x11
      mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, numpy.ones((11,11)))

      # find all of the contours in the mask image
      contours, heirarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
      self.contourLength  = len(contours)

      # Check for at least one ball found
      if self.contourLength < 1:
         print "No objects found"
         sys.exit("No objects found")        # if no Crazyflie in image, exit process

      ## Loop through all of the contours, and get their areas
      area = [0.0]*len(contours)
      for i in range(self.contourLength):
         area[i] = cv2.contourArea(contours[i])

      #### Ball #### the largest "green" object
      ball_image = contours[area.index(max(area))]

      # Find the circumcircle of the green ball and draw a blue outline around it
      (self.cf_u,self.cf_v),radius = cv2.minEnclosingCircle(ball_image)
      ball_center = (int(self.cf_u),int(self.cf_v))
      ball_radius = int(radius)
      cv2.circle(image, ball_center, ball_radius, (255,0,0), 2)

      # show image with green ball outlined with a blue circle
      cv2.imshow ("KinectV2", image)
      cv2.waitKey(3)


   # This callback function handles processing Kinect depth image, looking for the depth value 
   #   at the location of the center of the green ball on top of Crazyflie. 
Example #19
Source File: L0_mjpg_streamer_filter.py    From SCUTTLE with MIT License
def colorTracking(self, image):

        image = cv2.resize(image,(width,height)) # resize the image

        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)  # convert image to hsv colorspace RENAME THIS TO IMAGE_HSV

        thresh = cv2.inRange(frame_to_thresh, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max)) # Converts a 240x160x3 matrix to a 240x160x1 matrix
        # cv2.inrange discovers the pixels that fall within the specified range and assigns 1's to these pixels and 0's to the others.

        # clean up the threshold image with morphological opening and closing
        kernel = np.ones((5,5),np.uint8)
        mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel) # opening removes small speckles
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) # closing fills small holes

        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2] # find contours (contiguous regions of "1" pixels)
        center = None # create a variable for x, y location of target

        if len(cnts) > 0:   # begin processing if there are "1" pixels discovered

            c = max(cnts, key=cv2.contourArea)  # return the largest target area
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            center = (int(x), int(y))  # defines a circle around the largest target area

            if radius > 6:

                cv2.circle(image, (int(x), int(y)), int(radius),(0, 255, 255), 2) #draw a circle on the image
                cv2.circle(image, (int(x), int(y)), 3, (0, 0, 0), -1) # draw a dot on the target center
                cv2.circle(image, (int(x), int(y)), 1, (255, 255, 255), -1) # draw a dot on the target center

                # cv2.putText(image,"("+str(center[0])+","+str(center[1])+")", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 255),1)

                cv2.putText(image,"("+str(center[0])+","+str(center[1])+")", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0,0,0),2,cv2.LINE_AA)
                cv2.putText(image,"("+str(center[0])+","+str(center[1])+")", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(255,255,255),1,cv2.LINE_AA)

        image_height, image_width, channels = image.shape   # get image dimensions

        spacer = np.zeros((image_height,3,3), np.uint8)
        spacer[:,0:width//2] = (255,255,255)      # (B, G, R)

        # make 3 images to have the same colorspace, for combining
        thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
        # border1 = np.array() # use H, height of photos to define
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        # border2 = np.array() # same as above
        all = np.hstack((image, spacer, thresh, spacer, mask))

        # cv2.line(all,(image_width,0),(image_width,image_height), (0xff, 0xff, 0xff), thickness=3)
        # cv2.line(all,(image_width*2,0),(image_width*2,image_height), (0xff, 0xff, 0xff), thickness=3)

        # draw text on top of the image for identification
        cv2.putText(all,'Original',(10,int(image_height/10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),2,cv2.LINE_AA)
        cv2.putText(all,'Original',(10,int(image_height/10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)

        cv2.putText(all,'Thresh',(image_width+10,int(image_height/10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),2,cv2.LINE_AA)
        cv2.putText(all,'Thresh',(image_width+10,int(image_height/10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)

        cv2.putText(all,'Mask',((image_width*2)+10,int(image_height/10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),2,cv2.LINE_AA)
        cv2.putText(all,'Mask',((image_width*2)+10,int(image_height/10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)

        return all 
Example #20
Source File: color_tracker_calibrate.py    From SCUTTLE with MIT License
def main():

    camera = cv2.VideoCapture(0)

    camera.set(3, width)
    camera.set(4, height)

    while True:

        ret, image = camera.read()
        if not ret:
            break

        image = rotateImage(image, 180)

        if filter == 'RGB':
            frame_to_thresh = image.copy()
        else:
            frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        thresh = cv2.inRange(frame_to_thresh, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))

        kernel = np.ones((5,5),np.uint8)
        mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None

        if len(cnts) > 0:

            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            if radius > 6:

                cv2.circle(image, (int(x), int(y)), int(radius),(0, 255, 255), 2)
                cv2.circle(image, center, 3, (0, 0, 255), -1)
                cv2.putText(image,"centroid", (center[0]+10,center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 255),1)
                cv2.putText(image,"("+str(center[0])+","+str(center[1])+")", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 255),1)

        # cv2.imshow("Original", image)
        # cv2.imshow("Thresh", thresh)
        # cv2.imshow("Mask", mask)

        return image
Example #21
Source File: tracker.py    From telloCV with Apache License 2.0
def track(self, frame):
        """Simple HSV color space tracking"""
        # resize the frame, blur it, and convert it to the HSV
        # color space
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # construct a mask for the color then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, self.color_lower, self.color_upper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only proceed if the radius meets a minimum size
            if radius > 10:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(x), int(y)), int(radius),
                           (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

                self.xoffset = int(center[0] - self.midx)
                self.yoffset = int(self.midy - center[1])
            else:
                self.xoffset = 0
                self.yoffset = 0
        else:
            self.xoffset = 0
            self.yoffset = 0
        return self.xoffset, self.yoffset