Python cv.CvtColor() Examples
The following are 7 code examples of cv.CvtColor(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv, or try the search function.
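All of the snippets below use the legacy cv module (the OpenCV 1.x Python bindings, which target Python 2). In that API, cv.CvtColor(src, dst, code) writes the converted pixels into a destination image that you allocate yourself with a matching size and channel count. A minimal standalone sketch of that pattern; the input and output filenames are assumptions for illustration only:

import cv  # legacy OpenCV 1.x bindings (Python 2)

src = cv.LoadImage('input.jpg')                # assumed input path; loaded as BGR
gray = cv.CreateImage(cv.GetSize(src), 8, 1)   # pre-allocate an 8-bit, 1-channel destination
cv.CvtColor(src, gray, cv.CV_BGR2GRAY)         # convert BGR to grayscale into `gray`
cv.SaveImage('gray.jpg', gray)                 # assumed output path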
Example #1
Source File: engine.py from opencv-engine, MIT License | 5 votes
def image_data_as_rgb(self, update_image=True):
    # TODO: Handle other formats
    if self.image_channels == 4:
        mode = 'BGRA'
    elif self.image_channels == 3:
        mode = 'BGR'
    else:
        mode = 'BGR'
        rgb_copy = cv.CreateImage((self.image.width, self.image.height), 8, 3)
        cv.CvtColor(self.image, rgb_copy, cv.CV_GRAY2BGR)
        self.image = rgb_copy
    return mode, self.image.tostring()
Example #2
Source File: engine.py from opencv-engine, MIT License | 5 votes
def convert_to_grayscale(self):
    if self.image_channels >= 3:
        # FIXME: OpenCV does not support grayscale with alpha channel?
        grayscaled = cv.CreateImage((self.image.width, self.image.height), self.image_depth, 1)
        cv.CvtColor(self.image, grayscaled, cv.CV_BGRA2GRAY)
        self.image = grayscaled
Example #3
Source File: engine.py from opencv-engine, MIT License | 5 votes
def enable_alpha(self):
    if self.image_channels < 4:
        with_alpha = cv.CreateImage(
            (self.image.width, self.image.height), self.image_depth, 4
        )
        if self.image_channels == 3:
            cv.CvtColor(self.image, with_alpha, cv.CV_BGR2BGRA)
        else:
            cv.CvtColor(self.image, with_alpha, cv.CV_GRAY2BGRA)
        self.image = with_alpha
Example #4
Source File: face-detection.py from rpi-opencv, GNU General Public License v3.0 | 5 votes
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "x= " + str(x) + " y= " + str(y) + " w= " + str(w) + " h= " + str(h)

    cv.ShowImage("Face detection", img)
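The snippet above depends on module-level settings (image_scale, haar_scale, min_neighbors, haar_flags, min_size) and on a cascade and capture created elsewhere in face-detection.py. A hedged driver sketch, with assumed values for those globals and an assumed cascade filename, shows how it could be called; it expects detect_and_draw from the example above to be in scope:

import cv  # legacy OpenCV 1.x bindings (Python 2)

# assumed values, chosen only for illustration
image_scale = 2
haar_scale = 1.2
min_neighbors = 2
haar_flags = 0
min_size = (20, 20)

if __name__ == '__main__':
    cascade = cv.Load('haarcascade_frontalface_alt.xml')  # assumed cascade file
    capture = cv.CaptureFromCAM(0)                        # default camera
    cv.NamedWindow("Face detection", 1)
    while True:
        frame = cv.QueryFrame(capture)
        detect_and_draw(frame, cascade)
        if cv.WaitKey(10) == 27:                          # Esc quits
            break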
Example #5
Source File: util.py from multisensory, Apache License 2.0 | 5 votes
def pygame_from_cv(src):
    """ return pygame image from opencv image """
    import cv
    src_rgb = cv.CreateMat(src.height, src.width, cv.CV_8UC3)
    cv.CvtColor(src, src_rgb, cv.CV_BGR2RGB)
    return pygame.image.frombuffer(src_rgb.tostring(), cv.GetSize(src_rgb), "RGB")
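A possible usage sketch for pygame_from_cv; the capture source and window setup are assumptions and not part of the original util.py. It grabs one BGR frame with the legacy API, converts it, and blits it into a pygame window:

import cv
import pygame

capture = cv.CaptureFromCAM(0)                 # assumed: default camera
frame = cv.QueryFrame(capture)                 # BGR IplImage

pygame.init()
screen = pygame.display.set_mode((frame.width, frame.height))
screen.blit(pygame_from_cv(frame), (0, 0))     # uses the function defined above
pygame.display.flip()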
Example #6
Source File: color-2.py from rpi-opencv, GNU General Public License v3.0 | 4 votes
def detect_and_draw(img):
    t1 = time.time()

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # blur the source image to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)

    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
    #cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img)

    # White
    sensitivity = 15
    cv.InRangeS(hsv_img, (0, 0, 255 - sensitivity), (255, sensitivity, 255), thresholded_img)
    # Red
    #cv.InRangeS(hsv_img, (0, 150, 0), (5, 255, 255), thresholded_img)
    # Blue
    #cv.InRangeS(hsv_img, (100, 50, 50), (140, 255, 255), thresholded_img)
    # Green
    #cv.InRangeS(hsv_img, (40, 50, 50), (80, 255, 255), thresholded_img)

    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if area > 5000:
        # determine the x and y coordinates of the center of the object
        # we are tracking by dividing the 1, 0 and 0, 1 moments by the area
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(round(x))
        y = int(round(y))

        # create an overlay to mark the center of the tracked object
        overlay = cv.CreateImage(cv.GetSize(img), 8, 3)
        cv.Circle(overlay, (x, y), 2, (0, 0, 0), 20)
        cv.Add(img, overlay, img)

        # add the thresholded image back to the img so we can see what was
        # left after it was applied
        #cv.Merge(thresholded_img, None, None, None, img)

        t2 = time.time()
        message = "Color tracked!"
        print "detection time = %gs x=%d,y=%d" % (round(t2 - t1, 3), x, y)

    cv.ShowImage("Color detection", img)
Example #7
Source File: color-1.py from rpi-opencv, GNU General Public License v3.0 | 4 votes
def run(self):
    while True:
        img = cv.QueryFrame(self.capture)
        t1 = time.time()

        # blur the source image to reduce color noise
        cv.Smooth(img, img, cv.CV_BLUR, 3)

        # convert the image to hsv (Hue, Saturation, Value) so it's
        # easier to determine the color to track (hue)
        hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

        # limit all pixels that don't match our criteria; in this case we are
        # looking for purple, but if you want you can adjust the first value in
        # both tuples, which is the hue range (120, 140). OpenCV uses 0-180 as
        # the hue range for the HSV color model
        thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

        # White
        sensitivity = 10
        cv.InRangeS(hsv_img, (0, 0, 255 - sensitivity), (255, sensitivity, 255), thresholded_img)
        # Red
        #cv.InRangeS(hsv_img, (0, 150, 0), (5, 255, 255), thresholded_img)
        # Blue
        #cv.InRangeS(hsv_img, (100, 50, 50), (140, 255, 255), thresholded_img)
        # Green
        #cv.InRangeS(hsv_img, (40, 50, 50), (80, 255, 255), thresholded_img)

        # determine the object's moments and check that the area is large
        # enough to be our object
        mat = cv.GetMat(thresholded_img)
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)

        # there can be noise in the video so ignore objects with small areas
        if area > 10000:
            # determine the x and y coordinates of the center of the object
            # we are tracking by dividing the 1, 0 and 0, 1 moments by the area
            x = cv.GetSpatialMoment(moments, 1, 0) / area
            y = cv.GetSpatialMoment(moments, 0, 1) / area
            x = int(round(x))
            y = int(round(y))

            # create an overlay to mark the center of the tracked object
            overlay = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20)
            cv.Add(img, overlay, img)

            # add the thresholded image back to the img so we can see what was
            # left after it was applied
            t2 = time.time()
            cv.Merge(thresholded_img, None, None, None, img)
            print "detection time = %gs x=%d,y=%d" % (round(t2 - t1, 3), x, y)

        # display the image
        cv.ShowImage(color_tracker_window, img)
        if cv.WaitKey(10) == 27:
            break
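For comparison only (none of the projects above use it): the modern cv2 API expresses the same conversions with cv2.cvtColor, which returns a new array instead of filling a pre-allocated destination. A minimal sketch, assuming a BGR image on disk at a hypothetical path:

import cv2

frame = cv2.imread('input.jpg')                  # assumed path; imread returns BGR
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   # counterpart of cv.CV_BGR2GRAY
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)     # counterpart of cv.CV_BGR2HSV
bgra = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)   # counterpart of cv.CV_BGR2BGRA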