Python cv.IPL_DEPTH_8U Examples
The following are 6 code examples of the cv.IPL_DEPTH_8U constant from the legacy cv module (the pre-cv2 OpenCV Python bindings). Each example names the project and source file it was taken from.
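All six examples follow the same basic pattern: an 8-bit NumPy array is wrapped in an IplImage header created with cv.IPL_DEPTH_8U and attached to the array's buffer via cv.SetData. The minimal, self-contained sketch below shows that pattern; the synthetic image and window name are illustrative only and not taken from any of the projects listed here.

import cv
import numpy as np

# A synthetic 8-bit, single-channel image (one byte per pixel).
gray = np.random.randint(0, 256, (480, 640)).astype(np.uint8)

# Create an IplImage header for a (width, height) image with 1 channel,
# then point it at the NumPy buffer; the last argument is the row step in bytes.
image = cv.CreateImageHeader((gray.shape[1], gray.shape[0]), cv.IPL_DEPTH_8U, 1)
cv.SetData(image, gray.tostring(), gray.dtype.itemsize * gray.shape[1])

cv.ShowImage('IPL_DEPTH_8U demo', image)
cv.WaitKey(0)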
Example #1
Source File: frame_convert.py From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def pretty_depth_cv(depth):
    """Converts depth into a 'nicer' format for display

    This is abstracted to allow for experimentation with normalization

    Args:
        depth: A numpy array with 2 bytes per pixel

    Returns:
        An opencv image whose datatype is unspecified
    """
    import cv
    depth = pretty_depth(depth)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 1)
    cv.SetData(image, depth.tostring(), depth.dtype.itemsize * depth.shape[1])
    return image
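A possible way to exercise pretty_depth_cv, assuming the pretty_depth helper defined elsewhere in frame_convert.py and a Kinect depth frame obtained through the freenect bindings (as in Examples #5 and #6):

import cv
import freenect

depth, _ = freenect.sync_get_depth()   # 2 bytes per pixel, as the docstring expects
cv.ShowImage('Depth', pretty_depth_cv(depth))
cv.WaitKey(0)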
Example #2
Source File: frame_convert.py From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def video_cv(video):
    """Converts video into a BGR format for opencv

    This is abstracted out to allow for experimentation

    Args:
        video: A numpy array with 1 byte per pixel, 3 channels RGB

    Returns:
        An opencv image whose datatype is 1 byte, 3 channel BGR
    """
    import cv
    video = video[:, :, ::-1]  # RGB -> BGR
    image = cv.CreateImageHeader((video.shape[1], video.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 3)
    cv.SetData(image, video.tostring(), video.dtype.itemsize * 3 * video.shape[1])
    return image
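A matching usage sketch for video_cv, again assuming a Kinect and the freenect bindings; sync_get_video returns an RGB frame with one byte per pixel and three channels:

import cv
import freenect

rgb, _ = freenect.sync_get_video()     # RGB, 1 byte per pixel, 3 channels
cv.ShowImage('Video', video_cv(rgb))
cv.WaitKey(0)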
Example #3
Source File: faces.py From yournextrepresentative with GNU Affero General Public License v3.0
def detectFaces(im):
    # This function takes a PIL image and finds the patterns defined in the
    # haarcascade function modified from: http://www.lucaamore.com/?p=638

    # Convert a PIL image to a greyscale cv image
    # from: http://pythonpath.wordpress.com/2012/05/08/pil-to-opencv-image/
    im = im.convert('L')
    cv_im = cv.CreateImageHeader(im.size, cv.IPL_DEPTH_8U, 1)
    cv.SetData(cv_im, im.tobytes(), im.size[0])

    # variables
    min_size = (20, 20)
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(cv_im, cv_im)

    # Detect the faces
    faces = cv.HaarDetectObjects(
        cv_im, faceCascade, cv.CreateMemStorage(0),
        haar_scale, min_neighbors, haar_flags, min_size
    )

    return faces
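detectFaces relies on a module-level faceCascade object that is not shown in the snippet. With the legacy cv API it would typically be loaded from a Haar cascade XML file; the sketch below is an assumption about how that might look, and the cascade path and input file name are placeholders rather than values from the original project:

import cv
from PIL import Image

# Load a Haar cascade shipped with OpenCV; adjust the path to your installation.
faceCascade = cv.Load('haarcascade_frontalface_default.xml')

im = Image.open('people.jpg')  # hypothetical input photo
for (x, y, w, h), neighbors in detectFaces(im):
    print (x, y, w, h)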
Example #4
Source File: characters_center.py From captchacker2 with GNU General Public License v3.0
def process_file(filenameIN, WIDTH=31, HEIGHT=31):
    print "processing file: " + filenameIN

    if not (os.path.exists(filenameIN)):
        print "file not found. Aborting."
        return
    else:
        srcImg = cv.LoadImage(filenameIN, 0)
        res = cv.CreateImage((WIDTH, HEIGHT), cv.IPL_DEPTH_8U, 1)
        cv.Set(res, 255)

        xmin = WIDTH
        xmax = 0
        ymin = HEIGHT
        ymax = 0

        for i in range(srcImg.width):
            for j in range(srcImg.height):
                #print "xmax"
                #print cv.Get2D(srcImg, j, i)
                if cv.Get2D(srcImg, j, i)[0] == 0.0:
                    #print "xin"
                    if i < xmin: xmin = i
                    if i > xmax: xmax = i
                    if j < ymin: ymin = j
                    if j > ymax: ymax = j

        offsetx = (WIDTH - (xmax - xmin)) / 2
        offsety = (HEIGHT - (ymax - ymin)) / 2
        #print 'WIDTH', WIDTH, "offset", offsety, offsetx

        for i in range(xmax - xmin):
            for j in range(ymax - ymin):
                if ((offsety + j > 0) and (offsety + j < res.height) and
                        (offsetx + i > 0) and (offsetx + i < res.width)):
                    #print "haha"
                    cv.Set2D(res, offsety + j, offsetx + i,
                             cv.Get2D(srcImg, ymin + j, xmin + i))

        cv.SaveImage(filenameIN, res)
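process_file overwrites each input image in place with a re-centred WIDTH x HEIGHT copy, so a driver loop over a folder of character images could be as simple as the following; the directory name and glob pattern are assumptions, and the function itself expects os and cv to be imported at module level:

import glob

for path in sorted(glob.glob('characters/*.png')):
    process_file(path)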
Example #5
Source File: demo_cv_thresh_sweep.py From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def disp_thresh(lower, upper):
    depth, timestamp = freenect.sync_get_depth()
    depth = 255 * np.logical_and(depth > lower, depth < upper)
    depth = depth.astype(np.uint8)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 1)
    cv.SetData(image, depth.tostring(), depth.dtype.itemsize * depth.shape[1])
    cv.ShowImage('Depth', image)
    cv.WaitKey(10)
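The surrounding demo sweeps a moving window of depth values past the sensor; the driver loop below is a hedged reconstruction of that idea, with an illustrative step size and window width rather than the original file's exact values:

lower = 0
upper = 100
max_depth = 2048                      # 11-bit Kinect depth range

while upper < max_depth:
    disp_thresh(lower, upper)
    lower += 20
    upper += 20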
Example #6
Source File: demo_cv_threshold.py From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def show_depth():
    global threshold
    global current_depth

    depth, timestamp = freenect.sync_get_depth()
    depth = 255 * np.logical_and(depth >= current_depth - threshold,
                                 depth <= current_depth + threshold)
    depth = depth.astype(np.uint8)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 1)
    cv.SetData(image, depth.tostring(), depth.dtype.itemsize * depth.shape[1])
    cv.ShowImage('Depth', image)
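show_depth reads the module-level threshold and current_depth, so the demo needs those globals initialised and some way to change them. A minimal interactive loop might look like the following, with key bindings chosen for illustration rather than copied from the original demo:

threshold = 100
current_depth = 600

while True:
    show_depth()
    key = cv.WaitKey(10)
    if key == 27:                     # Esc quits
        break
    elif key == ord('w'):             # push the depth window further away
        current_depth += 10
    elif key == ord('s'):             # pull the depth window closer
        current_depth -= 10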