Python cv2.cv.CreateImage() Examples

The following are 19 code examples of cv2.cv.CreateImage(), collected from open-source projects. The line above each example names the project and source file it was taken from. You may also want to check out all available functions and classes of the cv2.cv module.
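Before the examples, a quick orientation: cv.CreateImage() takes a (width, height) tuple, a bit-depth constant such as cv.IPL_DEPTH_8U, and a channel count, and returns a blank IplImage that other legacy-cv calls then fill in. Here is a minimal sketch, assuming OpenCV 2.4.x (the last release line that ships the legacy cv2.cv bindings); the color value and file name are only illustrative:

import cv2.cv as cv

width, height = 320, 240
img = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)  # (width, height), 8-bit depth, 3 channels
cv.Set(img, (0, 128, 255))            # fill the whole image with a single BGR color
cv.SaveImage("solid_color.png", img)  # write it to disk so the result can be inspected

The examples below follow the same pattern: allocate a destination image with cv.CreateImage(), sized and typed to match the source, then pass it to a legacy-cv operation such as cv.CvtColor, cv.Resize, or cv.WarpAffine that writes into it.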
Example #1
Source File: engine.py    From opencv-engine with MIT License
def gen_image(self, size, color_value):
        img0 = cv.CreateImage(size, self.image_depth, self.image_channels)
        if color_value == 'transparent':
            color = (255, 255, 255, 255)
        else:
            color = self.parse_hex_color(color_value)
            if not color:
                raise ValueError('Color %s is not valid.' % color_value)
        cv.Set(img0, color)
        return img0 
Example #2
Source File: camshift.py    From PyCV-time with MIT License
def hue_histogram_as_image(self, hist):
        """ Return the hue histogram rendered as a 320x200 BGR image. """

        histimg_hsv = cv.CreateImage( (320,200), 8, 3)

        mybins = cv.CloneMatND(hist.bins)
        cv.Log(mybins, mybins)
        (_, hi, _, _) = cv.MinMaxLoc(mybins)
        cv.ConvertScale(mybins, mybins, 255. / hi)

        w,h = cv.GetSize(histimg_hsv)
        hdims = cv.GetDims(mybins)[0]
        for x in range(w):
            xh = (180 * x) / (w - 1)  # hue sweeps from 0-180 across the image
            val = int(mybins[int(hdims * x / w)] * h / 255)
            cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
            cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)

        histimg = cv.CreateImage( (320,200), 8, 3)
        cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
        return histimg 
Example #3
Source File: engine.py    From opencv-engine with MIT License
def rotate(self, degrees):
        if (degrees > 180):
            # Flip around both axes
            cv.Flip(self.image, None, -1)
            degrees = degrees - 180

        img = self.image
        size = cv.GetSize(img)

        if (degrees / 90 % 2):
            new_size = (size[1], size[0])
            center = ((size[0] - 1) * 0.5, (size[0] - 1) * 0.5)
        else:
            new_size = size
            center = ((size[0] - 1) * 0.5, (size[1] - 1) * 0.5)

        mapMatrix = cv.CreateMat(2, 3, cv.CV_64F)
        cv.GetRotationMatrix2D(center, degrees, 1.0, mapMatrix)
        dst = cv.CreateImage(new_size, self.image_depth, self.image_channels)
        cv.SetZero(dst)
        cv.WarpAffine(img, dst, mapMatrix)
        self.image = dst 
Example #4
Source File: Tshirt.py    From virtual-dressing-room with Apache License 2.0
def __init__(self):
        self.norm_rgb=np.zeros((config.height,config.width,3),np.uint8)
        self.dst=np.zeros((config.height,config.width),np.uint8)
        self.b=0
        self.g=0
        self.r=0
        
        self.lb=0
        self.lg=0
        self.lr=0
        
        self.m=np.zeros((config.height,config.width),np.uint8)
        #self.win=cv2.namedWindow("detect")
        #self.dst=cv.CreateImage((config.width,config.height),8,1)
        
        #cv2.createTrackbar("blue", "detect",0,255,self.change_b)
        #cv2.createTrackbar("green","detect",0,255,self.change_g)
        #cv2.createTrackbar("red","detect",0,255,self.change_r)
        
        #cv2.createTrackbar("low_blue", "detect",0,255,self.change_lb)
        #cv2.createTrackbar("low_green","detect",0,255,self.change_lg)
        #cv2.createTrackbar("low_red","detect",0,255,self.change_lr) 
Example #5
Source File: engine.py    From opencv-engine with MIT License
def resize(self, width, height):
        thumbnail = cv.CreateImage(
            (int(round(width, 0)), int(round(height, 0))),
            self.image_depth,
            self.image_channels
        )
        cv.Resize(self.image, thumbnail, cv.CV_INTER_AREA)
        self.image = thumbnail 
Example #6
Source File: demhist.py    From PyCV-time with MIT License
def __init__(self, src_image):
        self.src_image = src_image
        self.dst_image = cv.CloneMat(src_image)
        self.hist_image = cv.CreateImage((320, 200), 8, 1)
        self.hist = cv.CreateHist([hist_size], cv.CV_HIST_ARRAY, ranges, 1)

        self.brightness = 0
        self.contrast = 0

        cv.NamedWindow("image", 0)
        cv.NamedWindow("histogram", 0)
        cv.CreateTrackbar("brightness", "image", 100, 200, self.update_brightness)
        cv.CreateTrackbar("contrast", "image", 100, 200, self.update_contrast)

        self.update_brightcont() 
Example #7
Source File: facedetect.py    From PyCV-time with MIT License
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img) 
Example #8
Source File: Camera.py    From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def getImage(self):
        """
        **SUMMARY**

        Retrieve an Image-object from the virtual camera.
        **RETURNS**

        A SimpleCV Image from the camera.

        **EXAMPLES**

        >>> cam = VirtualCamera()
        >>> while True:
        >>>    cam.getImage().show()

        """
        if (self.sourcetype == 'image'):
            self.counter = self.counter + 1
            return Image(self.source, self)

        elif (self.sourcetype == 'imageset'):
            print len(self.source)
            img = self.source[self.counter % len(self.source)]
            self.counter = self.counter + 1
            return img

        elif (self.sourcetype == 'video'):
            # cv.QueryFrame returns None if the video is finished
            frame = cv.QueryFrame(self.capture)
            if frame:
                img = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
                cv.Copy(frame, img)
                return Image(img, self)
            else:
                return None

        elif (self.sourcetype == 'directory'):
            img = self.findLastestImage(self.source, 'bmp')
            self.counter = self.counter + 1
            return Image(img, self) 
Example #9
Source File: Camera.py    From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def getImage(self):
        """
        **SUMMARY**

        Retrieve an Image-object from the camera.  If you experience problems
        with stale frames from the camera's hardware buffer, increase the flushcache
        number to dequeue multiple frames before retrieval.

        We're working on how to solve this problem.

        **RETURNS**

        A SimpleCV Image from the camera.

        **EXAMPLES**

        >>> cam = Camera()
        >>> while True:
        >>>    cam.getImage().show()

        """

        if self.pygame_camera:
            return Image(self.pygame_buffer.copy())

        if (not self.threaded):
            cv.GrabFrame(self.capture)
            self.capturetime = time.time()
        else:
            self.capturetime = self._threadcapturetime

        frame = cv.RetrieveFrame(self.capture)
        newimg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
        cv.Copy(frame, newimg)
        return Image(newimg, self) 
Example #10
Source File: motion-detection.py    From rpi-opencv with GNU General Public License v3.0
def __init__(self,threshold=25, doRecord=True, showWindows=True):
        self.writer = None
        self.font = None
        self.doRecord=doRecord #Whether or not to record the moving object
        self.show = showWindows #Whether or not to show the 2 windows
        self.frame = None
    
        #self.capture=cv.CaptureFromCAM(0)
        self.capture=cv.CaptureFromFile('crash-480.mp4')
        self.frame = cv.QueryFrame(self.capture) #Take a frame to init recorder
        if doRecord:
            self.initRecorder()
        
        self.gray_frame = cv.CreateImage(cv.GetSize(self.frame), cv.IPL_DEPTH_8U, 1)
        self.average_frame = cv.CreateImage(cv.GetSize(self.frame), cv.IPL_DEPTH_32F, 3)
        self.absdiff_frame = None
        self.previous_frame = None
        
        self.surface = self.frame.width * self.frame.height
        self.currentsurface = 0
        self.currentcontours = None
        self.threshold = threshold
        self.isRecording = False
        self.trigger_time = 0 #Hold timestamp of the last detection
        
        if showWindows:
            cv.NamedWindow("Image")
            cv.CreateTrackbar("Detection threshold: ", "Image", self.threshold, 100, self.onChange)
Example #11
Source File: tcg_ocr_scanner.py    From tcg-ocr-scanner with GNU General Public License v3.0
def doGreyscaleProcessor(self, img):
		(x, y, w, h) = self.frame.rect
		bw = cv.CreateImage((w, h), 8, 1)
		cv.CvtColor(img, bw, cv.CV_RGB2GRAY)
		return bw 
Example #12
Source File: LSBSteg.py    From Stego_Dropper with MIT License
def unhideImage(self):
        width = int(self.readBits(16),2) #Read 16 bits and convert them to an int
        height = int(self.readBits(16),2)
        unhideimg = cv.CreateImage((width,height), 8, 3) #Create an image in which we will put all the pixels read
        for h in range(height):
            for w in range(width):
                for chan in range(unhideimg.channels):
                    val = list(unhideimg[h,w])
                    val[chan] = int(self.readByte(),2) #Read the value
                    unhideimg[h,w] = tuple(val)
        return unhideimg 
Example #13
Source File: engine.py    From opencv-engine with MIT License
def enable_alpha(self):
        if self.image_channels < 4:
            with_alpha = cv.CreateImage(
                (self.image.width, self.image.height), self.image_depth, 4
            )
            if self.image_channels == 3:
                cv.CvtColor(self.image, with_alpha, cv.CV_BGR2BGRA)
            else:
                cv.CvtColor(self.image, with_alpha, cv.CV_GRAY2BGRA)
            self.image = with_alpha 
Example #14
Source File: engine.py    From opencv-engine with MIT License
def image_data_as_rgb(self, update_image=True):
        # TODO: Handle other formats
        if self.image_channels == 4:
            mode = 'BGRA'
        elif self.image_channels == 3:
            mode = 'BGR'
        else:
            mode = 'BGR'
            rgb_copy = cv.CreateImage((self.image.width, self.image.height), 8, 3)
            cv.CvtColor(self.image, rgb_copy, cv.CV_GRAY2BGR)
            self.image = rgb_copy
        return mode, self.image.tostring() 
Example #15
Source File: engine.py    From opencv-engine with MIT License
def crop(self, left, top, right, bottom):
        new_width = right - left
        new_height = bottom - top
        cropped = cv.CreateImage(
            (new_width, new_height), self.image_depth, self.image_channels
        )
        src_region = cv.GetSubRect(self.image, (left, top, new_width, new_height))
        cv.Copy(src_region, cropped)

        self.image = cropped 
Example #16
Source File: fitellipse.py    From PyCV-time with MIT License
def process_image(self, slider_pos):
        """
        This function finds contours, draws them and their approximation by ellipses.
        """
        stor = cv.CreateMemStorage()

        # Create the destination images
        image02 = cv.CloneImage(self.source_image)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image), cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image. This is needed for cv.FindContours().
        cv.Threshold(self.source_image, image02, slider_pos, 255, cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02,
            stor,
            cv.CV_RETR_LIST,
            cv.CV_CHAIN_APPROX_NONE,
            (0, 0))

        for c in contour_iterator(cont):
            # cv.FitEllipse2 needs at least 6 points
            if len(c) >= 6:
                # Copy the contour into an array of (x,y)s
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray,0,1,8,(0,0))

                # Fits ellipse to current contour.
                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                # Draw ellipse in random color
                color = cv.CV_RGB(random.randrange(256),random.randrange(256),random.randrange(256))
                cv.Ellipse(image04, center, size,
                          angle, 0, 360,
                          color, 2, cv.CV_AA, 0)

        # Show image. HighGUI use.
        cv.ShowImage( "Result", image04 ) 
Example #17
Source File: motempl.py    From PyCV-time with MIT License
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi) # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0,0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0) 
Example #18
Source File: cv20squares.py    From PyCV-time with MIT License
def find_squares4(color_img):
    """
    Finds multiple squares in image

    Steps:
    -Use Canny edge to highlight contours, and dilation to connect
    the edge segments.
    -Threshold the result to binary edge tokens
    -Use cv.FindContours: returns a cv.CvSequence of cv.CvContours
    -Filter each candidate: use Approx poly, keep only contours with 4 vertices,
    enough area, and ~90deg angles.

    Return all squares contours in one flat list of arrays, 4 x,y points each.
    """
    #select even sizes only
    width, height = (color_img.width & -2, color_img.height & -2 )
    timg = cv.CloneImage( color_img ) # make a copy of input image
    gray = cv.CreateImage( (width,height), 8, 1 )

    # select the maximum ROI in the image
    cv.SetImageROI( timg, (0, 0, width, height) )

    # down-scale and upscale the image to filter out the noise
    pyr = cv.CreateImage( (width/2, height/2), 8, 3 )
    cv.PyrDown( timg, pyr, 7 )
    cv.PyrUp( pyr, timg, 7 )

    tgray = cv.CreateImage( (width,height), 8, 1 )
    squares = []

    # Find squares in every color plane of the image
    # Two methods, we use both:
    # 1. Canny to catch squares with gradient shading. Use upper threshold
    # from slider, set the lower to 0 (which forces edges merging). Then
    # dilate canny output to remove potential holes between edge segments.
    # 2. Binary thresholding at multiple levels
    N = 11
    for c in [0, 1, 2]:
        #extract the c-th color plane
        cv.SetImageCOI( timg, c+1 )
        cv.Copy( timg, tgray, None )
        cv.Canny( tgray, gray, 0, 50, 5 )
        cv.Dilate( gray, gray)
        squares = squares + find_squares_from_binary( gray )

        # Look for more squares at several threshold levels
        for l in range(1, N):
            cv.Threshold( tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY )
            squares = squares + find_squares_from_binary( gray )

    return squares 
Example #19
Source File: camshift.py    From PyCV-time with MIT License
def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
        backproject_mode = False
        while True:
            frame = cv.QueryFrame( self.capture )

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject( [self.hue], backproject, hist )
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x,y,w,h = self.selection
                cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255))

                sel = cv.GetSubRect(self.hue, self.selection )
                cv.CalcArrHist( [sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

            if not backproject_mode:
                cv.ShowImage( "CamShiftDemo", frame )
            else:
                cv.ShowImage( "CamShiftDemo", backproject)
            cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode