Python PyQt5.QtGui.QImage.Format_RGB888() Examples

The following are 19 code examples of PyQt5.QtGui.QImage.Format_RGB888(), collected from open-source projects. You can go to the original project or source file by following the link above each example, or check out all available functions/classes of the module PyQt5.QtGui.QImage.
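
Before diving into the examples, here is a minimal sketch of the pattern most of them share: wrap a contiguous H x W x 3 uint8 NumPy array (typically an OpenCV frame) in a QImage, passing the row stride as bytesPerLine. The array shape and variable names below are illustrative only.

import numpy as np
from PyQt5.QtGui import QImage

frame = np.zeros((480, 640, 3), dtype=np.uint8)    # stand-in for a BGR frame from cv2
rgb = np.ascontiguousarray(frame[:, :, ::-1])      # BGR -> RGB, contiguous buffer
qimg = QImage(rgb.data, rgb.shape[1], rgb.shape[0],
              rgb.strides[0], QImage.Format_RGB888)
qimg = qimg.copy()  # detach from the NumPy buffer so `rgb` can be freed safely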
Example #1
Source File: MainWindow.py    From Traffic-Rules-Violation-Detection with GNU General Public License v3.0 7 votes
def toQImage(self, raw_img):
        from numpy import copy
        img = copy(raw_img)
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888

        outImg = QImage(img.tobytes(), img.shape[1], img.shape[0], img.strides[0], qformat)
        outImg = outImg.rgbSwapped()
        return outImg 
Example #2
Source File: main.py    From BeautyCamera with MIT License 6 votes
def show_image(self):
        img_cv = cv2.cvtColor(self.current_img, cv2.COLOR_RGB2BGR)
        img_height, img_width, _ = img_cv.shape  # numpy shape is (rows, cols, channels)
        ratio_img = img_width / img_height
        ratio_scene = self.ui.graphicsView.width() / self.ui.graphicsView.height()
        # fit the image inside the graphics view while keeping its aspect ratio
        if ratio_img > ratio_scene:
            width = int(self.ui.graphicsView.width())
            height = int(self.ui.graphicsView.width() / ratio_img)
        else:
            width = int(self.ui.graphicsView.height() * ratio_img)
            height = int(self.ui.graphicsView.height())
        img_resize = cv2.resize(img_cv, (width - 5, height - 5), interpolation=cv2.INTER_AREA)  # cv2.resize takes (width, height)
        h, w, c = img_resize.shape
        bytesPerLine = w * 3
        qimg = QImage(img_resize.data, w, h, bytesPerLine, QImage.Format_RGB888)
        self.scene = QGraphicsScene()
        pix = QPixmap(qimg)
        self.scene.addPixmap(pix)
        self.ui.graphicsView.setScene(self.scene)

# Show the grayscale image
Example #3
Source File: demoBebopVisionGUI.py    From pyparrot with MIT License 6 votes
def draw_current_photo():
    """
    Quick demo of returning an image to show in the user window.  Clearly one would want to make this a dynamic image
    """
    image = cv2.imread('test_image_000001.png')

    if (image is not None):
        if len(image.shape) < 3 or image.shape[2] == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        height, width, byteValue = image.shape
        byteValue = byteValue * width

        qimage = QImage(image, width, height, byteValue, QImage.Format_RGB888)

        return qimage
    else:
        return None 
Example #4
Source File: core.py    From face_recognition_py with GNU General Public License v3.0 6 votes
def displayImage(self, img, qlabel):
        # BGR -> RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Default: the image is stored using 8-bit indexes into a colormap, e.g. a grayscale image
        qformat = QImage.Format_Indexed8

        if len(img.shape) == 3:  # rows[0], cols[1], channels[2]
            if img.shape[2] == 4:
                # The image is stored using a 32-bit byte-ordered RGBA format (8-8-8-8)
                # A: alpha channel (opacity). A pixel whose alpha value is 0% is fully transparent
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888

        # img.shape[1]: image width, img.shape[0]: image height, img.shape[2]: number of channels
        # QImage.__init__ (self, bytes data, int width, int height, int bytesPerLine, Format format)
        # construct the QImage from the raw img buffer in memory
        # img.strides[0]: bytes per row (width * channels) -- 3 bytes per pixel for RGB, 4 for RGBA
        # strides[0] is the outermost stride (bytes taken by one row, a 2-D sub-array), strides[1] the next level (bytes per pixel), strides[2] the innermost (bytes per element)
        # reading from the inside out: strides[2] is 1 byte (uint8), strides[1] is 3 * 1 bytes (the 3 RGB channels),
        # and strides[0] is width * 3 bytes, where width is the number of pixels in a row

        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        qlabel.setPixmap(QPixmap.fromImage(outImage))
        qlabel.setScaledContents(True)  # scale the pixmap to fit the label

    # Alarm system: whether the device is allowed to ring
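
To make the strides comment in the example above concrete, here is a quick check on an arbitrarily sized RGB array (the shape is chosen only for illustration):

import numpy as np

rgb = np.zeros((480, 640, 3), dtype=np.uint8)
print(rgb.strides)   # (1920, 3, 1): bytes per row (640 * 3), per pixel, per channel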
Example #5
Source File: gui_utilities.py    From CvStudio with MIT License 6 votes
def array_to_qimage(im: np.ndarray, copy=False):
        gray_color_table = [qRgb(i, i, i) for i in range(256)]
        if im is None:
            return QImage()
        if im.dtype == np.uint8:
            if len(im.shape) == 2:
                qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_Indexed8)
                qim.setColorTable(gray_color_table)
                return qim.copy() if copy else qim

            elif len(im.shape) == 3:
                if im.shape[2] == 3:
                    qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGB888)
                    return qim.copy() if copy else qim
                elif im.shape[2] == 4:
                    qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_ARGB32)
                    return qim.copy() if copy else qim 
Example #6
Source File: image_widget.py    From robovision with GNU General Public License v3.0 5 votes
def get_qimage(self, image: np.ndarray):
        height, width, colors = image.shape
        bytesPerLine = 3 * width

        # composing image from image data
        image = QImage(image.data,
                       width,
                       height,
                       bytesPerLine,
                       QImage.Format_RGB888)

        image = image.rgbSwapped()
        return image 
Example #7
Source File: run.py    From reconet with MIT License 5 votes
def run(self):
        cap = cv2.VideoCapture(0)
        fps_update_cnt = 0
        fps_update_num = 10
        while True:
            display_time = time.time()
            ret, x_np = cap.read()
            x_np = cv2.cvtColor(x_np, cv2.COLOR_BGR2RGB)

            y_np, inference_time = self.transfer(x_np)

            x_qt = QImage(x_np.data, x_np.shape[1], x_np.shape[0], QImage.Format_RGB888)
            x_qt = QPixmap.fromImage(x_qt)
            x_qt = x_qt.scaled(self.video_width, self.video_height, Qt.KeepAspectRatio)

            y_qt = QImage(y_np.data, y_np.shape[1], y_np.shape[0], QImage.Format_RGB888)
            y_qt = QPixmap.fromImage(y_qt)
            y_qt = y_qt.scaled(self.video_width, self.video_height, Qt.KeepAspectRatio)

            self.change_pixmap_x.emit(x_qt)
            self.change_pixmap_y.emit(y_qt)

            fps_update_cnt = (fps_update_cnt + 1) % fps_update_num
            if fps_update_cnt == 0:
                self.change_pixmap_inf.emit('    Inference FPS: {0:.2f}'.format(
                    1 / inference_time if inference_time is not None else 0))
                display_time = time.time() - display_time
                self.change_pixmap_dis.emit('    Display FPS: {0:.2f}'.format(1 / display_time)) 
Example #8
Source File: dataRecord.py    From face_recognition_py with GNU General Public License v3.0 5 votes
def displayImage(self, img):
        # BGR -> RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Default: the image is stored using 8-bit indexes into a colormap, e.g. a grayscale image
        qformat = QImage.Format_Indexed8

        if len(img.shape) == 3:  # rows[0], cols[1], channels[2]
            if img.shape[2] == 4:
                # The image is stored using a 32-bit byte-ordered RGBA format (8-8-8-8)
                # A: alpha channel (opacity). A pixel whose alpha value is 0% is fully transparent
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888

        # img.shape[1]: image width, img.shape[0]: image height, img.shape[2]: number of channels
        # QImage.__init__ (self, bytes data, int width, int height, int bytesPerLine, Format format)
        # construct the QImage from the raw img buffer in memory
        # img.strides[0]: bytes per row (width * channels) -- 3 bytes per pixel for RGB, 4 for RGBA
        # strides[0] is the outermost stride (bytes taken by one row, a 2-D sub-array), strides[1] the next level (bytes per pixel), strides[2] the innermost (bytes per element)
        # reading from the inside out: strides[2] is 1 byte (uint8), strides[1] is 3 * 1 bytes (the 3 RGB channels),
        # and strides[0] is width * 3 bytes, where width is the number of pixels in a row

        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        self.faceDetectCaptureLabel.setPixmap(QPixmap.fromImage(outImage))
        self.faceDetectCaptureLabel.setScaledContents(True)

    # Initialize the database
Example #9
Source File: imutils.py    From SickZil-Machine with GNU Affero General Public License v3.0 5 votes
def qimg2nparr(qimg): 
    ''' convert a 32-bit qimg (e.g. Format_RGB32) -> h x w x 4 BGRA numpy array '''
    # NOTE: this may need to change or be extended depending on the input image format;
    # for now it is only used for canvas strokes.

    #qimg = qimg.convertToFormat(QImage.Format_RGB32)
    #qimg = qimg.convertToFormat(QImage.Format_RGB888)
    h,w = qimg.height(), qimg.width()
    ptr = qimg.constBits()
    ptr.setsize(h * w * 4)
    print(h,w,ptr)
    return np.frombuffer(ptr, np.uint8).reshape(h, w, 4)  # view onto the QImage buffer (no copy)
    #return np.array(ptr).reshape(h, w, 3)  #  Copies the data 
Example #10
Source File: imutils.py    From SickZil-Machine with GNU Affero General Public License v3.0 5 votes
def nparr2qimg(cvimg):
    ''' convert cv2 bgr image -> rgb qimg '''
    h,w,c = cvimg.shape
    byte_per_line = w * c  # bytes per scanline (use cvimg.strides[0] if the rows are padded)
    return QImage(cvimg.data, w,h, byte_per_line, 
                  QImage.Format_RGB888).rgbSwapped() 
Example #11
Source File: main.py    From BeautyCamera with MIT License 5 votes
def show_camera(self):
        flag, self.camera_image = self.cap.read()
        show = cv2.resize(self.image, (640, 480))
        show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
        self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))

# Initialization
Example #12
Source File: FacePoints.py    From PyQt with GNU General Public License v3.0 5 votes
def onCapture(self):
        _, frame = self.cap.read()

        minisize = (
            int(frame.shape[1] / DOWNSCALE), int(frame.shape[0] / DOWNSCALE))
        tmpframe = cv2.resize(frame, minisize)
        tmpframe = cv2.cvtColor(tmpframe, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        tmpframe = cv2.equalizeHist(tmpframe)

        # minNeighbors=5 means each candidate must be detected at least 5 times
        faces = self.cascade.detectMultiScale(tmpframe, minNeighbors=5)
        del tmpframe
        if len(faces) < 1:  # no face detected
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img = QImage(
                frame.data, frame.shape[1], frame.shape[0], frame.shape[1] * 3, QImage.Format_RGB888)
            del frame
            return self.setPixmap(QPixmap.fromImage(img))
        # landmark detection and drawing
        for x, y, w, h in faces:
            x, y, w, h = x * DOWNSCALE, y * DOWNSCALE, w * DOWNSCALE, h * DOWNSCALE
            # draw the face rectangle
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0))
            # the cropped face region
            tmpframe = frame[y:y + h, x:x + w]
            # detect the landmark points
            rects = self.detector(tmpframe, 1)
            if len(rects) > 0:
                landmarks = numpy.matrix(
                    [[p.x, p.y] for p in self.predictor(tmpframe, rects[0]).parts()])
                for _, point in enumerate(landmarks):
                    pos = (point[0, 0] + x, point[0, 1] + y)
                    # draw the point on the original frame
                    cv2.circle(frame, pos, 3, color=(0, 255, 0))
        # convert to something Qt can display, once every detected face has been drawn
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = QImage(
            frame.data, frame.shape[1], frame.shape[0], frame.shape[1] * 3, QImage.Format_RGB888)
        del frame
        self.setPixmap(QPixmap.fromImage(img))
Example #13
Source File: gui.py    From PUBGIS with GNU General Public License v3.0 5 votes
def _update_view_with_image(view, image_array):
        image = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
        height, width, _ = image.shape
        bytes_per_line = 3 * width
        qimg = QImage(image.data, width, height, bytes_per_line, QImage.Format_RGB888)

        view.scene().items()[0].setPixmap(QPixmap.fromImage(qimg))
        PUBGISMainWindow._fit_in_view(view,
                                      view.scene().itemsBoundingRect(),
                                      flags=Qt.KeepAspectRatio) 
Example #14
Source File: VideoMainWindow.py    From Insect_Identification with Apache License 2.0 5 votes
def convertFrame(self):
        """     converts frame to format suitable for QtGui            """
        try:
            height, width = self.currentFrame.shape[:2]
            img = QImage(self.currentFrame, width, height, QImage.Format_RGB888)
            img = QPixmap.fromImage(img)
            self.previousFrame = self.currentFrame
            return img
        except:
            return None 
Example #15
Source File: VideoMainWindow.py    From Insect_Identification with Apache License 2.0 5 votes
def convertSpecifiedFrame(frame):
        """     converts frame to format suitable for QtGui            """
        try:
            height, width = frame.shape[:2]
            img = QImage(frame, width, height, QImage.Format_RGB888)
            img = QPixmap.fromImage(img)
            return img
        except:
            return None 
Example #16
Source File: VideoMainWindow.py    From Insect_Identification with Apache License 2.0 5 votes
def drawpic(self,FFRRAAMM):
        """     converts frame to format suitable for QtGui            """
        try:
            height, width = FFRRAAMM.shape[:2]
            img = QImage(FFRRAAMM, width, height, QImage.Format_RGB888)
            img = QPixmap.fromImage(img)
            return img
        except:
            return None 
Example #17
Source File: Ui_MakupGUI.py    From AIMakeup with Apache License 2.0 5 votes
def _cv2qimg(self,cvImg):
        '''
        Convert an OpenCV (BGR) image to a QImage
        '''
        height, width, channel = cvImg.shape
        bytesPerLine = 3 * width
        return QImage(cv2.cvtColor(cvImg,cv2.COLOR_BGR2RGB).data, width, height, bytesPerLine, QImage.Format_RGB888) 
Example #18
Source File: MainWindow.py    From Traffic-Rules-Violation-Detection-System with GNU General Public License v3.0 5 votes
def toQImage(self, raw_img):
        from numpy import copy
        img = copy(raw_img)
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888

        outImg = QImage(img.tobytes(), img.shape[1], img.shape[0], img.strides[0], qformat)
        outImg = outImg.rgbSwapped()
        return outImg 
Example #19
Source File: SolidView.py    From Cura with GNU Lesser General Public License v3.0 4 votes
def endRendering(self):
        # check whether the xray overlay is showing badness
        if time.time() > self._next_xray_checking_time\
                and Application.getInstance().getPreferences().getValue(self._show_xray_warning_preference):
            self._next_xray_checking_time = time.time() + self._xray_checking_update_time

            xray_img = self._xray_pass.getOutput()
            xray_img = xray_img.convertToFormat(QImage.Format_RGB888)

            # We can't just read the image since the pixels are aligned to internal memory positions.
            # xray_img.byteCount() != xray_img.width() * xray_img.height() * 3
            # The byte count is a little higher sometimes. We need to check the data per line, but fast using Numpy.
            # See https://stackoverflow.com/questions/5810970/get-raw-data-from-qimage for a description of the problem.
            # We can't use that solution though, since it doesn't perform well in Python.
            class QImageArrayView:
                """
                Class that ducktypes to be a Numpy ndarray.
                """
                def __init__(self, qimage):
                    bits_pointer = qimage.bits()
                    if bits_pointer is None:  # If this happens before there is a window.
                        self.__array_interface__ = {
                            "shape": (0, 0),
                            "typestr": "|u4",
                            "data": (0, False),
                            "strides": (1, 3),
                            "version": 3
                        }
                    else:
                        self.__array_interface__ = {
                            "shape": (qimage.height(), qimage.width()),
                            "typestr": "|u4", # Use 4 bytes per pixel rather than 3, since Numpy doesn't support 3.
                            "data": (int(bits_pointer), False),
                            "strides": (qimage.bytesPerLine(), 3),  # This does the magic: For each line, skip the correct number of bytes. Bytes per pixel is always 3 due to QImage.Format.Format_RGB888.
                            "version": 3
                        }
            array = np.asarray(QImageArrayView(xray_img)).view(np.dtype({
                "r": (np.uint8, 0, "red"),
                "g": (np.uint8, 1, "green"),
                "b": (np.uint8, 2, "blue"),
                "a": (np.uint8, 3, "alpha")  # Never filled since QImage was reformatted to RGB888.
            }), np.recarray)
            if np.any(np.mod(array.r, 2)):
                self._next_xray_checking_time = time.time() + self._xray_warning_cooldown
                self._xray_warning_message.show()
                Logger.log("i", "X-Ray overlay found non-manifold pixels.")
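
The Cura example above works around QImage's scanline padding: for Format_RGB888, bytesPerLine() can be larger than width() * 3 because rows are 32-bit aligned. Outside of a performance-critical render loop, a plain stride-aware copy is often enough. This is a minimal sketch, not taken from any of the projects above; the function name is illustrative.

import numpy as np
from PyQt5.QtGui import QImage

def rgb888_to_array(qimg):
    """Copy a Format_RGB888 QImage into an (h, w, 3) uint8 array,
    honouring bytesPerLine() so padded scanlines do not shear the image."""
    qimg = qimg.convertToFormat(QImage.Format_RGB888)
    w, h = qimg.width(), qimg.height()
    bpl = qimg.bytesPerLine()              # may exceed w * 3 due to 32-bit row alignment
    ptr = qimg.bits()
    ptr.setsize(h * bpl)                   # sip.voidptr needs an explicit size in PyQt5
    rows = np.frombuffer(ptr, dtype=np.uint8).reshape(h, bpl)
    return rows[:, :w * 3].reshape(h, w, 3).copy()   # drop the padding bytes, then copy out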