Python cv2.imdecode() Examples

The following are 30 code examples of cv2.imdecode(), collected from open source projects; the source file, project, and license are listed above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
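Before the project examples, here is a minimal sketch of the common pattern: read the encoded bytes from somewhere, wrap them in a 1-D uint8 array, and let cv2.imdecode rebuild the pixel data. The file name is only a placeholder:

import cv2
import numpy as np

# The encoded bytes can come from a file, a socket, a zip archive, a base64 string, etc.
with open('example.jpg', 'rb') as f:  # placeholder path
    raw = f.read()

buf = np.frombuffer(raw, dtype=np.uint8)   # 1-D uint8 view of the encoded bytes
img = cv2.imdecode(buf, cv2.IMREAD_COLOR)  # BGR ndarray, or None if the buffer cannot be decoded
if img is None:
    raise ValueError('buffer could not be decoded as an image')
print(img.shape)  # (height, width, 3)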
Example #1
Source File: captcha_generator.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def image(self, captcha_str):
        """
        Generate a greyscale captcha image representing the given number string

        Parameters
        ----------
        captcha_str: str
            string of characters for the captcha image

        Returns
        -------
        numpy.ndarray
            Generated greyscale image in np.ndarray float type with values normalized to [0, 1]
        """
        img = self.captcha.generate(captcha_str)
        img = np.fromstring(img.getvalue(), dtype='uint8')
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (self.h, self.w))
        img = img.transpose(1, 0)
        img = np.multiply(img, 1 / 255.0)
        return img 
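A side note on the pattern above: np.fromstring is deprecated for binary data in recent NumPy releases (np.frombuffer is the replacement), and cv2.resize takes its target size as (width, height), which is why the resize and the following transpose together produce an array of shape (self.h, self.w). A self-contained sketch of the same decode, resize, and normalize steps, using a randomly generated stand-in for the captcha bytes and made-up dimensions:

import cv2
import numpy as np

# Stand-in for the PNG bytes that ImageCaptcha.generate() would return.
fake = (np.random.rand(60, 160) * 255).astype(np.uint8)
raw = cv2.imencode('.png', fake)[1].tobytes()

img = np.frombuffer(raw, dtype=np.uint8)       # frombuffer instead of the deprecated np.fromstring
img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)  # 2-D uint8 array
img = cv2.resize(img, (160, 60))               # target size is (width, height)
img = img.astype(np.float32) / 255.0           # normalize to [0, 1]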
Example #2
Source File: bagdump.py    From udacity-driving-reader with Apache License 2.0
def write_image(bridge, outdir, msg, fmt='png'):
    results = {}
    image_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()) + '.' + fmt)
    try:
        if hasattr(msg, 'format') and 'compressed' in msg.format:
            buf = np.ndarray(shape=(1, len(msg.data)), dtype=np.uint8, buffer=msg.data)
            cv_image = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
            if cv_image.shape[2] != 3:
                print("Invalid image %s" % image_filename)
                return results
            results['height'] = cv_image.shape[0]
            results['width'] = cv_image.shape[1]
            # Avoid re-encoding if we don't have to
            if check_format(msg.data) == fmt:
                buf.tofile(image_filename)
            else:
                cv2.imwrite(image_filename, cv_image)
        else:
            cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
            cv2.imwrite(image_filename, cv_image)
    except CvBridgeError as e:
        print(e)
    results['filename'] = image_filename
    return results 
Example #3
Source File: image_helper.py    From openseg.pytorch with MIT License
def imfrombytes(content, flag='color'):
        """Read an image from bytes.

        Args:
            content (bytes): Image bytes got from files or other streams.
            flag (str): Same as :func:`imread`.

        Returns:
            ndarray: Loaded image array.
        """
        imread_flags = {
            'color': cv2.IMREAD_COLOR,
            'grayscale': cv2.IMREAD_GRAYSCALE,
            'unchanged': cv2.IMREAD_UNCHANGED
        }
        img_np = np.fromstring(content, np.uint8)
        flag = imread_flags[flag] if isinstance(flag, str) else flag
        img = cv2.imdecode(img_np, flag)
        return img 
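A possible usage sketch for the helper above, assuming imfrombytes is available as a plain function and with a placeholder path; an integer flag is passed through to cv2.imdecode unchanged:

with open('photo.png', 'rb') as f:  # placeholder path
    content = f.read()
gray = imfrombytes(content, 'grayscale')               # 2-D ndarray
raw = imfrombytes(content, flag=cv2.IMREAD_UNCHANGED)  # integer flags are used as-is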
Example #4
Source File: data_feeder.py    From tf-lcnn with GNU General Public License v3.0
def get_data(self):
        idxs = np.arange(len(self.train_list))
        if self.shuffle:
            self.rng.shuffle(idxs)

        caches = {}
        for i, k in enumerate(idxs):
            path = self.train_list[k]
            label = self.lb_list[k]

            if i % self.preload == 0:
                try:
                    caches = ILSVRCTenth._read_tenth_batch(self.train_list[idxs[i:i+self.preload]])
                except Exception as e:
                    logging.warning('tenth local cache failed, err=%s' % str(e))

            content = caches.get(path, '')
            if not content:
                content = ILSVRCTenth._read_tenth(path)

            img = cv2.imdecode(np.fromstring(content, dtype=np.uint8), cv2.IMREAD_COLOR)
            yield [img, label] 
Example #5
Source File: create_dataset.py    From ICDAR-2019-SROIE with MIT License
def checkImageIsValid(imageBin):
    if imageBin is None:
        return False
    imageBuf = np.fromstring(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
    if img is None:  # cv2.imdecode returns None when the buffer is not a valid image
        return False
    imgH, imgW = img.shape[0], img.shape[1]
    if imgH * imgW == 0:
        return False
    return True 
Example #6
Source File: camera_pi.py    From object-detection with MIT License
def frames():
        with PiCamera() as camera:
            camera.rotation = int(str(os.environ['CAMERA_ROTATION']))
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream, 'jpeg',
                                               use_video_port=True):
                # return current frame
                stream.seek(0)
                _stream = stream.getvalue()
                data = np.fromstring(_stream, dtype=np.uint8)
                img = cv2.imdecode(data, 1)
                yield img

                # reset stream for next frame
                stream.seek(0)
                stream.truncate() 
Example #7
Source File: captcha_generator.py    From training_results_v0.6 with Apache License 2.0
def image(self, captcha_str):
        """
        Generate a greyscale captcha image representing the given number string

        Parameters
        ----------
        captcha_str: str
            string of characters for the captcha image

        Returns
        -------
        numpy.ndarray
            Generated greyscale image in np.ndarray float type with values normalized to [0, 1]
        """
        img = self.captcha.generate(captcha_str)
        img = np.fromstring(img.getvalue(), dtype='uint8')
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (self.h, self.w))
        img = img.transpose(1, 0)
        img = np.multiply(img, 1 / 255.0)
        return img 
Example #8
Source File: noise.py    From dataflow with Apache License 2.0
def _augment(self, img, q):
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1).astype(img.dtype) 
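The same JPEG re-compression trick, pulled out of the augmentor as a self-contained sketch; the image and quality value are made up for illustration:

import cv2
import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)   # dummy image
quality = 30                                                   # lower quality, stronger JPEG artifacts
ok, enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, quality])
noisy = cv2.imdecode(enc, cv2.IMREAD_COLOR).astype(img.dtype)  # same shape as img, plus compression noise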
Example #9
Source File: opencv.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def imdecode(str_img, flag=1):
    """Decode image from str buffer.
    Wrapper for cv2.imdecode that uses mx.nd.NDArray

    Parameters
    ----------
    str_img : str
        str buffer read from image file
    flag : int
        same as flag for cv2.imdecode
    Returns
    -------
    img : NDArray
        decoded image in (width, height, channels)
        with BGR color channel order
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
                                 mx_uint(len(str_img)),
                                 flag, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl) 
Example #10
Source File: captcha_generator.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def main():
        parser = argparse.ArgumentParser()
        parser.add_argument("font_path", help="Path to ttf font file")
        parser.add_argument("output", help="Output filename including extension (e.g. 'sample.jpg')")
        parser.add_argument("--num", help="Up to 4 digit number [Default: random]")
        args = parser.parse_args()

        captcha = ImageCaptcha(fonts=[args.font_path])
        captcha_str = args.num if args.num else DigitCaptcha.get_rand(3, 4)
        img = captcha.generate(captcha_str)
        img = np.fromstring(img.getvalue(), dtype='uint8')
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        cv2.imwrite(args.output, img)
        print("Captcha image with digits {} written to {}".format([int(c) for c in captcha_str], args.output)) 
Example #11
Source File: zipreader.py    From PoseWarper with Apache License 2.0
def imread(filename, flags=cv2.IMREAD_COLOR):
    global _im_zfile
    path = filename
    pos_at = path.find('@')  # find() returns -1 when '@' is missing, which the check below expects
    if pos_at == -1:
        print("character '@' is not found from the given path '%s'"%(path))
        assert 0
    path_zip = path[0: pos_at]
    path_img = path[pos_at + 2:]
    if not os.path.isfile(path_zip):
        print("zip file '%s' is not found"%(path_zip))
        assert 0
    for i in range(len(_im_zfile)):
        if _im_zfile[i]['path'] == path_zip:
            data = _im_zfile[i]['zipfile'].read(path_img)
            return cv2.imdecode(np.frombuffer(data, np.uint8), flags)

    _im_zfile.append({
        'path': path_zip,
        'zipfile': zipfile.ZipFile(path_zip, 'r')
    })
    data = _im_zfile[-1]['zipfile'].read(path_img)

    return cv2.imdecode(np.frombuffer(data, np.uint8), flags) 
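Stripped of the module-level cache, the core of the reader above is just zipfile plus cv2.imdecode. A minimal sketch with placeholder archive and member names:

import zipfile
import cv2
import numpy as np

with zipfile.ZipFile('images.zip', 'r') as zf:  # placeholder archive
    data = zf.read('folder/cat.jpg')            # placeholder member name
img = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)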
Example #12
Source File: benchmark-dataflow.py    From benchmarks with The Unlicense
def test_lmdb_inference(db, augs, batch):
    ds = LMDBData(db, shuffle=False)
    # ds = LocallyShuffleData(ds, 50000)

    augs = AugmentorList(augs)

    def mapper(data):
        im, label = loads(data[1])
        im = cv2.imdecode(im, cv2.IMREAD_COLOR)
        im = augs.augment(im)
        return im, label

    ds = MultiProcessMapData(ds, 40, mapper,
                             buffer_size=200)
    # ds = MultiThreadMapData(ds, 40, mapper, buffer_size=2000)

    ds = BatchData(ds, batch)
    ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Example #13
Source File: RtspClient.py    From ReolinkCameraAPI with GNU General Public License v3.0
def get_frame(self) -> bytearray:
        try:
            self.sockt.send(str.encode(self.url))
            data = b''
            while True:
                try:
                    r = self.sockt.recv(90456)
                    if len(r) == 0:
                        break
                    a = r.find(b'END!')
                    if a != -1:
                        data += r[:a]
                        break
                    data += r
                except Exception as e:
                    print(e)
                    continue
            nparr = numpy.fromstring(data, numpy.uint8)
            frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            return frame
        except Exception as e:
            print(e) 
Example #14
Source File: corruptions.py    From robustness with Apache License 2.0
def motion_blur(x, severity=1):
    c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if x.shape != (224, 224):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) 
Example #15
Source File: make_imagenet_c.py    From robustness with Apache License 2.0
def motion_blur(x, severity=1):
    c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if x.shape != (224, 224):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) 
Example #16
Source File: make_tinyimagenet_c.py    From robustness with Apache License 2.0
def motion_blur(x, severity=1):
    c = [(10,1), (10,1.5), (10,2), (10,2.5), (12,3)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if x.shape != (64, 64):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) 
Example #17
Source File: make_imagenet_c_inception.py    From robustness with Apache License 2.0
def motion_blur(x, severity=1):
    c = [(12,4), (17,6), (17, 9), (17,13), (22,16)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if x.shape != (299, 299):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) 
Example #18
Source File: opencv_adapter.py    From gabriel with Apache License 2.0
def consumer(self, result_wrapper):
        if len(result_wrapper.results) != 1:
            logger.error('Got %d results from server',
                         len(result_wrapper.results))
            return

        result = result_wrapper.results[0]
        if result.payload_type != gabriel_pb2.PayloadType.IMAGE:
            type_name = gabriel_pb2.PayloadType.Name(result.payload_type)
            logger.error('Got result of type %s', type_name)
            return

        np_data = np.fromstring(result.payload, dtype=np.uint8)
        frame = cv2.imdecode(np_data, cv2.IMREAD_COLOR)

        self._consume_frame(frame, result_wrapper.extras) 
Example #19
Source File: device.py    From fgo-bot with MIT License
def capture(self, method=FROM_SHELL) -> Union[np.ndarray, None]:
        """
        Capture the screen.

        :return: a cv2 image as numpy ndarray
        """
        if method == self.FROM_SHELL:
            self.logger.debug('Capturing screen from shell...')
            img = self.__run_cmd(['shell', 'screencap -p'], raw=True)
            img = self.__png_sanitize(img)
            img = np.frombuffer(img, np.uint8)
            img = cv.imdecode(img, cv.IMREAD_COLOR)
            return img
        elif method == self.SDCARD_PULL:
            self.logger.debug('Capturing screen from sdcard pull...')
            self.__run_cmd(['shell', 'screencap -p /sdcard/sc.png'])
            self.__run_cmd(['pull', '/sdcard/sc.png', './sc.png'])
            img = cv.imread('./sc.png', cv.IMREAD_COLOR)
            return img
        else:
            self.logger.error('Unsupported screen capturing method.')
            return None 
Example #20
Source File: engine_cv3.py    From opencv-engine with MIT License
def create_image(self, buffer):
        # FIXME: opencv doesn't support gifs, even worse, the library
        # segfaults when trying to decode a gif. An exception is a
        # less drastic measure.
        try:
            if FORMATS[self.extension] == 'GIF':
                raise ValueError("opencv doesn't support gifs")
        except KeyError:
            pass

        img = cv2.imdecode(np.frombuffer(buffer, np.uint8), -1)
        if FORMATS[self.extension] == 'JPEG':
            self.exif = None
            try:
                info = JpegFile.fromString(buffer).get_exif()
                if info:
                    self.exif = info.data
                    self.exif_marker = info.marker
            except Exception:
                pass
        return img 
Example #21
Source File: captcha_generator.py    From training_results_v0.6 with Apache License 2.0
def main():
        parser = argparse.ArgumentParser()
        parser.add_argument("font_path", help="Path to ttf font file")
        parser.add_argument("output", help="Output filename including extension (e.g. 'sample.jpg')")
        parser.add_argument("--num", help="Up to 4 digit number [Default: random]")
        args = parser.parse_args()

        captcha = ImageCaptcha(fonts=[args.font_path])
        captcha_str = args.num if args.num else DigitCaptcha.get_rand(3, 4)
        img = captcha.generate(captcha_str)
        img = np.fromstring(img.getvalue(), dtype='uint8')
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        cv2.imwrite(args.output, img)
        print("Captcha image with digits {} written to {}".format([int(c) for c in captcha_str], args.output)) 
Example #22
Source File: accuracy_test.py    From EasyPR-python with Apache License 2.0
def accuracy_test(data_dir):
    print("Begin to test accuracy")
    count = [0, 0]  # total images, correct images
    not_recognized_names = []
    image_names = os.listdir(data_dir)
    starttime = time.time()

    for image_name in image_names:
        print('-' * 8)
        count[0] += 1
        label = image_name.split('.')[0]
        # read Chinese plate
        src = cv2.imdecode(np.fromfile(os.path.join(data_dir, image_name), dtype=np.uint8), cv2.IMREAD_COLOR)
        print("Label: ", label)
        time0 = time.time()
        results = plate_detect(src)
        for res in results:
            vis_image = align(src, res)
            rec_res = chars_recognize(vis_image)
            print("Chars Recognise: ", rec_res)
            if label == rec_res:
                count[1] += 1
                break
            else:
                if cfg.DEBUG:
                    plt.title(rec_res)
                    plt.imshow(cv2.cvtColor(vis_image, cv2.COLOR_BGR2RGB))
                    plt.show()
        print("time: {}s".format(time.time() - time0))
        print('-' * 8)

    endtime = time.time()
    print("Accuracy test end!")
    print("Summary:")
    print("Total time: {:.2f}s, Average time: {:.2f}s".format(endtime - starttime, (endtime - starttime) / count[0]))
    print("Accuracy: {:.2f}%({})".format(count[1] / count[0] * 100, count[0]))
    print("Not recognize: ")
    for pic in not_recognized_names:
        print(pic) 
Example #23
Source File: extract_images.py    From pytorch_image_classification with MIT License
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, required=True)
    parser.add_argument('--output-dir', '-o', type=str)
    args = parser.parse_args()

    event_acc = event_accumulator.EventAccumulator(args.path,
                                                   size_guidance={'images': 0})
    event_acc.Reload()

    if args.output_dir is not None:
        output_dir = pathlib.Path(args.output_dir)
    else:
        output_dir = pathlib.Path(args.path).parent / 'images'
    output_dir.mkdir(exist_ok=True, parents=True)

    for tag in event_acc.Tags()['images']:
        events = event_acc.Images(tag)

        tag_name = tag.replace('/', '_')
        dirpath = output_dir / tag_name
        dirpath.mkdir(exist_ok=True, parents=True)

        for index, event in enumerate(events):
            s = np.frombuffer(event.encoded_image_string, dtype=np.uint8)
            image = cv2.imdecode(s, cv2.IMREAD_COLOR)
            outpath = dirpath / f'{index:04}.jpg'
            cv2.imwrite(outpath.as_posix(), image) 
Example #24
Source File: localVerifyCode.py    From 12306 with MIT License
def base64_to_image(base64_code):
    # decode the base64 string
    img_data = base64.b64decode(base64_code)
    # convert to a numpy array
    img_array = np.fromstring(img_data, np.uint8)
    # decode into an OpenCV BGR image (cv2.imdecode expects an IMREAD_* flag, not a color-conversion code)
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

    return img 
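A hedged round-trip sketch of the same idea, going in both directions with a dummy array; cv2.imencode produces the bytes that base64 then wraps:

import base64
import cv2
import numpy as np

img = np.zeros((32, 32, 3), dtype=np.uint8)                     # dummy image
b64 = base64.b64encode(cv2.imencode('.png', img)[1].tobytes())  # ndarray -> PNG bytes -> base64
back = cv2.imdecode(np.frombuffer(base64.b64decode(b64), np.uint8), cv2.IMREAD_COLOR)
assert back.shape == img.shape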
Example #25
Source File: codecs.py    From petastorm with Apache License 2.0
def decode(self, unischema_field, value):
        """Decodes the image using OpenCV."""

        # cv returns a BGR or grayscale image. Convert to RGB (unless a grayscale image).
        image_bgr_or_gray = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
        if len(image_bgr_or_gray.shape) == 2:
            # Greyscale image
            return image_bgr_or_gray
        elif len(image_bgr_or_gray.shape) == 3 and image_bgr_or_gray.shape[2] == 3:
            # Convert BGR to RGB (opencv assumes BGR)
            image_rgb = image_bgr_or_gray[:, :, (2, 1, 0)]
            return image_rgb
        else:
            raise ValueError('Unexpected image dimensions. Supported dimensions are (H, W) or (H, W, 3). '
                             'Got {}'.format(image_bgr_or_gray.shape)) 
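The fancy-indexing step above swaps the first and third channels; cv2.cvtColor with COLOR_BGR2RGB gives the same result, as this small sketch with a dummy array illustrates:

import cv2
import numpy as np

image_bgr = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)  # dummy BGR image
rgb_by_indexing = image_bgr[:, :, (2, 1, 0)]                      # as in the codec above
rgb_by_cvtcolor = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)      # OpenCV's built-in conversion
assert (rgb_by_indexing == rgb_by_cvtcolor).all()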
Example #26
Source File: pymini_yolo.py    From vrequest with MIT License
def get_all_draw_rects(filename, state):
    net = state['net']
    anchors = state['anchors']
    class_types = state['class_types']
    npimg = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), -1)
    height, width = npimg.shape[:2]
    npimg = cv2.cvtColor(npimg, cv2.COLOR_BGR2RGB) # [y,x,c]
    npimg = cv2.resize(npimg, (416, 416))
    npimg_ = np.transpose(npimg, (2,1,0)) # [c,x,y]
    y_pred = net(torch.FloatTensor(npimg_).unsqueeze(0).to(DEVICE))
    v = parse_y_pred(y_pred, anchors, class_types, islist=True, threshold=0.2, nms_threshold=0.4)
    r = []
    for i in v:
        rect, clz, con, log_cons = i
        rw, rh = width/416, height/416
        rect[0],rect[2] = int(rect[0]*rw),int(rect[2]*rw)
        rect[1],rect[3] = int(rect[1]*rh),int(rect[3]*rh)
        r.append([rect, clz, con, log_cons])
    # draw all the detected bounding boxes
    img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), -1)
    for i in r:
        rect, clz, con, log_cons = i
        img = drawrect(img, rect, '{}|{:<.2f}'.format(clz,con))
    cv2.imshow('test', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example #27
Source File: utils.py    From Airtest with Apache License 2.0
def string_2_img(pngstr):
    nparr = np.fromstring(pngstr, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img 
Example #28
Source File: aircv.py    From Airtest with Apache License 2.0
def imread(filename, flatten=False):
    """根据图片路径,将图片读取为cv2的图片处理格式."""
    if not os.path.isfile(filename):
        raise FileNotExistError("File not exist: %s" % filename)

    # choose the image read-in mode: cv2.IMREAD_UNCHANGED=-1, cv2.IMREAD_GRAYSCALE=0, cv2.IMREAD_COLOR=1
    readin_mode = cv2.IMREAD_GRAYSCALE if flatten else cv2.IMREAD_COLOR

    if PY3:
        img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), readin_mode)
    else:
        filename = filename.encode(sys.getfilesystemencoding())
        img = cv2.imread(filename, readin_mode)

    return img 
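The np.fromfile plus cv2.imdecode detour exists because cv2.imread cannot open non-ASCII paths on some platforms (notably Windows). The matching write path combines cv2.imencode with ndarray.tofile; a small sketch with a placeholder filename and dummy image:

import cv2
import numpy as np

img = np.zeros((8, 8, 3), dtype=np.uint8)  # dummy image
ok, buf = cv2.imencode('.png', img)
if ok:
    buf.tofile('截图.png')  # ndarray.tofile handles non-ASCII paths that cv2.imwrite may not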
Example #29
Source File: recognizer.py    From njucaptcha with GNU General Public License v2.0
def get_captcha(captcha_str):
    nparr = np.fromstring(captcha_str, np.uint8)
    captcha = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    captcha.shape = -1,
    captcha = (captcha[::3].astype(np.int) + captcha[1::3].astype(np.int) + captcha[2::3].astype(np.int)) / 3
    captcha = (255 - captcha).astype(np.uint8)
    return captcha 
Example #30
Source File: dataset.py    From EasyPR-python with Apache License 2.0
def record_process(self, record, gray=False):
        """record process
        Args: record
        Returns:
          image: 3-D ndarray
          labels: 2-D list
        """
        if gray:
            image = cv2.imdecode(np.fromfile(record[0], dtype=np.uint8), cv2.IMREAD_GRAYSCALE)[..., None]
        else:
            image = cv2.imdecode(np.fromfile(record[0], dtype=np.uint8), cv2.IMREAD_COLOR)
        return [image, record[1]]