Python PIL.Image() Examples

The following are 28 code examples of PIL.Image. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the PIL module.
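Before the examples, here is a minimal sketch of typical PIL.Image usage (the file name and sizes are placeholders, not taken from any of the projects below):

import numpy as np
import PIL.Image

# Open an image file, force RGB mode, and resize it.
img = PIL.Image.open("example.jpg").convert("RGB")
img = img.resize((224, 224))

# Round-trip between PIL images and numpy arrays.
array = np.asarray(img)            # shape (height, width, 3), dtype uint8
img2 = PIL.Image.fromarray(array)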
Example #1
Source File: utils.py    From noise2noise-pytorch with MIT License
def load_hdr_as_tensor(img_path):
    """Converts OpenEXR image to torch float tensor."""

    # Read OpenEXR file
    if not OpenEXR.isOpenExrFile(img_path):
        raise ValueError(f'Image {img_path} is not a valid OpenEXR file')
    src = OpenEXR.InputFile(img_path)
    pixel_type = Imath.PixelType(Imath.PixelType.FLOAT)
    dw = src.header()['dataWindow']
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    
    # Read into tensor
    tensor = torch.zeros((3, size[1], size[0]))
    for i, c in enumerate('RGB'):
        rgb32f = np.frombuffer(src.channel(c, pixel_type), dtype=np.float32)  # frombuffer replaces the deprecated fromstring
        tensor[i, :, :] = torch.from_numpy(rgb32f.reshape(size[1], size[0]))
        
    return tensor 
Example #2
Source File: utils.py    From centerpose with MIT License
def make_all_grids(tensors, nrow=8, padding=2,
                   normalize=False, range=None, scale_each=False, pad_value=0):
    """Save a given Tensor into an image file.

    Args:
        tensors (list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images_l1loss_ssim by calling ``make_grid``.
        **kwargs: Other arguments are documented in ``make_grid``.
    """
    from PIL import Image

    ndarr = None
    for tensor in tensors:
        grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
                         normalize=normalize, range=range, scale_each=scale_each)
        # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
        if ndarr is None:
            ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
        else:
            ndarr = np.hstack(
                (ndarr, grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()))

    return ndarr 
Example #3
Source File: pre_processor.py    From blueoil with Apache License 2.0
def per_image_standardization(image):
    """Image standardization per image.

    https://www.tensorflow.org/api_docs/python/image/image_adjustments#per_image_standardization

    Args:
        image: An image numpy array.

    """
    image = image.astype(np.float32)
    mean = image.mean()
    stddev = np.std(image)
    adjusted_stddev = max(stddev, 1.0 / np.sqrt(image.size))

    image -= mean
    image /= adjusted_stddev

    return image 
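A minimal usage sketch for the function above, assuming it has been imported and that the file path is a placeholder:

import numpy as np
import PIL.Image

# Load an image with PIL, convert it to a numpy array, then standardize it.
image = np.asarray(PIL.Image.open("sample.png").convert("RGB"))
standardized = per_image_standardization(image)
print(standardized.mean(), standardized.std())  # approximately 0 and 1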
Example #4
Source File: general.py    From mxbox with BSD 3-Clause "New" or "Revised" License
def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be scaled.

        Returns:
            PIL.Image: Rescaled image.
        """
        if isinstance(self.size, int):
            w, h = img.size
            if (w <= h and w == self.size) or (h <= w and h == self.size):
                return img
            if w < h:
                ow = self.size
                oh = int(self.size * h / w)
                return img.resize((ow, oh), self.interpolation)
            else:
                oh = self.size
                ow = int(self.size * w / h)
                return img.resize((ow, oh), self.interpolation)
        else:
            return img.resize(self.size, self.interpolation) 
Example #5
Source File: pre_processor.py    From blueoil with Apache License 2.0
def resize(image, size=[256, 256], resample="NEAREST"):
    """Resize an image.

    Args:
        image (np.ndarray): an image numpy array.
        size: [height, width]
        resample (str): A name of resampling filter

    """
    width = size[1]
    height = size[0]

    if width == image.shape[1] and height == image.shape[0]:
        return image

    image = PIL.Image.fromarray(np.uint8(image))

    image = image.resize([width, height], RESAMPLE_METHODS[resample])

    image = np.array(image)
    assert image.shape[0] == height
    assert image.shape[1] == width

    return image 
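RESAMPLE_METHODS is not shown in this excerpt; a plausible definition, assuming it simply maps filter names to Pillow resampling constants, would be:

import PIL.Image

# Hypothetical mapping of filter names to Pillow resampling filters.
RESAMPLE_METHODS = {
    "NEAREST": PIL.Image.NEAREST,
    "BILINEAR": PIL.Image.BILINEAR,
    "BICUBIC": PIL.Image.BICUBIC,
    "LANCZOS": PIL.Image.LANCZOS,
}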
Example #6
Source File: inputs.py    From gradio-UI with Apache License 2.0
def preprocess(self, inp):
        """
        Default preprocessing method for the SketchPad is to convert the sketch to black and white and resize 28x28
        """
        im_transparent = preprocessing_utils.decode_base64_to_image(inp)
        im = PIL.Image.new("RGBA", im_transparent.size, "WHITE")  # Create a white background for the alpha channel
        im.paste(im_transparent, (0, 0), im_transparent)
        im = im.convert('L')
        if self.invert_colors:
            im = PIL.ImageOps.invert(im)
        im = im.resize((self.image_width, self.image_height))
        if self.flatten:
            array = np.array(im).flatten().reshape(1, self.image_width * self.image_height)
        else:
            array = np.array(im).flatten().reshape(1, self.image_width, self.image_height)
        array = array * self.scale + self.shift
        array = array.astype(self.dtype)
        return array 
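preprocessing_utils.decode_base64_to_image is not shown here; a minimal sketch of such a helper, assuming the input is a base64 data URL, could look like this:

import base64
from io import BytesIO

import PIL.Image

def decode_base64_to_image(encoding):
    # Strip an optional "data:image/png;base64," prefix, then decode the payload.
    if "," in encoding:
        encoding = encoding.split(",", 1)[1]
    return PIL.Image.open(BytesIO(base64.b64decode(encoding)))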
Example #7
Source File: visualize.py    From blueoil with Apache License 2.0
def visualize_semantic_segmentation(image, post_processed, config):
    """Draw semantic segmentation result mask to image.

    Args:
        image (np.ndarray): A inference input RGB image to be draw.
        post_processed (np.ndarray): A one batch output of model be already applied post process.
            format is defined at https://github.com/blue-oil/blueoil/blob/master/lmnet/docs/specification/output_data.md
        config (EasyDict): Inference config.

    Returns:
        PIL.Image.Image: drawn image object.

    """
    colormap = np.array(get_color_map(len(config.CLASSES)), dtype=np.uint8)

    alpha = 0.5
    image_height = image.shape[0]
    image_width = image.shape[1]
    mask_image = label_to_color_image(np.expand_dims(post_processed, 0), colormap)
    mask_img = PIL.Image.fromarray(mask_image)
    mask_img = mask_img.resize(size=(image_width, image_height))
    result = PIL.Image.blend(PIL.Image.fromarray(image), mask_img, alpha)

    return result 
Example #8
Source File: utils.py    From PythonHomework with MIT License
def draw_text(
    img: Image,
    text: str,
    location: tuple = (0, 0),
    text_color=(0, 0, 0)
) -> Image:
    draw = ImageDraw.Draw(img)

    try:
        # For Linux
        font = ImageFont.truetype("DejaVuSans.ttf", 20)
    except Exception:
        logger.warning("No font DejaVuSans; use default instead")
        # For others
        font = ImageFont.load_default()
    draw.text(location, text, font=font, fill=text_color)
    return img 
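A minimal usage sketch for draw_text (image size, text, and colors are arbitrary):

from PIL import Image

img = Image.new("RGB", (300, 60), "white")
img = draw_text(img, "Hello, PIL", location=(10, 20), text_color=(255, 0, 0))
img.save("labeled.png")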
Example #9
Source File: batch_image.py    From batchflow with Apache License 2.0
def _assemble_component(self, result, *args, component='images', **kwargs):
        """ Assemble one component after parallel execution.

        Parameters
        ----------
        result : sequence, array_like
            Results after inbatch_parallel.
        component : str
            component to assemble
        """
        _ = args, kwargs
        if isinstance(result[0], PIL.Image.Image):
            setattr(self, component, np.asarray(result, dtype=object))
        else:
            try:
                setattr(self, component, np.stack(result))
            except ValueError:
                array_result = np.empty(len(result), dtype=object)
                array_result[:] = result
                setattr(self, component, array_result) 
Example #10
Source File: visualize.py    From blueoil with Apache License 2.0
def draw_fps(pil_image, fps, fps_only_network):
    """Draw FPS information to image object.

    Args:
        pil_image (PIL.Image.Image): Image object to be draw FPS.
        fps (float): Entire inference FPS .
        fps_only_network (float): FPS of network only (not pre/post process).

    Returns:

    """
    font_size = 20
    font_size_sub = 14
    text_color = (200, 200, 200)
    text = "FPS: {:.1f}".format(fps)
    text_sub = "FPS (Network only): {:.1f}".format(fps_only_network)

    draw = PIL.ImageDraw.Draw(pil_image)
    font = PIL.ImageFont.truetype(FONT, font_size)
    font_sub = PIL.ImageFont.truetype(FONT, font_size_sub)
    draw.text((10, pil_image.height - font_size - font_size_sub - 5), text, fill=text_color, font=font)
    draw.text((10, pil_image.height - font_size_sub - 5), text_sub, fill=text_color, font=font_sub) 
Example #11
Source File: COCO_for_DAVIS.py    From MOTSFusion with MIT License
def load_ann(img,img_filename,annotation_filename):
    img_filename = img_filename.decode('utf-8')
    anns_for_img = self.filename_to_coco_anns[img_filename.split("/")[-1]]
    ann_id = int(annotation_filename.decode('utf-8'))
    ann = anns_for_img[ann_id]
    img_h, img_w = img.shape[:-1]

    if ann['area'] > 1 and isinstance(ann['segmentation'], list):
      segs = ann['segmentation']
      valid_segs = [np.asarray(p).reshape(-1, 2) for p in segs if len(p) >= 6]
      if len(valid_segs) < len(segs):
        print("Image {} has invalid polygons!".format(img_filename))
      output_ann = np.asarray(self.segmentation_to_mask(valid_segs, img_h, img_w), dtype='uint8')[
        ..., np.newaxis]  # Should be 1s and 0s
    else:
      output_ann = np.zeros((img_h, img_w, 1), dtype="uint8")

    return output_ann 
Example #12
Source File: backend_bases.py    From neural-network-animation with MIT License
def draw_image(self, gc, x, y, im):
        """
        Draw the image instance into the current axes;

        *gc*
            a GraphicsContext containing clipping information

        *x*
            is the distance in pixels from the left hand side of the canvas.

        *y*
            the distance from the origin.  That is, if origin is
            upper, y is the distance from top.  If origin is lower, y
            is the distance from bottom

        *im*
            the :class:`matplotlib._image.Image` instance
        """
        raise NotImplementedError 
Example #13
Source File: utils.py    From ivre with GNU General Public License v3.0
def _trim_image(img, tolerance):
        """Returns the tiniest `bbox` to trim `img`"""
        result = None
        for pixel in [(0, 0), (img.size[0] - 1, 0), (0, img.size[1] - 1),
                      (img.size[0] - 1, img.size[1] - 1)]:
            if result is not None and result[0] < pixel[0] < result[2] - 1 \
               and result[1] < pixel[1] < result[3] - 1:
                # This pixel is already removed by current result
                continue
            bkg = PIL.Image.new(img.mode, img.size, img.getpixel(pixel))
            diffbkg = PIL.ImageChops.difference(img, bkg)
            if tolerance:
                diffbkg = PIL.ImageChops.add(diffbkg, diffbkg, 2.0, -tolerance)
            bbox = diffbkg.getbbox()
            if not bbox:
                # Image no longer exists after trim
                return None
            if result is None:
                result = bbox
            elif _img_size(bbox) < _img_size(result):
                result = bbox
        return result 
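_img_size is not shown in this excerpt; it presumably returns the area of a bounding box. A hypothetical definition (an assumption, not the project's actual code):

def _img_size(bbox):
    # Area of a (left, upper, right, lower) bounding box.
    return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])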
Example #14
Source File: perf_vision_1.py    From Gap with Apache License 2.0
def test_003():
    """ Vision/Image Performance Tests """
    global files, dir
    total = 0
    for _ in range(10):
        start = time.time()
        images = []
        labels = []
        for file in files:
             image = Image(dir + "/" + file, config=['flatten', 'grayscale', 'nostore'])
             images.append( image.data )
             labels.append( 1 )
        with h5py.File('tmp.h5', 'w') as hf:
            hf.create_dataset("images",  data=images)
            hf.create_dataset("labels",  data=labels)
        batch_time = time.time() - start
        print("BATCH", batch_time)
        total += batch_time
        os.remove("tmp.h5")
    print("VISION/IMAGE AVE", total / 10 ) 
Example #15
Source File: perf_vision_1.py    From Gap with Apache License 2.0
def test_001():
    """ PIL Performance Tests """
    global files, dir
    total = 0
    for _ in range(10):
        start = time.time()
        images = []
        for file in files:
             pixels = PIL.Image.open(dir + "/" + file)
             image  = np.asarray(pixels)
             image  = image / 255.0
             image  = image.flatten()
             images.append(image)
        with h5py.File('tmp.h5', 'w') as hf:
            hf.create_dataset("images",  data=images)
        batch_time = time.time() - start
        print("BATCH", batch_time)
        total += batch_time
        os.remove("tmp.h5")
    print("PIL AVE", total / 10 ) 
Example #16
Source File: utils.py    From ivre with GNU General Public License v3.0
def trim_image(imgdata, tolerance=1, minborder=10):
        """Trims the image, `tolerance` is an integer from 0 (not
        tolerant, trims region with the exact same color) to 255
        (too tolerant, will trim the whole image).

        """
        img = PIL.Image.open(BytesIO(imgdata))
        bbox = _trim_image(img, tolerance)
        if bbox:
            newbbox = (max(bbox[0] - minborder, 0),
                       max(bbox[1] - minborder, 0),
                       img.size[0] - max(img.size[0] - bbox[2] - minborder, 0),
                       img.size[1] - max(img.size[1] - bbox[3] - minborder, 0))
            if newbbox != (0, 0, img.size[0], img.size[1]):
                out = BytesIO()
                img.crop(newbbox).save(out, format='jpeg')
                out.seek(0)
                return out.read()
            # Image does not need to be modified
            return True
        # Image no longer exists after trim
        return False 
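A quick usage sketch for trim_image, assuming the helpers above are importable and the file names are placeholders:

with open("photo.jpg", "rb") as f:
    trimmed = trim_image(f.read(), tolerance=1)

if trimmed is True:
    print("Image did not need trimming")
elif trimmed is False:
    print("Nothing left after trimming")
else:
    with open("photo_trimmed.jpg", "wb") as out:
        out.write(trimmed)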
Example #17
Source File: utils.py    From centerpose with MIT License
def save_image(tensors, filename, nrow=8, padding=2,
               normalize=False, range=None, scale_each=False, pad_value=0):
    """Save a given Tensor into an image file.

    Args:
        tensors (list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images_l1loss_ssim by calling ``make_grid``.
        **kwargs: Other arguments are documented in ``make_grid``.
    """
    from PIL import Image

    ndarr = None
    for tensor in tensors:
        grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
                         normalize=normalize, range=range, scale_each=scale_each)
        # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
        if ndarr is None:
            ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
        else:
            ndarr = np.hstack(
                (ndarr, grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()))

    # return ndarr
    cv2.imwrite(filename, ndarr) 
Example #18
Source File: epd7in5.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        buf = [0x00] * int(self.width * self.height / 8)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display \
                ({0}x{1}).' .format(self.width, self.height))

        pixels = image_monocolor.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] != 0:
                    buf[int((x + y * self.width) / 8)] |= 0x80 >> (x % 8)
        return buf 
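As a rough standalone illustration of the 1-bit packing these display drivers perform (the panel dimensions and drawing below are arbitrary, and no driver class is used):

import PIL.Image
import PIL.ImageDraw

WIDTH, HEIGHT = 640, 384  # illustrative panel size

image = PIL.Image.new('1', (WIDTH, HEIGHT), 0)   # all-black, mode-1 image
draw = PIL.ImageDraw.Draw(image)
draw.rectangle((10, 10, 100, 50), fill=255)      # draw a white rectangle

# Pack 8 horizontal pixels per byte, MSB first, exactly as get_frame_buffer does.
buf = [0x00] * (WIDTH * HEIGHT // 8)
pixels = image.load()
for y in range(HEIGHT):
    for x in range(WIDTH):
        if pixels[x, y] != 0:
            buf[(x + y * WIDTH) // 8] |= 0x80 >> (x % 8)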
Example #19
Source File: epd4in2.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        buf = [0] * int(self.width * self.height / 8)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display \
                ({0}x{1}).' .format(self.width, self.height))

        pixels = image_monocolor.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] != 0:
                    buf[int((x + y * self.width) / 8)] |= 0x80 >> (x % 8)
        return buf 
Example #20
Source File: epd7in5b.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        buf = [0x00] * int(self.width * self.height / 4)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode L.
        image_grayscale = image.convert('L')
        imwidth, imheight = image_grayscale.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display \
                ({0}x{1}).' .format(self.width, self.height))

        pixels = image_grayscale.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] < 64:           # black
                    buf[int((x + y * self.width) / 4)] &= ~(0xC0 >> (x % 4 * 2))
                elif pixels[x, y] < 192:     # convert gray to red
                    buf[int((x + y * self.width) / 4)] &= ~(0xC0 >> (x % 4 * 2))
                    buf[int((x + y * self.width) / 4)] |= 0x40 >> (x % 4 * 2)
                else:                           # white
                    buf[int((x + y * self.width) / 4)] |= 0xC0 >> (x % 4 * 2)
        return buf 
Example #21
Source File: epd2in9.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        buf = [0x00] * int(self.width * self.height / 8)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display \
                ({0}x{1}).' .format(self.width, self.height))

        pixels = image_monocolor.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] != 0:
                    buf[int((x + y * self.width) / 8)] |= 0x80 >> (x % 8)
        return buf

Example #22
Source File: epd2in13.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        buf = [0x00] * int(self.width * self.height / 8)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display \
                ({0}x{1}).' .format(self.width, self.height))

        pixels = image_monocolor.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] != 0:
                    buf[int((x + y * self.width) / 8)] |= 0x80 >> (x % 8)
        return buf

Example #23
Source File: epd9in7.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        buf = [0x00] * int(self.width * self.height / 8)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display \
                ({0}x{1}).' .format(self.width, self.height))

        pixels = image_monocolor.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] != 0:
                    buf[int((x + y * self.width) / 8)] |= 0x80 >> (x % 8)
        return buf 
Example #24
Source File: epd4in2b.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        buf = [0xFF] * int(self.width * self.height / 8)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display \
                ({0}x{1}).' .format(self.width, self.height))

        pixels = image_monocolor.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] == 0:
                    buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
        return buf 
Example #25
Source File: epd1in54.py    From epd-library-python with GNU General Public License v3.0
def get_frame_buffer(self, image):
        '''
        @brief: convert an image to a buffer
        '''
        buf = [0x00] * int(self.width * self.height / 8)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        if imwidth != self.width or imheight != self.height:
            raise ValueError(
                'Image must be same dimensions as display ({0}x{1}).'.format(
                    self.width, self.height
                )
            )

        pixels = image_monocolor.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels
                # at the current position.
                if pixels[x, y] != 0:
                    buf[int((x + y * self.width) / 8)] |= 0x80 >> (x % 8)
        return buf 
Example #26
Source File: data.py    From SCAN with Apache License 2.0
def __init__(self, data_path, data_split, vocab):
        self.vocab = vocab
        loc = data_path + '/'

        # Captions
        self.captions = []
        with open(loc+'%s_caps.txt' % data_split, 'rb') as f:
            for line in f:
                self.captions.append(line.strip())

        # Image features
        self.images = np.load(loc+'%s_ims.npy' % data_split)
        self.length = len(self.captions)
        # rkiros data has redundancy in images, we divide by 5, 10crop doesn't
        if self.images.shape[0] != self.length:
            self.im_div = 5
        else:
            self.im_div = 1
        # the development set for coco is large and so validation would be slow
        if data_split == 'dev':
            self.length = 5000 
Example #27
Source File: data.py    From seismic-deeplearning with MIT License
def add_section_depth_channels(sections_numpy):
    """Add 2 extra channels to a 1 channel section
    One channel is a linear sequence from 0 to 1 starting from the top of the section to the bottom
    The second channel is the product of the input channel and the 'depth' channel
    
    Args:
        sections_numpy (numpy array): 3D Matrix (NWH)Image tensor
    
    Returns:
        [pytorch tensor]: 3D image tensor
    """
    n, w, h = sections_numpy.shape
    image = np.zeros([3, n, w, h])
    image[0] = sections_numpy
    for row, const in enumerate(np.linspace(0, 1, h)):
        image[1, :, :, row] = const
    image[2] = image[0] * image[1]
    return np.swapaxes(image, 0, 1) 
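A small usage sketch showing the shape transformation (section sizes are arbitrary):

import numpy as np

sections = np.random.rand(4, 100, 200)        # (N, W, H): 4 sections of 100 x 200
augmented = add_section_depth_channels(sections)
print(augmented.shape)                        # (4, 3, 100, 200)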
Example #28
Source File: secure_camera.py    From WannaPark with GNU General Public License v3.0
def get_car_image_plate_number(image_path, image_name):
  
	img = Image(cv2.imread(image_path,0), image_name)
	l_carsR = getCarsFromImage(img.img, carClassifier)
	for carR in l_carsR:
		car = Car(img.img, carR, plateCassifier)
		car.setPlateText(processPlateText(car, net))
		img.addCar(car)
	
	for car in img.cars:
		car.draw()
		if(not car.isPlateEmpty()):
			plate_number = car.plateText
		# imshow(car.carImg)
		x, y, w, h = car.carR.x, car.carR.y, car.carR.w, car.carR.h

	color_image = imread(image_path)
	return color_image[y:y+h, x:x+w], plate_number 