Python skimage.segmentation.slic() Examples

The following are 22 code examples of skimage.segmentation.slic(), taken from open-source projects. Each example notes its source file, project, and license so you can find the original code. You may also want to check out all available functions and classes of the module skimage.segmentation.
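Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of how slic() is typically called: it segments a sample RGB image into roughly 100 superpixels and overlays their boundaries.

import matplotlib.pyplot as plt
from skimage import data
from skimage.segmentation import slic, mark_boundaries

img = data.astronaut()                               # sample RGB image bundled with scikit-image
labels = slic(img, n_segments=100, compactness=10)   # integer label map, shape (H, W)

plt.imshow(mark_boundaries(img, labels))             # draw superpixel boundaries on the image
plt.axis('off')
plt.show()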
Example #1
Source File: spectral_roi.py    From Pic-Numero with MIT License
def experiment_with_parameters():
    '''
    Apply spectral clustering to a test wheat image, using k-means clustering
    with different values of k and different compactness values.

    Saves the results to the Clusters folder for inspection.
    '''
    img = misc.imread("../Assets/wheat.png")

    compactness_values = [30, 50, 70, 100, 200, 300, 500, 700, 1000]
    n_segments_values = [3,4,5,6,7,8,9,10]

    for compactness_val in compactness_values:
        for n in n_segments_values:
            labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
            out1 = color.label2rgb(labels1, img, kind='overlay')

            fig, ax = plt.subplots()
            ax.imshow(out1, interpolation='nearest')
            ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
            plt.savefig("../Clusters/c{}_k{}.png".format(compactness_val, n))
            plt.close(fig) 
Example #2
Source File: utils.py    From PlaneNet with MIT License
def getSuperpixels(depth, normal, width, height, numPlanes=50, numGlobalPlanes = 10):
    depth = np.expand_dims(depth, -1)

    urange = (np.arange(width, dtype=np.float32) / (width + 1) - 0.5) / focalLength * 641
    urange = np.tile(np.reshape(urange, [1, -1]), [height, 1])
    vrange = (np.arange(height, dtype=np.float32) / (height + 1) - 0.5) / focalLength * 481
    vrange = np.tile(np.reshape(vrange, [-1, 1]), [1, width])
    
    ranges = np.stack([urange, np.ones([height, width]), -vrange], axis=2)
    #ranges = np.expand_dims(ranges, 0)

    planeImage = np.sum(normal * ranges, axis=2, keepdims=True) * depth * normal
    planeImage = planeImage / 10 * 1000

    superpixels = segmentation.slic(planeImage, compactness=30, n_segments=400)
    g = graph.rag_mean_color(planeImage, superpixels, mode='similarity')
    planeSegmentation = graph.cut_normalized(superpixels, g)
    return planeSegmentation, superpixels 
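The example above chains SLIC superpixels, a mean-color region adjacency graph, and a normalized cut, but it depends on PlaneNet-specific inputs (depth, normal, and an undefined focalLength). A hedged, self-contained sketch of the same SLIC-plus-normalized-cut pattern on a stock image:

from skimage import data, segmentation, color
from skimage.future import graph  # in recent scikit-image releases: from skimage import graph

img = data.coffee()
superpixels = segmentation.slic(img, compactness=30, n_segments=400)
g = graph.rag_mean_color(img, superpixels, mode='similarity')  # RAG weighted by color similarity
merged = graph.cut_normalized(superpixels, g)                  # merge superpixels via normalized cuts
out = color.label2rgb(merged, img, kind='avg')                 # visualize merged regions by mean color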
Example #3
Source File: utils_backup.py    From PlaneNet with MIT License
def getSuperpixels(depth, normal, width, height, numPlanes=50, numGlobalPlanes = 10):
    depth = np.expand_dims(depth, -1)

    urange = (np.arange(width, dtype=np.float32) / (width + 1) - 0.5) / focalLength * 641
    urange = np.tile(np.reshape(urange, [1, -1]), [height, 1])
    vrange = (np.arange(height, dtype=np.float32) / (height + 1) - 0.5) / focalLength * 481
    vrange = np.tile(np.reshape(vrange, [-1, 1]), [1, width])

    ranges = np.stack([urange, np.ones([height, width]), -vrange], axis=2)
    #ranges = np.expand_dims(ranges, 0)

    planeImage = np.sum(normal * ranges, axis=2, keepdims=True) * depth * normal
    planeImage = planeImage / 10 * 1000

    superpixels = segmentation.slic(planeImage, compactness=30, n_segments=400)
    g = graph.rag_mean_color(planeImage, superpixels, mode='similarity')
    planeSegmentation = graph.cut_normalized(superpixels, g)
    return planeSegmentation, superpixels 
Example #4
Source File: geometric.py    From vidaug with MIT License
def __call__(self, clip):
        is_PIL = isinstance(clip[0], PIL.Image.Image)
        if is_PIL:
            clip = [np.asarray(img) for img in clip]

        # TODO this results in an error when n_segments is 0
        replace_samples = np.tile(np.array([self.p_replace]), self.n_segments)
        avg_image = np.mean(clip, axis=0)
        segments = segmentation.slic(avg_image, n_segments=self.n_segments,
                                     compactness=10)

        if not np.max(replace_samples) == 0:
            print("Converting")
            clip = [self._apply_segmentation(img, replace_samples, segments) for img in clip]

        if is_PIL:
            return [PIL.Image.fromarray(img) for img in clip]
        else:
            return clip 
Example #5
Source File: geometric.py    From vidaug with MIT License
def _apply_segmentation(self, image, replace_samples, segments):
        nb_channels = image.shape[2]
        image_sp = np.copy(image)
        for c in range(nb_channels):
            # segments+1 here because otherwise regionprops always misses
            # the last label
            regions = measure.regionprops(segments + 1,
                                          intensity_image=image[..., c])
            for ridx, region in enumerate(regions):
                # with mod here, because slic can sometimes create more 
                # superpixel than requested. replace_samples then does 
                # not have enough values, so we just start over with the
                # first one again.
                if replace_samples[ridx % len(replace_samples)] == 1:
                    mean_intensity = region.mean_intensity
                    image_sp_c = image_sp[..., c]
                    image_sp_c[segments == ridx] = mean_intensity

        return image_sp 
Example #6
Source File: spectral_roi.py    From Pic-Numero with MIT License
def spectral_cluster(filename, compactness_val=30, n=6):
    '''
    Apply spectral clustering to a given image using k-means clustering and
    display results.

    Args:
        filename: name of the image to segment.

        compactness_val: controls the "boxiness" of each segment. Higher values
          produce more box-like segments.

        n: number of clusters (segments).
    '''
    img = misc.imread(filename)
    labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
    out1 = color.label2rgb(labels1, img, kind='overlay', colors=['red','green','blue','cyan','magenta','yellow'])

    fig, ax = plt.subplots()
    ax.imshow(out1, interpolation='nearest')
    ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
    plt.show() 
Example #7
Source File: RAG_threshold.py    From Pic-Numero with MIT License
def experiment_with_parameters():
    img = misc.imread("wheat.png")

    compactness_values = [30, 50, 70, 100, 200, 300, 500, 700, 1000]
    n_segments_values = [3,4,5,6,7,8,9,10]

    for compactness_val in compactness_values:
        for n in n_segments_values:
            labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
            out1 = color.label2rgb(labels1, img, kind='overlay')

            fig, ax = plt.subplots()
            ax.imshow(out1, interpolation='nearest')
            ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
            plt.savefig("RAG/c{}_k{}.png".format(compactness_val, n))
            plt.close(fig) 
Example #8
Source File: to_superpixels.py    From pytorch_geometric with MIT License
def __call__(self, img):
        img = img.permute(1, 2, 0)
        h, w, c = img.size()

        seg = slic(img.to(torch.double).numpy(), **self.kwargs)
        seg = torch.from_numpy(seg)

        x = scatter_mean(img.view(h * w, c), seg.view(h * w), dim=0)

        pos_y = torch.arange(h, dtype=torch.float)
        pos_y = pos_y.view(-1, 1).repeat(1, w).view(h * w)
        pos_x = torch.arange(w, dtype=torch.float)
        pos_x = pos_x.view(1, -1).repeat(h, 1).view(h * w)

        pos = torch.stack([pos_x, pos_y], dim=-1)
        pos = scatter_mean(pos, seg.view(h * w), dim=0)

        data = Data(x=x, pos=pos)

        if self.add_seg:
            data.seg = seg.view(1, h, w)

        if self.add_img:
            data.img = img.permute(2, 0, 1).view(1, c, h, w)

        return data 
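This __call__ comes from pytorch_geometric's to_superpixels.py, so the surrounding class is presumably the ToSLIC transform (its __init__ appears in Example #18 below). A hedged usage sketch, where pil_image stands in for a hypothetical PIL input:

import torchvision.transforms as T
from torch_geometric.transforms import ToSLIC

transform = T.Compose([
    T.ToTensor(),                          # PIL image -> (C, H, W) float tensor
    ToSLIC(n_segments=75, add_seg=True),   # extra kwargs are forwarded to skimage's slic()
])
data = transform(pil_image)  # Data with x=[num_superpixels, C], pos=[num_superpixels, 2], seg=[1, H, W]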
Example #9
Source File: nlc.py    From videoseg with MIT License
def superpixels(im, maxsp=200, vis=False, redirect=False):
    """
    Get SLIC superpixels
    Input: im: (h,w,c) or (n,h,w,c): 0-255: np.uint8: RGB
    Output: sp: (h,w) or (n,h,w): 0-indexed regions, #regions <= maxsp
    """
    sTime = time.time()
    if im.ndim < 4:
        im = im[None, ...]
    sp = np.zeros(im.shape[:3], dtype=np.int)
    for i in range(im.shape[0]):
        # slic needs im: float in [0,1]
        sp[i] = slic(im[i].astype(np.float) / 255., n_segments=maxsp, sigma=5)
        if not redirect:
            sys.stdout.write('Superpixel computation: [% 5.1f%%]\r' %
                                (100.0 * float((i + 1) / im.shape[0])))
            sys.stdout.flush()
    eTime = time.time()
    print('Superpixel computation finished: %.2f s' % (eTime - sTime))

    if vis and False:
        # TODO: set directory to save
        from skimage.segmentation import mark_boundaries
        for i in range(im.shape[0]):
            Image.fromarray((mark_boundaries(im[i], sp[i]))).save('.jpg')

    if im.ndim < 4:
        return sp[0]
    return sp 
Example #10
Source File: superpixels.py    From pyImSegm with BSD 3-Clause "New" or "Revised" License
def segment_slic_img3d_gray(im, sp_size=50, relative_compact=0.1,
                            space=IMAGE_SPACING):
    """ segmentation by SLIC superpixels using originla SLIC implementation

    :param ndarray im: input 3D grascale image
    :param int sp_size: superpixel initial size
    :param float relative_compact: relative regularisation in range (0, 1)
        where 0 is for free form and 1 for nearly rectangular superpixels
    :param tuple(int,int,int) space: spacing in 3d image may not be equal
    :return ndarray:

    >>> np.random.seed(0)
    >>> img = np.random.random((100, 100, 10))
    >>> slic = segment_slic_img3d_gray(img, 20, 0.2, (1, 1, 5))
    >>> slic.shape
    (100, 100, 10)
    """
    logging.debug('Init SLIC superpixels 3d Gray clustering with params'
                  ' size=%i and regul=%f for image dims %r',
                  sp_size, relative_compact, im.shape)
    nb_pixels = np.prod(im.shape)
    sp_size = np.prod(sp_size / np.asarray(space, dtype=np.float32) * min(space))
    # set native SLIC parameters
    slic_nb_sp = int(nb_pixels / sp_size)
    # slic_compact = int((sp_size * relative_compact) ** 1.5)
    slic_compact = int((sp_size * relative_compact) ** 1.5)
    logging.debug('Starting SLIC superpixels clustering with params NB=%i and '
                  'compat=%f and spacing=%r', slic_nb_sp, slic_compact, space)
    # run SLIC segmentation
    # slic_segments = SLIC.slic_n(np.array(im), slic_nb_sp, slic_compact)
    slic_segments = ski_segm.slic(np.array(im), n_segments=slic_nb_sp,
                                  compactness=slic_compact, multichannel=False,
                                  spacing=space, sigma=1)
    logging.debug('SLIC superpixels estimated.')
    # slic_segments, _, _ = ski_segm.relabel_sequential(slic_segments)
    # fix: unconnected segments - [ndimage.label(slic==i)[1]
    #                              for i in range(slic.max() + 1)]
    slic_segments = measure.label(slic_segments)
    return np.array(slic_segments) 
Example #11
Source File: MR.py    From mr_saliency with GNU General Public License v2.0
def __MR_superpixel(self,img):
        return slic(img,self.superpixel_parameters['segs'],
                    self.superpixel_parameters['compactness'],
                    self.superpixel_parameters['max_iter'],
                    self.superpixel_parameters['sigma'],
                    self.superpixel_parameters['spacing'],
                    self.superpixel_parameters['multichannel'],
                    self.superpixel_parameters['convert2lab'],
                    self.superpixel_parameters['enforce_connectivity'],
                    self.superpixel_parameters['min_size_factor'],
                    self.superpixel_parameters['max_size_factor'],
                    self.superpixel_parameters['slic_zero']) 
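Example #11 passes every argument to slic() positionally, which only works while the parameter order matches. A hedged keyword-argument equivalent, assuming the signature of older scikit-image releases (newer releases renamed max_iter to max_num_iter and replaced multichannel with channel_axis); the parameter values below are illustrative, not mr_saliency's actual defaults:

from skimage import data
from skimage.segmentation import slic

params = {'segs': 200, 'compactness': 10, 'max_iter': 10, 'sigma': 1,
          'spacing': None, 'multichannel': True, 'convert2lab': True,
          'enforce_connectivity': True, 'min_size_factor': 0.5,
          'max_size_factor': 3, 'slic_zero': False}

labels = slic(data.astronaut(),
              n_segments=params['segs'],
              compactness=params['compactness'],
              max_iter=params['max_iter'],
              sigma=params['sigma'],
              spacing=params['spacing'],
              multichannel=params['multichannel'],
              convert2lab=params['convert2lab'],
              enforce_connectivity=params['enforce_connectivity'],
              min_size_factor=params['min_size_factor'],
              max_size_factor=params['max_size_factor'],
              slic_zero=params['slic_zero'])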
Example #12
Source File: cellularity_detection_superpixels.py    From HistomicsTK with Apache License 2.0
def set_superpixel_mask(self):
        """Use Simple Linear Iterative Clustering (SLIC) to get superpixels."""
        # Get superpixel size and number
        spixel_size = self.cd.spixel_size_baseMag * (
            self.cd.MAG / self.cd.slide_info['magnification'])
        n_spixels = int(
            self.tissue_rgb.shape[0] * self.tissue_rgb.shape[1] / spixel_size)

        # get superpixel mask
        # optionally use grayscale instead of RGB -- seems more robust to
        # color variations and sometimes gives better results
        if self.cd.use_grayscale:
            self.spixel_mask = slic(
                rgb2gray(self.tissue_rgb), n_segments=n_spixels,
                compactness=self.cd.compactness)
        else:
            self.spixel_mask = slic(
                self.tissue_rgb, n_segments=n_spixels,
                compactness=self.cd.compactness)

        # restrict to tissue mask
        tmask = resize(
            self.tissue_mask, output_shape=self.spixel_mask.shape,
            order=0, preserve_range=True, anti_aliasing=False)
        self.spixel_mask[tmask == 0] = 0

    # ========================================================================= 
Example #13
Source File: RAG_threshold.py    From Pic-Numero with MIT License
def extract_roi(img, labels_to_keep=[1,2]):
    label_img = segmentation.slic(img, compactness=30, n_segments=6)
    labels = np.unique(label_img);print(labels)
    gray = rgb2gray(img);

    for label in labels:
        if(label not in labels_to_keep):
            logicalIndex = (label_img == label)
            gray[logicalIndex] = 0;

    Display.show_image(gray)
    io.imsave("grayy.png", gray) 
Example #14
Source File: RAG_threshold.py    From Pic-Numero with MIT License
def main():
    img = misc.imread("wheat.png")

    # labels1 = segmentation.slic(img, compactness=100, n_segments=9)
    labels1 = segmentation.slic(img, compactness=50, n_segments=4)
    out1 = color.label2rgb(labels1, img, kind='overlay')
    print(labels1.shape)

    g = graph.rag_mean_color(img, labels1)
    labels2 = graph.cut_threshold(labels1, g, 29)
    out2 = color.label2rgb(labels2, img, kind='overlay')

    # get roi
    # logicalIndex = (labels2 != 1)
    # gray = rgb2gray(img);
    # gray[logicalIndex] = 0;


    plt.figure()
    io.imshow(out1)
    plt.figure()
    io.imshow(out2)
    io.show() 
Example #15
Source File: spectral_roi.py    From Pic-Numero with MIT License
def extract_roi(img, labels_to_keep=[1,2]):
    '''
    Given a wheat image, this method returns an image containing only the region
    of interest.

    Args:
        img: input image.

        labels_to_keep: cluster labels to be kept in image while pixels
            belonging to clusters besides these ones are removed.

    Return:
        roi_img: Input image containing only the region
        of interest.
    '''
    label_img = segmentation.slic(img, compactness=30, n_segments=6)
    labels = np.unique(label_img);print(labels)
    gray = rgb2gray(img);

    for label in labels:
        if(label not in labels_to_keep):
            logicalIndex = (label_img == label)
            gray[logicalIndex] = 0;

    #Display.show_image(gray)
    return gray 
Example #16
Source File: slico.py    From graph-based-image-classification with MIT License
def slico(image, num_segments=NUM_SEGMENTS, compactness=COMPACTNESS,
          max_iterations=MAX_ITERATIONS, sigma=SIGMA,
          min_size_factor=MIN_SIZE_FACTOR, max_size_factor=MAX_SIZE_FACTOR,
          enforce_connectivity=CONNECTIVITY):
    """Segments an image using k-means clustering in Color-(x,y,z) space.

    Args:
        image: The image.
        num_segments: The (approximate) number of segments in the segmented
          output image (optional).
        compactness: Initial value to balance color-space proximity and
          image-space-proximity. Higher values give more weight to image-space
          proximity (optional).
        max_iterations: Maximum number of iterations of k-means.
        sigma: Width of Gaussian kernel used in preprocessing (optional).
        min_size_factor: Proportion of the minimum segment size to be removed
          with respect to the supposed segment size
          `depth*width*height/num_segments` (optional).
        max_size_factor: Proportion of the maximum connected segment size
          (optional).
        enforce_connectivity: Whether the generated segments are connected or
          not (optional).

    Returns:
        Integer mask indicating segment labels.
    """

    image = tf.cast(image, tf.uint8)

    def _slico(image):
        segmentation = skimage_slic(image, num_segments, compactness,
                                    max_iterations, sigma,
                                    min_size_factor=min_size_factor,
                                    max_size_factor=max_size_factor,
                                    enforce_connectivity=enforce_connectivity,
                                    slic_zero=True)
        return segmentation.astype(np.int32)

    return tf.py_func(_slico, [image], tf.int32, stateful=False, name='slico') 
Example #17
Source File: slic.py    From graph-based-image-classification with MIT License
def slic(image, num_segments=NUM_SEGMENTS, compactness=COMPACTNESS,
         max_iterations=MAX_ITERATIONS, sigma=SIGMA,
         min_size_factor=MIN_SIZE_FACTOR, max_size_factor=MAX_SIZE_FACTOR,
         enforce_connectivity=CONNECTIVITY):
    """Segments an image using k-means clustering in Color-(x,y,z) space.

    Args:
        image: The image.
        num_segments: The (approximate) number of segments in the segmented
          output image (optional).
        compactness: Balances color-space proximity and image-space-proximity.
          Higher values give more weight to image-space proximity (optional).
        max_iterations: Maximum number of iterations of k-means.
        sigma: Width of Gaussian kernel used in preprocessing (optional).
        min_size_factor: Proportion of the minimum segment size to be removed
          with respect to the supposed segment size
          `depth*width*height/num_segments` (optional).
        max_size_factor: Proportion of the maximum connected segment size
          (optional).
        enforce_connectivity: Whether the generated segments are connected or
          not (optional).

    Returns:
        Integer mask indicating segment labels.
    """

    image = tf.cast(image, tf.uint8)

    def _slic(image):
        segmentation = skimage_slic(image, num_segments, compactness,
                                    max_iterations, sigma,
                                    min_size_factor=min_size_factor,
                                    max_size_factor=max_size_factor,
                                    enforce_connectivity=enforce_connectivity,
                                    slic_zero=False)
        return segmentation.astype(np.int32)

    return tf.py_func(_slic, [image], tf.int32, stateful=False, name='slic') 
Example #18
Source File: to_superpixels.py    From pytorch_geometric with MIT License
def __init__(self, add_seg=False, add_img=False, **kwargs):

        if slic is None:
            raise ImportError('`ToSlic` requires `scikit-image`.')

        self.add_seg = add_seg
        self.add_img = add_img
        self.kwargs = kwargs 
Example #19
Source File: superpixels.py    From pyImSegm with BSD 3-Clause "New" or "Revised" License
def segment_slic_img2d(img, sp_size=50, relative_compact=0.1, slico=False):
    """ segmentation by SLIC superpixels using original SLIC implementation

    :param ndarray img: input color image
    :param int sp_size: superpixel initial size
    :param float relative_compact: relative regularisation in range (0, 1)
        where 0 is for free form and 1 for nearly rectangular superpixels
    :param bool slico: whether to use the parameter-free version ASLIC/SLICO
    :return ndarray: segmentation

    >>> np.random.seed(0)
    >>> img = np.random.random((100, 150, 3))
    >>> slic = segment_slic_img2d(img, 20, 0.2)
    >>> slic.shape
    (100, 150)
    >>> img = np.random.random((150, 100))
    >>> slic = segment_slic_img2d(img, 20, 0.2)
    >>> slic.shape
    (150, 100)
    """
    logging.debug('Init SLIC superpixels 2d RGB clustering with params size=%i and'
                  ' regul=%f for image dims %r', sp_size, relative_compact, img.shape)
    nb_pixels = np.prod(img.shape[:2])

    if not isinstance(img, np.ndarray):
        img = np.array(img)
    if img.ndim == 2:  # duplicate channels to be like RGB
        img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    # scale image values
    if img.min() != 0. or img.max() != 1.:
        img = (img - img.min()) / float(img.max() - img.min())

    # set native SLIC parameters
    slic_nb_spx = int(nb_pixels / (sp_size ** 2))
    slic_compact = (sp_size * relative_compact) ** 1.5
    logging.debug('Starting SLIC with params NB=%i & compat=%f for image %r',
                  slic_nb_spx, slic_compact, img.shape)
    # run SLIC segmentation
    slic_segments = ski_segm.slic(img, n_segments=slic_nb_spx,
                                  compactness=slic_compact,
                                  sigma=1, enforce_connectivity=True,
                                  slic_zero=slico)
    logging.debug('SLIC finished')
    # slic_segments, _, _ = ski_segm.relabel_sequential(slic_segments)
    # fix: unconnected segments - [ndimage.label(slic==i)[1]
    #                              for i in range(slic.max() + 1)]
    # slic_segments = measure.label(slic_segments, neighbors=4)
    return np.array(slic_segments) 
Example #20
Source File: ncut_prepare.py    From segmentator with BSD 3-Clause "New" or "Revised" License
def norm_grap_cut(image, max_edge=10000000, max_rec=4, compactness=2,
                  nrSupPix=2000):
    """Normalized graph cut wrapper for 2D numpy arrays.

    Parameters
    ----------
        image: np.ndarray (2D)
            Volume histogram.
        max_edge: float
            The maximum possible value of an edge in the RAG. This corresponds
            to an edge between identical regions. This is used to put self
            edges in the RAG.
        compactness: float
            From skimage slic_superpixels.py slic function:
            Balances color proximity and space proximity. Higher values give
            more weight to space proximity, making superpixel shapes more
            square/cubic. This parameter depends strongly on image contrast and
            on the shapes of objects in the image.
        nrSupPix: int, positive
            The (approximate) number of superpixels in the region adjacency
            graph.

    Returns
    -------
        labels2, labels1: np.ndarray (2D)
            Segmented volume histogram mask image. Each label has a unique
            identifier.

    """
    # scale for uint8 conversion
    image = np.round(255 / image.max() * image)
    image = image.astype('uint8')

    # scikit implementation expects rgb format (shape: NxMx3)
    image = np.tile(image, (3, 1, 1))
    image = np.transpose(image, (1, 2, 0))

    labels1 = slic(image, compactness=compactness, n_segments=nrSupPix,
                   sigma=2)
    # region adjacency graph (rag)
    g = graph.rag_mean_color(image, labels1, mode='similarity_and_proximity')
    labels2 = graph.cut_normalized(labels1, g, max_edge=max_edge,
                                   num_cuts=1000, max_rec=max_rec)
    return labels2, labels1 
Example #21
Source File: segmentation.py    From ViolenceDetection with Apache License 2.0
def _augment_images(self, images, random_state, parents, hooks):
        #import time
        nb_images = len(images)
        #p_replace_samples = self.p_replace.draw_samples((nb_images,), random_state=random_state)
        n_segments_samples = self.n_segments.draw_samples((nb_images,), random_state=random_state)
        seeds = random_state.randint(0, 10**6, size=(nb_images,))
        for i in sm.xrange(nb_images):
            #replace_samples = ia.new_random_state(seeds[i]).binomial(1, p_replace_samples[i], size=(n_segments_samples[i],))
            # TODO this results in an error when n_segments is 0
            replace_samples = self.p_replace.draw_samples((n_segments_samples[i],), random_state=ia.new_random_state(seeds[i]))
            #print("n_segments", n_segments_samples[i], "replace_samples.shape", replace_samples.shape)
            #print("p", p_replace_samples[i])
            #print("replace_samples", replace_samples)

            if np.max(replace_samples) == 0:
                # not a single superpixel would be replaced by its average color,
                # i.e. the image would not be changed, so just keep it
                pass
            else:
                image = images[i]

                orig_shape = image.shape
                if self.max_size is not None:
                    size = max(image.shape[0], image.shape[1])
                    if size > self.max_size:
                        resize_factor = self.max_size / size
                        new_height, new_width = int(image.shape[0] * resize_factor), int(image.shape[1] * resize_factor)
                        image = ia.imresize_single_image(image, (new_height, new_width), interpolation=self.interpolation)

                #image_sp = np.random.randint(0, 255, size=image.shape).astype(np.uint8)
                image_sp = np.copy(image)
                #time_start = time.time()
                segments = segmentation.slic(image, n_segments=n_segments_samples[i], compactness=10)
                #print("seg", np.min(segments), np.max(segments), n_segments_samples[i])
                #print("segmented in %.4fs" % (time.time() - time_start))
                #print(np.bincount(segments.flatten()))
                #time_start = time.time()
                nb_channels = image.shape[2]
                for c in sm.xrange(nb_channels):
                    # segments+1 here because otherwise regionprops always misses
                    # the last label
                    regions = measure.regionprops(segments+1, intensity_image=image[..., c])
                    for ridx, region in enumerate(regions):
                        # with mod here, because slic can sometimes create more superpixel
                        # than requested. replace_samples then does not have enough
                        # values, so we just start over with the first one again.
                        if replace_samples[ridx % len(replace_samples)] == 1:
                            #print("changing region %d of %d, channel %d, #indices %d" % (ridx, np.max(segments), c, len(np.where(segments == ridx)[0])))
                            mean_intensity = region.mean_intensity
                            image_sp_c = image_sp[..., c]
                            image_sp_c[segments == ridx] = mean_intensity
                #print("colored in %.4fs" % (time.time() - time_start))

                if orig_shape != image.shape:
                    image_sp = ia.imresize_single_image(image_sp, orig_shape[0:2], interpolation=self.interpolation)

                images[i] = image_sp
        return images 
Example #22
Source File: ace.py    From ACE with MIT License
def create_patches(self, method='slic', discovery_images=None,
                     param_dict=None):
    """Creates a set of image patches using superpixel methods.

    This method takes in the concept discovery images and transforms it to a
    dataset made of the patches of those images.

    Args:
      method: The superpixel method used for creating image patches. One of
        'slic', 'watershed', 'quickshift', 'felzenszwalb'.
      discovery_images: Images used for creating patches. If None, the images in
        the target class folder are used.

      param_dict: Contains parameters of the superpixel method used in the form
                of {'param1':[a,b,...], 'param2':[z,y,x,...], ...}. For instance
                {'n_segments':[15,50,80], 'compactness':[10,10,10]} for slic
                method.
    """
    if param_dict is None:
      param_dict = {}
    dataset, image_numbers, patches = [], [], []
    if discovery_images is None:
      raw_imgs = self.load_concept_imgs(
          self.target_class, self.num_discovery_imgs)
      self.discovery_images = raw_imgs
    else:
      self.discovery_images = discovery_images
    if self.num_workers:
      pool = multiprocessing.Pool(self.num_workers)
      outputs = pool.map(
          lambda img: self._return_superpixels(img, method, param_dict),
          self.discovery_images)
      for fn, sp_outputs in enumerate(outputs):
        image_superpixels, image_patches = sp_outputs
        for superpixel, patch in zip(image_superpixels, image_patches):
          dataset.append(superpixel)
          patches.append(patch)
          image_numbers.append(fn)
    else:
      for fn, img in enumerate(self.discovery_images):
        image_superpixels, image_patches = self._return_superpixels(
            img, method, param_dict)
        for superpixel, patch in zip(image_superpixels, image_patches):
          dataset.append(superpixel)
          patches.append(patch)
          image_numbers.append(fn)
    self.dataset, self.image_numbers, self.patches = (
        np.array(dataset), np.array(image_numbers), np.array(patches))
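The _return_superpixels helper used above is not included in this example. A hedged, hypothetical sketch (not the actual ACE implementation) of how such a helper might expand the param_dict described in the docstring into one slic() call per parameter index:

from skimage.segmentation import slic

def return_superpixels_sketch(img, param_dict=None):
    # hypothetical helper: one segmentation per index of the parameter lists
    if param_dict is None:
        param_dict = {'n_segments': [15, 50, 80], 'compactness': [10, 10, 10]}
    n_runs = len(next(iter(param_dict.values())))
    segmentations = []
    for i in range(n_runs):
        kwargs = {name: values[i] for name, values in param_dict.items()}
        segmentations.append(slic(img, **kwargs))
    return segmentations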