Python skimage.feature.peak_local_max() Examples

The following are 14 code examples of skimage.feature.peak_local_max(), drawn from open-source projects. Each example notes the original project, source file, and license. You may also want to check out all available functions/classes of the module skimage.feature, or try the search function.
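Before the project examples, here is a minimal, self-contained sketch of the basic call (the image and parameter values are illustrative only): peak_local_max takes an image and returns an (N, 2) array of (row, col) peak coordinates, optionally constrained by a minimum peak separation and an absolute intensity threshold.

import numpy as np
from skimage.feature import peak_local_max

# Small test image with two bright spots (illustrative data only).
image = np.zeros((50, 50))
image[10, 10] = 1.0
image[30, 40] = 0.8

# (row, col) coordinates of local maxima that are at least 5 pixels
# apart and above an absolute intensity of 0.5.
coordinates = peak_local_max(image, min_distance=5, threshold_abs=0.5)
print(coordinates)  # -> array of (row, col) pairs, here the two spots above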
Example #1
Source File: getFeatures.py    From multi-object-tracking with GNU General Public License v3.0
def getFeatures(img,bbox,use_shi=False):
    n_object = np.shape(bbox)[0]
    N = 0
    temp = np.empty((n_object,),dtype=np.ndarray)   # temporary storage of x,y coordinates
    for i in range(n_object):
        (xmin, ymin, boxw, boxh) = cv2.boundingRect(bbox[i,:,:].astype(int))
        roi = img[ymin:ymin+boxh,xmin:xmin+boxw]
        # cv2.imshow('roi',roi)
        if use_shi:
            corner_response = corner_shi_tomasi(roi)
        else:
            corner_response = corner_harris(roi)
        coordinates = peak_local_max(corner_response,num_peaks=20,exclude_border=2)
        coordinates[:,1] += xmin
        coordinates[:,0] += ymin
        temp[i] = coordinates
        if coordinates.shape[0] > N:
            N = coordinates.shape[0]
    x = np.full((N,n_object),-1)
    y = np.full((N,n_object),-1)
    for i in range(n_object):
        n_feature = temp[i].shape[0]
        x[0:n_feature,i] = temp[i][:,1]
        y[0:n_feature,i] = temp[i][:,0]
    return x,y 
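A standalone sketch of the pattern used in getFeatures above, assuming skimage's checkerboard test image as a stand-in for the tracked ROI: compute a Harris corner response and keep at most num_peaks corners while ignoring a 2-pixel border.

import numpy as np
from skimage.data import checkerboard
from skimage.feature import corner_harris, peak_local_max

roi = checkerboard()                  # stand-in for the tracked ROI
corner_response = corner_harris(roi)  # Harris corner strength map

# At most 20 corner peaks, skipping a 2-pixel border, mirroring the
# call in getFeatures above.
coordinates = peak_local_max(corner_response, num_peaks=20, exclude_border=2)
rows, cols = coordinates[:, 0], coordinates[:, 1]
print(len(coordinates), "corners found")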
Example #2
Source File: grasp.py    From ggcnn with BSD 3-Clause "New" or "Revised" License
def detect_grasps(q_img, ang_img, width_img=None, no_grasps=1):
    """
    Detect grasps in a GG-CNN output.
    :param q_img: Q image network output
    :param ang_img: Angle image network output
    :param width_img: (optional) Width image network output
    :param no_grasps: Max number of grasps to return
    :return: list of Grasps
    """
    local_max = peak_local_max(q_img, min_distance=20, threshold_abs=0.2, num_peaks=no_grasps)

    grasps = []
    for grasp_point_array in local_max:
        grasp_point = tuple(grasp_point_array)

        grasp_angle = ang_img[grasp_point]

        g = Grasp(grasp_point, grasp_angle)
        if width_img is not None:
            g.length = width_img[grasp_point]
            g.width = g.length/2

        grasps.append(g)

    return grasps 
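The function above indexes a second image (the angle map) at each peak found in the quality map. A minimal sketch of that pattern with synthetic arrays; the project-specific Grasp class is omitted.

import numpy as np
from skimage.feature import peak_local_max

q_img = np.random.rand(100, 100)    # synthetic quality map
ang_img = np.random.rand(100, 100)  # synthetic angle map

# Keep at most 3 well-separated peaks above an absolute score.
peaks = peak_local_max(q_img, min_distance=20, threshold_abs=0.2, num_peaks=3)

for peak in peaks:
    point = tuple(peak)             # (row, col) tuple usable as an index
    angle = ang_img[point]          # look up the angle at the peak
    print(point, q_img[point], angle)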
Example #3
Source File: grasp.py    From mvp_grasp with BSD 3-Clause "New" or "Revised" License
def detect_grasps(point_img, ang_img, width_img=None, no_grasps=1, ang_threshold=5, thresh_abs=0.5, min_distance=20):
    local_max = peak_local_max(point_img, min_distance=min_distance, threshold_abs=thresh_abs, num_peaks=no_grasps)

    grasps = []

    for grasp_point_array in local_max:
        grasp_point = tuple(grasp_point_array)

        grasp_angle = ang_img[grasp_point]
        if ang_threshold > 0:
            if grasp_angle > 0:
                grasp_angle = ang_img[grasp_point[0] - ang_threshold:grasp_point[0] + ang_threshold + 1,
                                      grasp_point[1] - ang_threshold:grasp_point[1] + ang_threshold + 1].max()
            else:
                grasp_angle = ang_img[grasp_point[0] - ang_threshold:grasp_point[0] + ang_threshold + 1,
                                      grasp_point[1] - ang_threshold:grasp_point[1] + ang_threshold + 1].min()

        g = Grasp(grasp_point, grasp_angle, value=point_img[grasp_point])
        if width_img is not None:
            g.length = width_img[grasp_point]
            g.width = g.length/2

        grasps.append(g)

    return grasps 
Example #4
Source File: mask_morphology.py    From NucleiDetectron with Apache License 2.0
def skimage_watershed_segmentation(mask, kernel=k_3x3, k=1):
    # mask = cv.dilate(mask, kernel, iterations=k)

    distance = ndimage.distance_transform_edt(mask)
    local_maxi = peak_local_max(distance, indices=False, footprint=kernel, labels=mask)

    markers = measure.label(local_maxi)
    labels_ws = watershed(-distance, markers, mask=mask)

    if labels_ws.max() < 2:
        return [mask], labels_ws

    res_masks = []
    for idx in range(1,  labels_ws.max() + 1):
        m = labels_ws == idx
        if m.sum() > 20:
            res_masks.append(m.astype(np.uint8))
    return res_masks, labels_ws 
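Note that indices=False has since been deprecated and removed from peak_local_max, which now always returns coordinates. A hedged sketch of the same marker-seeding step written against the current API, rebuilding the boolean marker image from the returned coordinates (the mask shape and contents are illustrative):

import numpy as np
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

mask = np.zeros((80, 80), dtype=bool)   # illustrative binary mask
mask[10:40, 10:40] = True
mask[35:70, 35:70] = True

distance = ndimage.distance_transform_edt(mask)

# peak_local_max now returns coordinates; scatter them back into a
# boolean marker image before labelling.
coords = peak_local_max(distance, footprint=np.ones((3, 3)), labels=mask)
local_maxi = np.zeros(distance.shape, dtype=bool)
local_maxi[tuple(coords.T)] = True

markers, _ = ndimage.label(local_maxi)
labels_ws = watershed(-distance, markers, mask=mask)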
Example #5
Source File: mask_morphology.py    From NucleiDetectron with Apache License 2.0
def skimage_random_walker_segmentation(mask, kernel=k_3x3, k=1):
    if mask.dtype != np.bool:
        mask = mask > 0

    distance = ndimage.distance_transform_edt(mask)
    local_maxi = peak_local_max(distance, indices=False, footprint=kernel, labels=mask)

    markers = measure.label(local_maxi)
    markers[~mask] = -1
    labels_rw = random_walker(mask, markers)

    if labels_rw.max() < 2:
        return [mask.astype(np.uint8)], labels_rw

    res_masks = []
    for idx in range(1,  labels_rw.max() + 1):
        m = labels_rw == idx
        if m.sum() > 20:
            res_masks.append(m.astype(np.uint8))
    return res_masks, labels_rw 
Example #6
Source File: test_blob_detection_filters.py    From HistomicsTK with Apache License 2.0
def compare_maxima(input_im, gtruth_im, min_distance=10, threshold_abs=20):
    """Compares image maxima

    Check that the maxima found in an image match the maxima found in a ground truth image.
    This function is a wrapper around `skimage.feature.peak_local_max()`: it calls it on both
    images passed as arguments and asserts that the resulting maxima arrays match.
    """
    gtruth_coordinates = _sort_list(peak_local_max(gtruth_im, min_distance=min_distance,
                                                   threshold_abs=threshold_abs))
    input_coordinates = _sort_list(peak_local_max(input_im, min_distance=min_distance,
                                                  threshold_abs=threshold_abs))
    np.testing.assert_array_equal(gtruth_coordinates, input_coordinates) 
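_sort_list is a project helper that is not shown here; conceptually it just puts both coordinate arrays into a canonical order before they are compared. One possible (hypothetical) equivalent using np.lexsort:

import numpy as np
from skimage.feature import peak_local_max

def sort_coordinates(coords):
    # Sort (row, col) pairs by row, then by column, so two peak arrays
    # can be compared element-wise regardless of detection order.
    order = np.lexsort((coords[:, 1], coords[:, 0]))
    return coords[order]

image = np.random.rand(64, 64) * 255
a = sort_coordinates(peak_local_max(image, min_distance=10, threshold_abs=20))
b = sort_coordinates(peak_local_max(image.copy(), min_distance=10, threshold_abs=20))
np.testing.assert_array_equal(a, b)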
Example #7
Source File: predict_multianimal.py    From DeepLabCut with GNU Lesser General Public License v3.0
def find_local_maxima(scmap, radius, threshold):
    grid = peak_local_max(
        scmap,
        min_distance=radius,
        threshold_abs=threshold,
        exclude_border=False,
        indices=False,
    )
    labels = measurements.label(grid)[0]
    xy = measurements.center_of_mass(grid, labels, range(1, np.max(labels) + 1))
    return np.asarray(xy, dtype=np.int).reshape((-1, 2)) 
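A sketch of the same refinement written against the current API, which no longer accepts indices=False: scatter the returned coordinates into a boolean grid (as in the watershed sketch after Example #4), then label it and take blob centres. Plain int replaces the removed np.int alias.

import numpy as np
from scipy.ndimage import label, center_of_mass
from skimage.feature import peak_local_max

def find_local_maxima(scmap, radius, threshold):
    # Current-API version of the function above.
    coords = peak_local_max(scmap, min_distance=radius,
                            threshold_abs=threshold, exclude_border=False)
    grid = np.zeros(scmap.shape, dtype=bool)
    grid[tuple(coords.T)] = True
    labels, n = label(grid)
    xy = center_of_mass(grid, labels, range(1, n + 1))
    return np.asarray(xy, dtype=int).reshape((-1, 2))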
Example #8
Source File: chessboard.py    From cvcalib with Apache License 2.0
def find_candidates(greyscale_image, neighborhood_size=20, candidate_threshold=.5):
    corner_likelihood = compute_inital_corner_likelihood(greyscale_image)
    # TODO: the absolute threshold should be statistically determined based on actual checkerboard images
    candidates = peak_local_max(corner_likelihood, neighborhood_size, corner_likelihood.max() * candidate_threshold)
    return candidates 
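The two positional arguments above map to min_distance and threshold_abs, so the call is equivalent to the keyword form below. compute_inital_corner_likelihood is project-specific, so an arbitrary response map stands in for its output.

import numpy as np
from skimage.feature import peak_local_max

corner_likelihood = np.random.rand(120, 120)  # stand-in response map
neighborhood_size = 20
candidate_threshold = 0.5

# Same call as above, spelled with keyword arguments.
candidates = peak_local_max(
    corner_likelihood,
    min_distance=neighborhood_size,
    threshold_abs=corner_likelihood.max() * candidate_threshold,
)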
Example #9
Source File: segmentation_labelling.py    From kaggle-heart with MIT License
def breakup_region(component):
    distance = ndi.distance_transform_edt(component)
    skel = skeletonize(component)
    skeldist = distance*skel
    local_maxi = peak_local_max(skeldist, indices=False, footprint=disk(10))
    local_maxi=ndi.binary_closing(local_maxi,structure = disk(4),iterations = 2)
    markers = ndi.label(local_maxi)[0]
    labels = watershed(-distance, markers, mask=component)
    return(labels) 
Example #10
Source File: planner.py    From form2fit with MIT License
def plan(self, suction_scores, place_scores, kit_descriptor_map, object_descriptor_map, suction_mask=None, place_mask=None, img1=None, img2=None):
        if suction_mask is not None:
            suction_scores[suction_mask[:, 0], suction_mask[:, 1]] = 0
        suction_coordinates = peak_local_max(suction_scores, min_distance=0, threshold_rel=0.1)

        if place_mask is not None:
            place_scores[place_mask[:, 0], place_mask[:, 1]] = 0
        place_coordinates = peak_local_max(place_scores, min_distance=0, threshold_rel=0.1)

        combinations = list(product(place_coordinates, suction_coordinates))
        num_rotations = len(kit_descriptor_map)
        B, D, H, W = object_descriptor_map.shape
        object_descriptor_map_flat = object_descriptor_map.view(B, D, H*W)

        distances = []
        rotation_idxs = []
        for place_uv, suction_uv in combinations:
            # index object descriptor map
            suction_uv_flat = torch.from_numpy(np.array((suction_uv[0]*W+suction_uv[1]))).long().cuda()
            object_descriptor = torch.index_select(object_descriptor_map_flat[0], 1, suction_uv_flat).unsqueeze(0)

            kit_descriptors = []
            for r in range(num_rotations):
                place_uv_rot = misc.rotate_uv(np.array([place_uv]), -(360/num_rotations)*r, H, W, cxcy=self.center)[0]
                place_uv_rot_flat = torch.from_numpy(np.array((place_uv_rot[0]*W+place_uv_rot[1]))).long().cuda()
                kit_descriptor_map_flat = kit_descriptor_map[r].view(kit_descriptor_map.shape[1], -1)
                kit_descriptors.append(torch.index_select(kit_descriptor_map_flat, 1, place_uv_rot_flat))
            kit_descriptors = torch.stack(kit_descriptors)

            # compute L2 distances
            diffs = object_descriptor - kit_descriptors
            l2_dists = diffs.pow(2).sum(1).sqrt()

            # store best across rotation
            best_rot_idx = l2_dists.argmin().item()
            l2_dist = l2_dists[best_rot_idx].item()
            distances.append(l2_dist)
            rotation_idxs.append(best_rot_idx)

        # compute best across candidates
        best_distance_idx = np.argmin(distances)
        best_place_uv, best_suction_uv = combinations[best_distance_idx]

        ret = {
            "best_distance_idx": best_distance_idx,
            "best_distance": distances[best_distance_idx],
            "best_rotation_idx": rotation_idxs[best_distance_idx],
            "best_place_uv": best_place_uv,
            "best_suction_uv": best_suction_uv,
        }

        return ret 
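threshold_rel in the two calls above is relative to the image maximum: peaks weaker than threshold_rel * scores.max() are discarded. A small illustrative sketch:

import numpy as np
from skimage.feature import peak_local_max

scores = np.zeros((20, 20))
scores[5, 5] = 1.0     # strong peak
scores[15, 15] = 0.05  # weak peak, below 10% of the maximum

# threshold_rel=0.1 discards any peak weaker than 0.1 * scores.max();
# min_distance=1 here for simplicity (the code above uses 0 so that
# adjacent peaks are all kept).
coords = peak_local_max(scores, min_distance=1, threshold_rel=0.1)
print(coords)  # only [[5 5]] survives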
Example #11
Source File: min_distance_label.py    From starfish with MIT License
def run(
            self,
            binary_mask_collection: BinaryMaskCollection,
            *args,
            **kwargs
    ) -> BinaryMaskCollection:
        assert len(binary_mask_collection) == 1
        mask = binary_mask_collection.uncropped_mask(0)

        # calculates the distance of every pixel to the nearest background (0) point
        distance: np.ndarray = distance_transform_edt(mask)

        footprint = np.ones(
            shape=(
                self._minimum_distance_z * 2 + 1,
                self._minimum_distance_xy * 2 + 1,
                self._minimum_distance_xy * 2 + 1,
            ),
            dtype=np.bool,
        )

        # boolean array marking local maxima, excluding any maxima within min_dist
        local_maximum: np.ndarray = peak_local_max(
            distance,
            exclude_border=self._exclude_border,
            indices=False,
            footprint=footprint,
            labels=np.asarray(mask),
        )

        # label the maxima for watershed
        markers, _ = label(local_maximum)

        # run watershed, using the distances in the thresholded image as basins.
        # Uses the original image as a mask, preventing any background pixels from being labeled
        labeled_array: np.ndarray = watershed(-distance, markers, mask=mask)

        return BinaryMaskCollection.from_label_array_and_ticks(
            labeled_array,
            binary_mask_collection._pixel_ticks,
            binary_mask_collection._physical_ticks,
            binary_mask_collection.log,
        ) 
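The footprint built above is a 3-D box whose half-widths are the minimum z and xy distances, so peak spacing respects the anisotropy of the volume. A sketch of the same idea on a small synthetic volume:

import numpy as np
from skimage.feature import peak_local_max

volume = np.random.rand(10, 64, 64)  # synthetic (z, y, x) volume

min_dist_z, min_dist_xy = 1, 3
footprint = np.ones(
    (min_dist_z * 2 + 1, min_dist_xy * 2 + 1, min_dist_xy * 2 + 1),
    dtype=bool,
)

# A voxel counts as a peak only if it is the maximum within the
# 3 x 7 x 7 box around it, i.e. a tighter spacing in z than in x/y.
peaks = peak_local_max(volume, footprint=footprint, exclude_border=False)
print(peaks.shape)  # (N, 3) coordinates as (z, y, x)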
Example #12
Source File: local_max_peak_finder.py    From starfish with MIT License
def _compute_num_spots_per_threshold(self, img: np.ndarray) -> Tuple[np.ndarray, List[int]]:
        """Computes the number of detected spots for each threshold

        Parameters
        ----------
        img : np.ndarray
            The image in which to count spots

        Returns
        -------
        np.ndarray :
            thresholds
        List[int] :
            spot counts
        """

        # thresholds to search over
        thresholds = np.linspace(img.min(), img.max(), num=100)

        # number of spots detected at each threshold
        spot_counts = []

        # where we stop our threshold search
        stop_threshold = None

        if self.verbose and StarfishConfig().verbose:
            threshold_iter = tqdm(thresholds)
            print('Determining optimal threshold ...')
        else:
            threshold_iter = thresholds

        for stop_index, threshold in enumerate(threshold_iter):
            spots = peak_local_max(
                img,
                min_distance=self.min_distance,
                threshold_abs=threshold,
                exclude_border=False,
                indices=True,
                num_peaks=np.inf,
                footprint=None,
                labels=None
            )

            # stop spot finding when the number of detected spots falls below min_num_spots_detected
            if len(spots) <= self.min_num_spots_detected:
                stop_threshold = threshold
                if self.verbose:
                    print(f'Stopping early at threshold={threshold}. Number of spots fell below: '
                          f'{self.min_num_spots_detected}')
                break
            else:
                spot_counts.append(len(spots))

        if stop_threshold is None:
            stop_threshold = thresholds.max()

        if len(thresholds) > 1:
            thresholds = thresholds[:stop_index]
            spot_counts = spot_counts[:stop_index]

        return thresholds, spot_counts 
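A condensed, standalone version of the sweep above: count peaks at a rising series of absolute thresholds and stop once too few spots remain. The image, min_distance, and stopping count are all illustrative.

import numpy as np
from skimage.feature import peak_local_max

img = np.random.rand(128, 128)  # stand-in for the spot image
min_num_spots_detected = 3      # illustrative stopping count

thresholds = np.linspace(img.min(), img.max(), num=100)
spot_counts = []
for threshold in thresholds:
    spots = peak_local_max(img, min_distance=2, threshold_abs=threshold,
                           exclude_border=False)
    if len(spots) <= min_num_spots_detected:
        break                   # too few spots: stop the sweep
    spot_counts.append(len(spots))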
Example #13
Source File: orient_pharynx.py    From tierpsy-tracker with MIT License
def _pharynx_orient(worm_img, min_blob_area):#, min_dist_btw_peaks=5):
    #%%
    
    blur = cv2.GaussianBlur(worm_img,(5,5),0) 
    
    #ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_TOZERO+cv2.THRESH_OTSU)
    th, worm_mask = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    worm_cnt, cnt_area = binaryMask2Contour(worm_mask, min_blob_area=min_blob_area)
    
    worm_mask = np.zeros_like(worm_mask)
    cv2.drawContours(worm_mask, [worm_cnt.astype(np.int32)], 0, 1, -1)
    
    local_maxi = peak_local_max(blur,
                                indices=True, 
                                labels=worm_mask)
    
    #%%
    markers = np.zeros_like(worm_mask, dtype=np.uint8)
    kernel = np.ones((3,3),np.uint8)
    for x in local_maxi:
        markers[x[0], x[1]] = 1
    markers = cv2.dilate(markers,kernel,iterations = 1)
    markers = ndi.label(markers)[0]
    #strel = ndi.generate_binary_structure(3, 3)
    #markers = binary_dilation(markers, iterations=3)
    
    labels = watershed(-blur, markers, mask=worm_mask)
    props = regionprops(labels)
    
    #sort coordinates by area (the larger area is the head)
    props = sorted(props, key=lambda x: x.area, reverse=True)
    peaks_dict = {labels[x[0], x[1]]:x[::-1] for x in local_maxi}
    peaks_coords = np.array([peaks_dict[x.label] for x in props])
    
    if DEBUG:
        plt.figure()
        plt.subplot(1,3,1)
        plt.imshow(markers, cmap='gray', interpolation='none')
        
        plt.subplot(1,3,2)
        plt.imshow(labels)
        
        plt.subplot(1,3,3)
        plt.imshow(blur, cmap='gray', interpolation='none')
        
        for x,y in peaks_coords:
            plt.plot(x,y , 'or')
            
    if len(props) != 2:
        return np.full((2,2), np.nan) #invalid points return empty
        
    #%%
    return peaks_coords 
Example #14
Source File: watershed.py    From plantcv with MIT License
def watershed_segmentation(rgb_img, mask, distance=10):
    """Uses the watershed algorithm to detect boundary of objects. Needs a marker file which specifies area which is
       object (white), background (grey), unknown area (black).

    Inputs:
    rgb_img             = image to perform watershed on needs to be 3D (i.e. np.shape = x,y,z not np.shape = x,y)
    mask                = binary image, single channel, object in white and background black
    distance            = min_distance of local maximum

    Returns:
    analysis_images     = list of output images

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param distance: int
    :return analysis_images: list
    """
    params.device += 1

    # Store debug mode
    debug = params.debug
    params.debug = None

    dist_transform = cv2.distanceTransformWithLabels(mask, cv2.DIST_L2, maskSize=0)[0]

    localMax = peak_local_max(dist_transform, indices=False, min_distance=distance, labels=mask)

    markers = ndi.label(localMax, structure=np.ones((3, 3)))[0]
    dist_transform1 = -dist_transform
    labels = watershed(dist_transform1, markers, mask=mask)

    img1 = np.copy(rgb_img)

    for x in np.unique(labels):
        rand_color = color_palette(len(np.unique(labels)))
        img1[labels == x] = rand_color[x]

    img2 = apply_mask(img1, mask, 'black')

    joined = np.concatenate((img2, rgb_img), axis=1)

    estimated_object_count = len(np.unique(markers)) - 1

    # Reset debug mode
    params.debug = debug
    if params.debug == 'print':
        print_image(dist_transform, os.path.join(params.debug_outdir, str(params.device) + '_watershed_dist_img.png'))
        print_image(joined, os.path.join(params.debug_outdir, str(params.device) + '_watershed_img.png'))
    elif params.debug == 'plot':
        plot_image(dist_transform, cmap='gray')
        plot_image(joined)

    outputs.add_observation(variable='estimated_object_count', trait='estimated object count',
                            method='plantcv.plantcv.watershed', scale='none', datatype=int,
                            value=estimated_object_count, label='none')

    # Store images
    outputs.images.append([dist_transform, joined])

    return joined