Python skimage.feature.canny() Examples

The following are 17 code examples of skimage.feature.canny(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module skimage.feature, or try the search function.
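Before the project examples, here is a minimal, self-contained sketch of the basic call. The test image and the sigma value are illustrative choices, not taken from any example below.

from skimage import data, feature

# Built-in grayscale test image, scaled to floats in [0, 1].
image = data.camera() / 255.0

# sigma controls the Gaussian smoothing applied before the gradient is
# computed, so larger values keep only coarser edges.
edges = feature.canny(image, sigma=2.0)

print(edges.dtype, edges.sum())  # boolean edge mask; count of edge pixels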
Example #1
Source File: rectify.py    From facade-segmentation with MIT License
def _extract_lines(img, edges=None, mask=None, min_line_length=20, max_line_gap=3):
    global __i__
    __i__ += 1

    if edges is None:
        edges = canny(rgb2grey(img))
    if mask is not None:
        edges = edges & mask

    # figure()
    # subplot(131)
    # imshow(img)
    # subplot(132)
    # imshow(edges)
    # subplot(133)
    # if mask is not None:
    #     imshow(mask, cmap=cm.gray)
    # savefig('/home/shared/Projects/Facades/src/data/for-labelme/debug/foo/{:06}.jpg'.format(__i__))

    lines = np.array(probabilistic_hough_line(edges, line_length=min_line_length, line_gap=max_line_gap))

    return lines 
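The pattern above (Canny edges feeding skimage.transform.probabilistic_hough_line) can be reproduced outside the facade-segmentation code base. A rough stand-alone sketch with illustrative parameter values:

from skimage import data, feature
from skimage.color import rgb2gray
from skimage.transform import probabilistic_hough_line

img = data.astronaut()                    # sample RGB image
edges = feature.canny(rgb2gray(img))      # boolean edge map
lines = probabilistic_hough_line(edges, line_length=20, line_gap=3)
# Each entry is ((x0, y0), (x1, y1)) for one detected line segment.
print(len(lines))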
Example #2
Source File: estimate_sharpness.py    From DeepFaceLab with GNU General Public License v3.0
def compute(image):
    # type: (numpy.ndarray) -> float
    """Compute the sharpness metric for the given data."""

    # convert the image to double for further processing
    image = image.astype(np.float64)

    # Edge detection using Canny and Sobel: Canny edge detection is done to
    # classify the blocks as edge or non-edge blocks, and Sobel edge
    # detection is done for the purpose of edge width measurement.
    from skimage.feature import canny
    canny_edges = canny(image)
    sobel_edges = sobel(image)

    # edge width calculation
    marziliano_widths = marziliano_method(sobel_edges, image)

    # sharpness metric calculation
    return _calculate_sharpness_metric(image, canny_edges, marziliano_widths) 
Example #3
Source File: imgOp.py    From TextDetector with GNU General Public License v3.0
def image2edge(img, mode=None):
    '''_image2edge(img)

    convert image to edge map
    img: 2D_numpy_array
    Return 2D_numpy_array '''
    if mode == 'canny':
        img = image_norm(img)
        edgeim = numpy.uint8(canny(img)) * 255
        return edgeim
    if mode == 'sobel':
        img = image_norm(img)
        edgeim = sobel(img) * 255
        return edgeim
    img = numpy.float32(img)
    im1 = scipy.ndimage.filters.sobel(img, axis=0, mode='constant', cval=0.0)
    im2 = scipy.ndimage.filters.sobel(img, axis=1, mode='constant', cval=0.0)
    return (abs(im1) + abs(im2)) / 2 
Example #4
Source File: setup.py    From multimodal-vae-public with MIT License
def build_edge_dataset(in_dir, out_dir, sigma=3):
    """Generate a dataset of (canny) edge-detected images.

    @param in_dir: string
                   input directory of images.
    @param out_dir: string
                    output directory of images.
    @param sigma: float (default: 3)
                  smoothness for edge detection.
    """
    image_paths = os.listdir(in_dir)
    n_images = len(image_paths)
    for i, image_path in enumerate(image_paths):
        print('Building edge-detected dataset: [%d/%d] images.' % (i + 1, n_images))
        image_full_path = os.path.join(in_dir, image_path)
        image = Image.open(image_full_path).convert('L')
        image_npy = np.asarray(image).astype(np.float64) / 255.
        image_npy = feature.canny(image_npy, sigma=sigma)
        image_npy = image_npy.astype(np.uint8) * 255
        image = Image.fromarray(image_npy)
        image.save(os.path.join(out_dir, image_path)) 
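A single-image version of the same Canny-to-PNG conversion, with placeholder file names, might look like this:

import numpy as np
from PIL import Image
from skimage import feature

# 'example.jpg' / 'example_edges.png' are placeholder paths.
image = Image.open('example.jpg').convert('L')
arr = np.asarray(image, dtype=np.float64) / 255.
edge_arr = feature.canny(arr, sigma=3).astype(np.uint8) * 255
Image.fromarray(edge_arr).save('example_edges.png')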
Example #5
Source File: planar_imaging.py    From pylinac with MIT License
def _phantom_radius_calc(self) -> float:
        """Determine the radius of the phantom.

        The radius is determined by finding the largest of the detected blobs of the canny image and taking
        its major axis length.

        Returns
        -------
        radius : float
            The radius of the phantom in pixels. The actual value is not important; it is used for scaling the
            distances to the low and high contrast ROIs.
        """
        big_circle_idx = np.argsort([self._regions[roi].major_axis_length for roi in self._blobs])[-1]
        circle_roi = self._regions[self._blobs[big_circle_idx]]
        radius = circle_roi.major_axis_length / 3.35
        return radius 
Example #6
Source File: planar_imaging.py    From pylinac with MIT License
def _phantom_angle_calc(self) -> float:
        """Determine the angle of the phantom.

        This is done by searching for square-like boxes of the canny image. There are usually two: one lead and
        one copper. The box with the highest intensity (lead) is identified. The angle from the center of the lead
        square bounding box and the phantom center determines the phantom angle.

        Returns
        -------
        angle : float
            The angle in degrees.
        """
        circle = CollapsedCircleProfile(self.phantom_center, self.phantom_radius * 0.79, self.image,
                                        width_ratio=0.04, ccw=True)
        circle.ground()
        circle.filter(size=0.01)
        peak_idx = circle.find_fwxm_peaks(threshold=0.6, max_number=1)[0]
        shift_percent = peak_idx / len(circle.values)
        shift_radians = shift_percent * 2 * np.pi
        shift_radians_corrected = 2*np.pi - shift_radians
        return np.degrees(shift_radians_corrected) 
Example #7
Source File: planar_imaging.py    From pylinac with MIT License
def _phantom_center_calc(self) -> Point:
        """Determine the phantom center.

        This is done by searching for circular ROIs of the canny image. Those that are circular and roughly the
        same size as the biggest circle ROI are all sampled for the center of the bounding box. The values are
        averaged over all the detected circles to give a more robust value.

        Returns
        -------
        center : Point
        """
        if self._phantom_center is not None:
            return self._phantom_center
        circles = [roi for roi in self._blobs if
                   np.isclose(self._regions[roi].major_axis_length, self.phantom_radius * 3.35, rtol=0.3)]

        # get average center of all circles
        circle_rois = [self._regions[roi] for roi in circles]
        y = np.mean([bbox_center(roi).y for roi in circle_rois])
        x = np.mean([bbox_center(roi).x for roi in circle_rois])
        return Point(x, y) 
Example #8
Source File: FCN_CrackAnalysis.py    From FCN_for_crack_recognition with MIT License
def get_edges(self, detector='sobel'):
        if detector == 'sobel':
            img = filters.sobel(self.img_gray)
        elif detector == 'canny1':
            img = feature.canny(self.img_gray, sigma=1)
        elif detector == 'canny3':
            img = feature.canny(self.img_gray, sigma=3)
        elif detector == 'scharr':
            img = filters.scharr(self.img_gray)
        elif detector == 'prewitt':
            img = filters.prewitt(self.img_gray)
        elif detector == 'roberts':
            img = filters.roberts(self.img_gray)
        return img 
Example #9
Source File: Canny.py    From PyRAT with Mozilla Public License 2.0
def canny(*args, **kwargs):
    return Canny(*args, **kwargs).run(**kwargs) 
Example #10
Source File: Canny.py    From PyRAT with Mozilla Public License 2.0
def filter(self, array, *args, **kwargs):
        array[np.isnan(array)] = 0.0
        array = feature.canny(array, sigma=self.sigma)
        return array 
Example #11
Source File: planar_imaging.py    From pylinac with MIT License
def _regions(self):
        """All the regions of the canny image that were labeled."""
        return self._get_canny_regions() 
Example #12
Source File: planar_imaging.py    From pylinac with MIT License
def _get_canny_regions(self, sigma=2, percentiles=(0.001, 0.01)):
        """Compute the canny edges of the image and return the connected regions found."""
        # copy, filter, and ground the image
        img_copy = copy.copy(self.image)
        img_copy.filter(kind='gaussian', size=sigma)
        img_copy.ground()

        # compute the canny edges with very low thresholds (detects nearly everything)
        lo_th, hi_th = np.percentile(img_copy, percentiles)
        c = feature.canny(img_copy, low_threshold=lo_th, high_threshold=hi_th)

        # label the canny edge regions
        labeled = measure.label(c)
        regions = measure.regionprops(labeled, intensity_image=img_copy)
        return regions 
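The canny-label-regionprops pipeline used here can be sketched on a plain NumPy array, outside pylinac's image wrapper; the percentile values simply mirror the defaults above:

import numpy as np
from skimage import data, feature, measure

img = data.coins().astype(float)

# Very permissive thresholds taken from low percentiles of the image,
# mirroring the approach above.
lo_th, hi_th = np.percentile(img, (0.001, 0.01))
edges = feature.canny(img, sigma=2, low_threshold=lo_th, high_threshold=hi_th)

# Group connected edge pixels into labeled regions and measure them.
labeled = measure.label(edges)
regions = measure.regionprops(labeled, intensity_image=img)
print(len(regions), max(r.major_axis_length for r in regions))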
Example #13
Source File: vision.py    From geoseg with MIT License
def canny_edge(img, sigma=1):
    """
    args:
        img : 2D or 3D array
    return:
        edge: outline of image
    """
    if len(img.shape) == 3:
        img = rgb2gray(img)
    edge_bool = feature.canny(img, sigma)
    edge_img = np.zeros((edge_bool.shape), np.uint8)
    edge_img[edge_bool] = 255
    return edge_img 
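A quick usage check with a built-in RGB image (purely illustrative; it relies on the numpy, feature, and rgb2gray imports already present in vision.py):

from skimage import data

edge_map = canny_edge(data.astronaut(), sigma=2)
print(edge_map.dtype, edge_map.max())  # uint8, 255 if any edges were found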
Example #14
Source File: utils.py    From DeepExplain with MIT License
def plot(data, xi=None, cmap='RdBu_r', axis=plt, percentile=100, dilation=3.0, alpha=0.8):
    dx, dy = 0.05, 0.05
    xx = np.arange(0.0, data.shape[1], dx)
    yy = np.arange(0.0, data.shape[0], dy)
    xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
    extent = xmin, xmax, ymin, ymax
    cmap_xi = plt.get_cmap('Greys_r')
    cmap_xi.set_bad(alpha=0)
    overlay = None
    if xi is not None:
        # Compute edges (to overlay to heatmaps later)
        xi_greyscale = xi if len(xi.shape) == 2 else np.mean(xi, axis=-1)
        in_image_upscaled = transform.rescale(xi_greyscale, dilation, mode='constant')
        edges = feature.canny(in_image_upscaled).astype(float)
        edges[edges < 0.5] = np.nan
        edges[:5, :] = np.nan
        edges[-5:, :] = np.nan
        edges[:, :5] = np.nan
        edges[:, -5:] = np.nan
        overlay = edges

    abs_max = np.percentile(np.abs(data), percentile)
    abs_min = abs_max

    if len(data.shape) == 3:
        data = np.mean(data, 2)
    axis.imshow(data, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
    if overlay is not None:
        axis.imshow(overlay, extent=extent, interpolation='none', cmap=cmap_xi, alpha=alpha)
    axis.axis('off')
    return axis 
Example #15
Source File: images.py    From pulse2percept with BSD 3-Clause "New" or "Revised" License
def filter(self, filt, **kwargs):
        """Filter the image

        Parameters
        ----------
        filt : str
            Image filter. Additional parameters can be passed as keyword
            arguments. The following filters are supported:

            *  'sobel': Edge filter the image using the `Sobel filter
               <https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.sobel>`_.
            *  'scharr': Edge filter the image using the `Scharr filter
               <https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.scharr>`_.
            *  'canny': Edge filter the image using the `Canny algorithm
               <https://scikit-image.org/docs/stable/api/skimage.feature.html#skimage.feature.canny>`_.
               You can also specify ``sigma``, ``low_threshold``,
               ``high_threshold``, ``mask``, and ``use_quantiles``.
            *  'median': Return local median of the image.
        **kwargs :
            Additional parameters passed to the filter

        Returns
        -------
        stim : `ImageStimulus`
            A copy of the stimulus object with the filtered image
        """
        if not isinstance(filt, str):
            raise TypeError("'filt' must be a string, not %s." % type(filt))
        img = self.data.reshape(self.img_shape)
        if filt.lower() == 'sobel':
            img = sobel(img, **kwargs)
        elif filt.lower() == 'scharr':
            img = scharr(img, **kwargs)
        elif filt.lower() == 'canny':
            img = canny(img, **kwargs)
        elif filt.lower() == 'median':
            img = median(img, **kwargs)
        else:
            raise ValueError("Unknown filter '%s'." % filt)
        return ImageStimulus(img, electrodes=self.electrodes,
                             metadata=self.metadata) 
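For the 'canny' branch, the keyword arguments listed in the docstring map directly onto skimage.feature.canny. A small illustrative call (the threshold values are made up):

from skimage import data, feature

img = data.camera() / 255.0

# With use_quantiles=True, low_threshold and high_threshold are interpreted
# as quantiles of the gradient magnitude and must lie in [0, 1].
edges = feature.canny(img, sigma=2,
                      low_threshold=0.7, high_threshold=0.95,
                      use_quantiles=True)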
Example #16
Source File: eval.py    From SharpNet with GNU General Public License v3.0
def compute_depth_boundary_error(edges_gt, pred, mask=None, low_thresh=0.15, high_thresh=0.3):
    # skip dbe if there is no ground truth distinct edge
    if np.sum(edges_gt) == 0:
        dbe_acc = np.nan
        dbe_com = np.nan
        edges_est = np.empty(pred.shape).astype(int)
    else:

        # normalize est depth map from 0 to 1
        pred_normalized = pred.copy().astype('f')
        pred_normalized[pred_normalized == 0] = np.nan
        pred_normalized = pred_normalized - np.nanmin(pred_normalized)
        pred_normalized = pred_normalized / np.nanmax(pred_normalized)

        # apply canny filter
        edges_est = feature.canny(pred_normalized, sigma=np.sqrt(2), low_threshold=low_thresh,
                                  high_threshold=high_thresh)

        # compute distance transform for chamfer metric
        D_gt = ndimage.distance_transform_edt(1 - edges_gt)
        D_est = ndimage.distance_transform_edt(1 - edges_est)

        max_dist_thr = 10.  # Threshold for local neighborhood

        mask_D_gt = D_gt < max_dist_thr  # truncate distance transform map

        E_fin_est_filt = edges_est * mask_D_gt  # compute shortest distance for all predicted edges
        if mask is None:
            mask = np.ones(shape=E_fin_est_filt.shape)
        E_fin_est_filt = E_fin_est_filt * mask
        D_gt = D_gt * mask

        if np.sum(E_fin_est_filt) == 0:  # assign MAX value if no edges could be detected in prediction
            dbe_acc = max_dist_thr
            dbe_com = max_dist_thr
        else:
            # accuracy: directed chamfer distance of predicted edges towards gt edges
            dbe_acc = np.nansum(D_gt * E_fin_est_filt) / np.nansum(E_fin_est_filt)

            # completeness: sum of undirected chamfer distances of predicted and gt edges
            ch1 = D_gt * edges_est  # dist(predicted,gt)
            ch1[ch1 > max_dist_thr] = max_dist_thr  # truncate distances
            ch2 = D_est * edges_gt  # dist(gt, predicted)
            ch2[ch2 > max_dist_thr] = max_dist_thr  # truncate distances
            res = ch1 + ch2  # summed distances
            dbe_com = np.nansum(res) / (np.nansum(edges_est) + np.nansum(edges_gt))  # normalized

    return dbe_acc, dbe_com, edges_est, D_est 
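A stripped-down sketch of the distance-transform ("chamfer") idea used above, on two toy edge maps made up for illustration:

import numpy as np
from scipy import ndimage

# Toy binary edge maps: ground truth and prediction, one row apart.
edges_gt = np.zeros((5, 5), dtype=int)
edges_gt[2, :] = 1
edges_est = np.zeros((5, 5), dtype=int)
edges_est[3, :] = 1

# Distance from every pixel to the nearest ground-truth edge pixel.
D_gt = ndimage.distance_transform_edt(1 - edges_gt)

# Directed chamfer distance: mean distance of predicted edge pixels to the
# closest ground-truth edge (here every predicted pixel is exactly 1 away).
dbe_acc = np.sum(D_gt * edges_est) / np.sum(edges_est)
print(dbe_acc)  # 1.0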
Example #17
Source File: __init__.py    From aim with MIT License
def segment(img_bgr, h_blur=13, v_blur=9):
    BW = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(BW, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    denoised = rank.median(BW, disk(5))
    gradient_denoised = rank.gradient(denoised, disk(1))

    gradient_0 = rank.gradient(img_bgr[:, :, 0], disk(1))
    gradient_1 = rank.gradient(img_bgr[:, :, 1], disk(1))
    gradient_2 = rank.gradient(img_bgr[:, :, 2], disk(1))

    sobelx64f = cv2.Sobel(BW, cv2.CV_64F, 1, 0, ksize=5)
    abs_sobel64f = np.absolute(sobelx64f)
    sobel_8u = np.uint8(abs_sobel64f)
    img_canny = canny(BW)

    _, contours_thresh, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    _, contours_0, _ = cv2.findContours(gradient_0, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    _, contours_1, _ = cv2.findContours(gradient_1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    _, contours_2, _ = cv2.findContours(gradient_2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    _, contours_denoised, _ = cv2.findContours(gradient_denoised, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    _, contours_sobel, _ = cv2.findContours(sobel_8u, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    _, contours_canny, _ = cv2.findContours(img_as_ubyte(img_canny), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    contours = contours_0 + contours_1 + contours_2 + contours_denoised + contours_sobel + contours_canny

    # bbox = utils.remove_overlaps(contours)
    bbox = 0 # No big bounding box

    temp = np.zeros_like(BW)

    if bbox != 0:
        for bb in bbox:
            temp = cv2.rectangle(temp, (bb.x1, bb.y1), (bb.x2, bb.y2), (255, 255, 255), 1)

    for c in contours_thresh:
        x, y, w, h = cv2.boundingRect(c)
        temp = cv2.rectangle(temp, (x, y), (x + w, y + h), (255, 255, 255), 1)

    # Horizontal Blurring filter
    size = h_blur # 11
    kmb = np.zeros((size, size))
    kmb[size // 2, :] = np.ones(size)
    kmb = kmb/size

    # Apply horizontal blurring here
    temp = cv2.filter2D(temp, -1, kmb)
    _, contours_all_h, _ = cv2.findContours(temp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    # Vertical Blurring filter
    size = v_blur # 13
    kmb = np.zeros((size, size))
    kmb[:, size // 2] = np.ones(size)
    kmb = kmb/size

    # Apply vertical blurring here
    temp = cv2.filter2D(temp, -1, kmb)
    _, contours_all_v, _ = cv2.findContours(temp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    return contours_all_v, contours_all_h
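A usage sketch; the file path is a placeholder, and note that the three-value cv2.findContours unpacking above assumes OpenCV 3.x:

import cv2

img_bgr = cv2.imread('screenshot.png')  # placeholder path; OpenCV loads BGR
contours_v, contours_h = segment(img_bgr, h_blur=13, v_blur=9)
print(len(contours_v), len(contours_h))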