Python cv2.Laplacian() Examples
The following are 30
code examples of cv2.Laplacian().
You can go to the original project or source file by following the links above each example.
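Before the project examples, here is a minimal sketch of the basic call pattern (the file name is a placeholder, not taken from any example below). cv2.Laplacian(src, ddepth) computes a second-derivative response; a floating-point ddepth such as cv2.CV_64F preserves negative values, and cv2.convertScaleAbs() maps the result back to 8-bit for display.

import cv2

img = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder path
lap = cv2.Laplacian(img, cv2.CV_64F, ksize=3)         # signed second-derivative response
lap_display = cv2.convertScaleAbs(lap)                # absolute value, saturated to 8-bit
cv2.imshow('laplacian', lap_display)
cv2.waitKey(0)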
Example #1
Source File: pycv2.py From vrequest with MIT License
def laplacian(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Laplacian(s, cv2.CV_16S, ksize=3)
    s = cv2.convertScaleAbs(s)
    cv2.imshow('nier', s)
    return s

    # ret, binary = cv2.threshold(s, 40, 255, cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x, y, w, h = cv2.boundingRect(c)
    #     if w > 5 and h > 10:
    #         cv2.rectangle(v, (x, y), (x + w, y + h), (155, 155, 0), 1)
    # cv2.imshow('nier2', v)
    # cv2.waitKey()
    # cv2.destroyAllWindows()
Example #2
Source File: BlurDetection.py From python-- with GNU General Public License v3.0
def _lapulaseDetection(self, imgName):
    """
    :param strdir: directory containing the file
    :param name: file name
    :return: blur score of the image
    """
    # step 1: preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    # step 2: Laplacian operator, compute the score
    resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
    score = resLap.var()
    print("Laplacian score of given image is %s" % str(score))
    # step 3: draw and save the image (shared logic; should be factored out of this method)
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_lapulaseDetection_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    # display
    cv2.imwrite(newPath, newImg)  # save the image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    # step 4: return the score
    return score
Example #3
Source File: OpenCV_var.py From Image-Blur-Detection with GNU General Public License v3.0
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()

# accuracy_score(y, y_pred)
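As a rough usage sketch (the file name and the threshold of 100 are assumptions, not part of this project; 100 is the default used in Example #15 and should be tuned per dataset), the focus measure above can be turned into a blurry/sharp decision:

import cv2

image = cv2.imread('photo.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder path
fm = variance_of_laplacian(image)
label = 'blurry' if fm < 100.0 else 'sharp'            # threshold is an assumption; tune it
print('%s (focus measure = %.1f)' % (label, fm))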
Example #4
Source File: find_best_quality_images.py From ALPR_System with Apache License 2.0
def get_best_images(plate_images, num_img_return):
    """
    Get the top num_img_return quality images (the ones with the least blur).
    The Laplacian function returns a value indicating how blurry the image is.
    The lower the value, the blurrier the image.
    """
    # first, pick the images with the largest area because the bigger the image,
    # the bigger the characters on the plate
    if len(plate_images) > (num_img_return + 2):
        plate_images = sorted(plate_images, key=lambda x: x[0].shape[0] * x[0].shape[1], reverse=True)[:(num_img_return + 2)]

    # secondly, pick the images with the least blur
    # img[0] because plate_images = [plate image, char on plate]
    if len(plate_images) > num_img_return:
        plate_images = sorted(plate_images, key=lambda img: cv2.Laplacian(img[0], cv2.CV_64F).var(), reverse=True)[:num_img_return]

    return plate_images
Example #5
Source File: FocusStack.py From focusstack with Apache License 2.0
def doLap(image):
    # YOU SHOULD TUNE THESE VALUES TO SUIT YOUR NEEDS
    kernel_size = 5     # Size of the Laplacian window
    blur_size = 5       # How big of a kernel to use for the Gaussian blur
    # Generally, keeping these two values the same or very close works well
    # Also, odd numbers, please...

    blurred = cv2.GaussianBlur(image, (blur_size, blur_size), 0)
    return cv2.Laplacian(blurred, cv2.CV_64F, ksize=kernel_size)

#
#   This routine finds the points of best focus in all images and produces a merged result...
#
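That merge routine is not reproduced on this page; the sketch below only illustrates the usual focus-stacking idea of keeping, per pixel, the frame whose absolute Laplacian response from doLap() is largest. It assumes the frames are already aligned and is not the project's actual implementation.

import cv2
import numpy as np

def naive_focus_merge(images):
    # images: list of aligned BGR frames with identical shapes
    grays = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in images]
    focus_maps = np.stack([np.abs(doLap(g)) for g in grays])  # per-frame sharpness maps
    best = np.argmax(focus_maps, axis=0)                      # sharpest frame index per pixel
    stack = np.stack(images)
    rows, cols = np.indices(best.shape)
    return stack[best, rows, cols]                            # take each pixel from its sharpest frame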
Example #6
Source File: process.py From lowpolypy with MIT License
def get_laplace_points(self, image: np.ndarray, num_points=500) -> np.ndarray:
    if num_points <= 0:
        return np.zeros((0, 2), dtype=np.uint8)
    image = cv2.GaussianBlur(image, (15, 15), 0)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # the original passed 19 positionally, where it lands on the 'dst' slot of
    # cv2.Laplacian's signature; ksize is the likely intended parameter
    image = np.uint8(np.absolute(cv2.Laplacian(image, cv2.CV_64F, ksize=19)))
    image = cv2.GaussianBlur(image, (15, 15), 0)
    image = (image * (255 / image.max())).astype(np.uint8)
    image = image.astype(np.float32) / image.sum()
    if self.options['visualize_laplace']:
        self.visualize_image(image, 'laplace')
    weights = np.ravel(image)
    coordinates = np.arange(0, weights.size, dtype=np.uint32)
    choices = np.random.choice(coordinates, size=num_points, replace=False, p=weights)
    raw_points = np.unravel_index(choices, image.shape)
    points = np.stack(raw_points, axis=-1)[..., ::-1]
    return points
Example #7
Source File: autoRIFT.py From autoRIFT with Apache License 2.0
def preprocess_filt_lap(self):
    '''
    Do the pre processing using Laplacian filter (2.5 min / 4 min).
    '''
    import cv2
    import numpy as np

    if self.zeroMask is not None:
        self.zeroMask = (self.I1 == 0)

    self.I1 = 20.0 * np.log10(self.I1)
    self.I1 = cv2.Laplacian(self.I1, -1, ksize=self.WallisFilterWidth, borderType=cv2.BORDER_CONSTANT)

    self.I2 = 20.0 * np.log10(self.I2)
    self.I2 = cv2.Laplacian(self.I2, -1, ksize=self.WallisFilterWidth, borderType=cv2.BORDER_CONSTANT)
Example #8
Source File: 05_cartoonizing.py From OpenCV-3-x-with-Python-By-Example with MIT License
def cartoonize_image(img, ksize=5, sketch_mode=False):
    num_repetitions, sigma_color, sigma_space, ds_factor = 10, 5, 7, 4

    # Convert image to grayscale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median filter to the grayscale image
    img_gray = cv2.medianBlur(img_gray, 7)

    # Detect edges in the image and threshold it
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=ksize)
    ret, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV)

    # 'mask' is the sketch of the image
    if sketch_mode:
        return cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    # Resize the image to a smaller size for faster computation
    img_small = cv2.resize(img, None, fx=1.0/ds_factor, fy=1.0/ds_factor, interpolation=cv2.INTER_AREA)

    # Apply bilateral filter to the image multiple times
    for i in range(num_repetitions):
        img_small = cv2.bilateralFilter(img_small, ksize, sigma_color, sigma_space)

    img_output = cv2.resize(img_small, None, fx=ds_factor, fy=ds_factor, interpolation=cv2.INTER_LINEAR)

    dst = np.zeros(img_gray.shape)

    # Add the thick boundary lines to the image using the 'AND' operator
    dst = cv2.bitwise_and(img_output, img_output, mask=mask)
    return dst
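A brief usage sketch for the function above (the file name is a placeholder): sketch_mode=True returns only the thresholded edge mask, while the default mode returns the smoothed, edge-outlined cartoon.

import cv2

img = cv2.imread('portrait.jpg')  # placeholder path
cv2.imshow('sketch', cartoonize_image(img, sketch_mode=True))
cv2.imshow('cartoon', cartoonize_image(img))
cv2.waitKey(0)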
Example #9
Source File: plant_features.py From bonnet with GNU General Public License v3.0
def laplacian(mask):
    '''
    Get 2nd order gradients using the Laplacian
    '''
    # blur
    mask = cv2.GaussianBlur(mask, (5, 5), 0)
    # edges with laplacian (the original passed 5 positionally, which lands on
    # the 'dst' slot; ksize is the likely intended parameter)
    laplacian = cv2.Laplacian(mask, cv2.CV_64F, ksize=5)
    # stretch
    laplacian = contrast_stretch(laplacian)
    # cast
    laplacian = np.uint8(laplacian)
    return laplacian
Example #10
Source File: artistic.py From imgaug with MIT License
def _find_edges_laplacian(image, edge_multiplier, from_colorspace):
    image_gray = colorlib.change_colorspace_(np.copy(image),
                                             to_colorspace=colorlib.CSPACE_GRAY,
                                             from_colorspace=from_colorspace)
    image_gray = image_gray[..., 0]
    edges_f = cv2.Laplacian(_normalize_cv2_input_arr_(image_gray / 255.0), cv2.CV_64F)
    edges_f = np.abs(edges_f)
    edges_f = edges_f ** 2
    vmax = np.percentile(edges_f, min(int(90 * (1/edge_multiplier)), 99))
    edges_f = np.clip(edges_f, 0.0, vmax) / vmax

    edges_uint8 = np.clip(np.round(edges_f * 255), 0, 255.0).astype(np.uint8)
    edges_uint8 = _blur_median(edges_uint8, 3)
    edges_uint8 = _threshold(edges_uint8, 50)
    return edges_uint8

# Added in 0.4.0.
Example #11
Source File: cartoonizing.py From Mastering-OpenCV-4-with-Python with MIT License
def sketch_image(img):
    """Sketches the image applying a laplacian operator to detect the edges"""

    # Convert to gray scale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median filter
    img_gray = cv2.medianBlur(img_gray, 5)

    # Detect edges using cv2.Laplacian()
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=5)

    # Threshold the edges image:
    ret, thresholded = cv2.threshold(edges, 70, 255, cv2.THRESH_BINARY_INV)

    return thresholded
Example #12
Source File: video.py From cv with MIT License
def cartoonize_image(img, ds_factor=4, sketch_mode=False):
    # convert to gray scale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # apply median filter
    img_gray = cv2.medianBlur(img_gray, 7)
    # detect edges and threshold the image
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=5)
    ret, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV)
    # mask is the sketch of the image
    if sketch_mode:
        return cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    img_small = cv2.resize(img, None, fx=1.0/ds_factor, fy=1.0/ds_factor, interpolation=cv2.INTER_AREA)
    num_repetitions = 10
    sigma_color = 5
    sigma_space = 7
    size = 5
    # apply bilateral filter multiple times
    for i in range(num_repetitions):
        img_small = cv2.bilateralFilter(img_small, size, sigma_color, sigma_space)

    img_output = cv2.resize(img_small, None, fx=ds_factor, fy=ds_factor, interpolation=cv2.INTER_LINEAR)
    dst = np.zeros(img_gray.shape)
    dst = cv2.bitwise_and(img_output, img_output, mask=mask)
    return dst
Example #13
Source File: Util.py From PReMVOS with MIT License
def geo_dist(img, pts):
    # Import these only on demand since pyximport interferes with pycocotools
    import pyximport
    pyximport.install()
    from ReID_net.datasets.Util import sweep

    img = np.copy(img) / 255.0
    # G = nd.gaussian_gradient_magnitude(img, 1.0)
    img = cv2.GaussianBlur(img, (3, 3), 1.0)
    # G = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
    sobel_abs = cv2.addWeighted(sobelx, 0.5, sobely, 0.5, 0)
    sobel_abs = (sobel_abs[:, :, 0] ** 2 + sobel_abs[:, :, 1] ** 2 + sobel_abs[:, :, 2] ** 2) ** (1 / 2.0)
    # G = (G[:, :, 0] ** 2 + G[:, :, 1] ** 2 + G[:, :, 2] ** 2) ** (1 / 2.0)
    # c = 1 + G * 200
    # c = G / np.max(G)
    # c = sobel_abs / 255.0
    c = 1 + sobel_abs
    # plt.imshow(sobel_abs)
    # plt.colorbar()
    # plt.show()

    dt = np.zeros_like(c)
    dt[:] = 1000
    dt[pts] = 0
    sweeps = [dt, dt[:, ::-1], dt[::-1], dt[::-1, ::-1]]
    costs = [c, c[:, ::-1], c[::-1], c[::-1, ::-1]]
    for i, (a, c) in enumerate(it.cycle(list(zip(sweeps, costs)))):
        # print i,
        if sweep.sweep(a, c) < 1.0 or i >= 40:
            break
    return dt
Example #14
Source File: track_preprocess.py From sanet_relocal_demo with GNU General Public License v3.0
def variance_of_laplacian(image):
    return cv2.Laplacian(image, cv2.CV_32FC3).var()
Example #15
Source File: detection.py From BlurDetection2 with MIT License
def estimate_blur(image: numpy.array, threshold: int = 100):
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    blur_map = cv2.Laplacian(image, cv2.CV_64F)
    score = numpy.var(blur_map)
    return blur_map, score, bool(score < threshold)
Example #16
Source File: toolbox_opencv.py From remi with Apache License 2.0
def on_new_image_listener(self, emitter):
    try:
        self.image_source = emitter
        border = OpencvBilateralFilter.border_type[self.border] if type(self.border) == str else self.border
        self.set_image_data(cv2.Laplacian(emitter.img, -1, borderType=border))
    except Exception:
        print(traceback.format_exc())
Example #17
Source File: 03_image_derivatives.py From Practical-Computer-Vision with MIT License
def plot_cv_img(input_image, output_image1, output_image2):
    """
    Converts an image from BGR to RGB and plots
    """
    fig, ax = plt.subplots(nrows=1, ncols=3)

    ax[0].imshow(input_image, cmap='gray')
    ax[0].set_title('Input Image')
    ax[0].axis('off')

    ax[1].imshow(output_image1, cmap='gray')
    ax[1].set_title('Laplacian Image')
    ax[1].axis('off')

    ax[2].imshow(output_image2, cmap='gray')
    ax[2].set_title('Laplacian of Gaussian')
    ax[2].axis('off')

    # ax[3].imshow(output_image3, cmap='gray')
    # ax[3].set_title('Sharpened Image')
    # ax[3].axis('off')

    plt.savefig('../figures/03_image_derivatives_log.png')
    plt.show()
Example #18
Source File: 03_image_derivatives.py From Practical-Computer-Vision with MIT License
def main():
    # read an image
    img = cv2.imread('../figures/building_crop.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # sobel
    x_sobel = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
    y_sobel = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)

    # laplacian
    lapl = cv2.Laplacian(img, cv2.CV_64F, ksize=5)

    # gaussian blur
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    # laplacian of gaussian
    log = cv2.Laplacian(blur, cv2.CV_64F, ksize=5)

    # res = np.hstack([img, x_sobel, y_sobel])
    # plt.imshow(res, cmap='gray')
    # plt.axis('off')
    # plt.show()
    # lapl = np.asarray(lapl, dtype=np.uint)

    # Do plot
    plot_cv_img(img, lapl, log)
Example #19
Source File: video.py From cvcalib with Apache License 2.0
def try_approximate_corners_blur(self, board_dims, sharpness_threshold):
    sharpness = cv2.Laplacian(self.frame, cv2.CV_64F).var()
    if sharpness < sharpness_threshold:
        return False
    found, corners = cv2.findChessboardCorners(self.frame, board_dims)
    self.current_image_points = corners
    return found
Example #20
Source File: laplace_filter.py From plantcv with MIT License
def laplace_filter(gray_img, ksize, scale):
    """This filtering method identifies and highlights fine edges based on the 2nd derivative. It is very
       sensitive to edges but will also amplify background noise. ddepth = -1 specifies that the output image
       will have the same depth as the input image.

    Inputs:
    gray_img = Grayscale image data
    ksize    = aperture size used to calculate the 2nd derivative filter; specifies the size of the kernel
               (must be an odd integer: 1, 3, 5, ...)
    scale    = scaling factor applied (multiplied) to the computed Laplacian values (scale = 1 is unscaled)

    Returns:
    lp_filtered = Laplacian-filtered image

    :param gray_img: numpy.ndarray
    :param ksize: int
    :param scale: int
    :return lp_filtered: numpy.ndarray
    """

    lp_filtered = cv2.Laplacian(src=gray_img, ddepth=-1, ksize=ksize, scale=scale)
    params.device += 1

    if params.debug == 'print':
        print_image(lp_filtered,
                    os.path.join(params.debug_outdir,
                                 str(params.device) + '_lp_out_k' + str(ksize) + '_scale' + str(scale) + '.png'))
    elif params.debug == 'plot':
        plot_image(lp_filtered, cmap='gray')

    return lp_filtered
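A minimal usage sketch for the function above, assuming PlantCV's usual import convention and a placeholder file name; the debug setting mirrors the params.debug branches in the code.

import cv2
from plantcv import plantcv as pcv

pcv.params.debug = 'plot'                             # or 'print' to save the debug image
gray = cv2.imread('leaf.png', cv2.IMREAD_GRAYSCALE)   # placeholder path
edges = pcv.laplace_filter(gray_img=gray, ksize=1, scale=1)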
Example #21
Source File: detect_focus.py From pi-tracking-telescope with MIT License
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()

# initialize the camera and grab a reference to the raw camera capture
Example #22
Source File: detect_focus2.py From pi-tracking-telescope with MIT License
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()
Example #23
Source File: focus.py From pi-tracking-telescope with MIT License
def variance_of_laplacian(self, image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()
Example #24
Source File: opencv_py.py From python-urbanPlanning with MIT License
def edgeDetection(inputImg_edge):
    imgEdge = cv2.imread(inputImg_edge, cv2.IMREAD_GRAYSCALE)  # read the image
    sobelHorizontal = cv2.Sobel(imgEdge, cv2.CV_64F, 1, 0, ksize=5)  # Sobel filter, horizontal; see help(cv2.Sobel) for the parameters
    """
    help(cv2.Sobel)
    Help on built-in function Sobel:
    . @param src input image (the image to be processed).
    . @param dst output image of the same size and the same number of channels as src.
    . @param ddepth output image depth, see @ref filter_depths "combinations"; in the case of
    .        8-bit input images it will result in truncated derivatives. -1 keeps the source depth;
    .        the destination depth must be at least the source depth. cv2.CV_64F is used here to
    .        avoid truncated derivatives.
    . @param dx order of the derivative x; 0 means no derivative in that direction (usually 0, 1 or 2).
    . @param dy order of the derivative y; same as for dx.
    . @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
    . @param scale optional scale factor for the computed derivative values; by default, no scaling is
    .        applied (see cv::getDerivKernels for details).
    . @param delta optional delta value that is added to the results prior to storing them in dst.
    . @param borderType pixel extrapolation method, see cv::BorderTypes.
    . @sa Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
    """
    sobelVertical = cv2.Sobel(imgEdge, cv2.CV_64F, 0, 1, ksize=5)  # Sobel filter, vertical
    laplacian = cv2.Laplacian(imgEdge, cv2.CV_64F)  # Laplacian edge detector
    canny = cv2.Canny(imgEdge, 50, 240)  # Canny edge detector
    # print(imgEdge)

    cv2.namedWindow('img')
    # cv2.imshow('original', imgEdge)
    # cv2.imshow('sobel horizontal', sobelHorizontal)  # display the output image
    # cv2.imwrite(os.path.join(rootDirectory, 'sobel horizontal.jpg'), sobelHorizontal)
    # cv2.imshow('sobel vertical', sobelVertical)
    # cv2.imwrite(os.path.join(rootDirectory, 'sobel vertical.jpg'), sobelVertical)
    cv2.imshow('laplacian', laplacian)
    cv2.imwrite(os.path.join(rootDirectory, 'laplacian.jpg'), laplacian)
    # cv2.imshow('canny', canny)
    # cv2.imwrite(os.path.join(rootDirectory, 'canny.jpg'), canny)
    cv2.waitKey()  # detect edges and corners
Example #25
Source File: view-mongo-images.py From smart-zoneminder with MIT License
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()
Example #26
Source File: face_detect_server.py From smart-zoneminder with MIT License
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()
Example #27
Source File: filters.py From VIP with MIT License
def cube_filter_highpass(array, mode='laplacian', verbose=True, **kwargs):
    """ Apply ``frame_filter_highpass`` to the frames of a 3d or 4d cube.

    Parameters
    ----------
    array : numpy ndarray
        Input cube, 3d or 4d.
    mode : str, optional
        ``mode`` parameter to the ``frame_filter_highpass`` function.
        Defaults to a Laplacian high-pass filter.
    verbose : bool, optional
        If ``True`` timing and progress bar are shown.
    **kwargs : dict
        Passed through to the ``frame_filter_highpass`` function.

    Returns
    -------
    filtered : numpy ndarray
        High-pass filtered cube.
    """
    array_out = np.empty_like(array)

    if array.ndim == 3:
        for i in Progressbar(range(array.shape[0]), verbose=verbose):
            array_out[i] = frame_filter_highpass(array[i], mode=mode, **kwargs)
    elif array.ndim == 4:
        for i in Progressbar(range(array.shape[1]), verbose=verbose):
            for lam in range(array.shape[0]):
                array_out[lam][i] = frame_filter_highpass(array[lam][i], mode=mode, **kwargs)
    else:
        raise TypeError('Input array is not a 3d or 4d cube')

    return array_out
Example #28
Source File: detect_servers_tpu.py From edge-tpu-servers with MIT License
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()
Example #29
Source File: crop_imgs.py From TENet with MIT License
def worker(path, select_folder, waste_img_folder, crop_sz, stride, thres_sz, cont_var_thresh, freq_var_thresh):
    img_name = os.path.basename(path)
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)

    h, w, c = img.shape

    h_space = np.arange(0, h - crop_sz + 1, stride)
    if h - (h_space[-1] + crop_sz) > thres_sz:
        h_space = np.append(h_space, h - crop_sz)
    w_space = np.arange(0, w - crop_sz + 1, stride)
    if w - (w_space[-1] + crop_sz) > thres_sz:
        w_space = np.append(w_space, w - crop_sz)

    index = 0
    for x in h_space:
        for y in w_space:
            index += 1
            patch_name = img_name.replace('.png', '_s{:05d}.png'.format(index))
            patch = img[x:x + crop_sz, y:y + crop_sz, :]

            im_gray = patch[:, :, 1]
            [mean, var] = cv2.meanStdDev(im_gray)
            freq_var = cv2.Laplacian(im_gray, cv2.CV_8U).var()

            if var > cont_var_thresh and freq_var > freq_var_thresh:
                cv2.imwrite(os.path.join(select_folder, patch_name), patch)
            else:
                cv2.imwrite(os.path.join(waste_img_folder, patch_name), patch)

    return 'Processing {:s} ...'.format(img_name)
Example #30
Source File: main.py From Traffic-Sign-Detection with MIT License
def LaplacianOfGaussian(image):
    LoG_image = cv2.GaussianBlur(image, (3, 3), 0)      # parameter
    gray = cv2.cvtColor(LoG_image, cv2.COLOR_BGR2GRAY)
    LoG_image = cv2.Laplacian(gray, cv2.CV_8U, 3, 3, 2)  # parameter
    LoG_image = cv2.convertScaleAbs(LoG_image)
    return LoG_image
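For reference, the extra positional arguments in the cv2.Laplacian call above land on the dst/ksize/scale slots of OpenCV's Python signature, so the intent is clearer with explicit keywords. A hedged rewrite of the same Laplacian-of-Gaussian pipeline (a sketch, not a drop-in replacement for the project's function):

import cv2

def laplacian_of_gaussian(image, ksize=3):
    blurred = cv2.GaussianBlur(image, (3, 3), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    log = cv2.Laplacian(gray, cv2.CV_8U, ksize=ksize)  # scale/delta left at their defaults
    return cv2.convertScaleAbs(log)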