Python cv2.meanStdDev() Examples
The following are 16 code examples of cv2.meanStdDev().
You may also want to check out all available functions and classes of the cv2 module.
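
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what cv2.meanStdDev() returns: one mean and one standard deviation per channel, each as an N x 1 column vector, with an optional mask restricting which pixels are counted.

import cv2
import numpy as np

img = np.random.randint(0, 256, size=(48, 64, 3), dtype=np.uint8)  # stand-in BGR image
mean, std = cv2.meanStdDev(img)
print(mean.shape, std.shape)  # (3, 1) (3, 1): one row per channel

# restrict the statistics to a region of interest with a mask
mask = np.zeros(img.shape[:2], dtype=np.uint8)
mask[10:30, 20:50] = 255
mean_roi, std_roi = cv2.meanStdDev(img, mask=mask)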
Example #1
Source File: spfunctions.py From spfeas with MIT License
def fourier_transform(ch_bd):
    dft = cv2.dft(np.float32(ch_bd), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)

    # get the Power Spectrum
    magnitude_spectrum = 20. * np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))

    psd1D = azimuthal_avg(magnitude_spectrum)

    return list(cv2.meanStdDev(psd1D))
Example #2
Source File: utils.py From SSD-Pytorch with Apache License 2.0
def cal_mean_std(images_dir):
    """
    Given the root directory of the image data, compute the overall
    per-channel mean and standard deviation of the images.
    :param images_dir:
    :return:
    """
    img_filenames = os.listdir(images_dir)
    m_list, s_list = [], []
    for img_filename in tqdm(img_filenames):
        img = cv2.imread(images_dir + '/' + img_filename)
        img = img / 255.0
        m, s = cv2.meanStdDev(img)
        m_list.append(m.reshape((3,)))
        s_list.append(s.reshape((3,)))
    print(m_list)
    m_array = np.array(m_list)
    s_array = np.array(s_list)
    m = m_array.mean(axis=0, keepdims=True)
    s = s_array.mean(axis=0, keepdims=True)
    print('mean: ', m[0][::-1])
    print('std: ', s[0][::-1])
    return m
Example #3
Source File: stats.py From pytorch-planet-amazon with Apache License 2.0
def main():
    jpg_inputs = find_inputs(JPGPATH, types=('.jpg',), prefix=PREFIX)
    tif_inputs = find_inputs(TIFPATH, types=('.tif',), prefix=PREFIX)

    jpg_stats = []
    for f in jpg_inputs:
        img = cv2.imread(f[1])
        mean, std = cv2.meanStdDev(img)
        jpg_stats.append(np.array([mean[::-1] / 255, std[::-1] / 255]))
    jpg_vals = np.mean(jpg_stats, axis=0)
    print(jpg_vals)

    tif_stats = []
    for f in tif_inputs:
        img = cv2.imread(f[1], -1)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
        mean, std = cv2.meanStdDev(img)
        tif_stats.append(np.array([mean, std]))
    tif_vals = np.mean(tif_stats, axis=0)
    print(tif_vals)
Example #4
Source File: Inria.py From segmentation-networks-benchmark with MIT License
def compute_mean_std(dataset):
    """
    https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation
    """
    one_over_255 = float(1. / 255.)
    global_mean = np.zeros(3, dtype=np.float64)
    global_var = np.zeros(3, dtype=np.float64)

    n_items = len(dataset)
    for image_fname in dataset:
        x = read_rgb(image_fname) * one_over_255
        mean, stddev = cv2.meanStdDev(x)
        global_mean += np.squeeze(mean)
        global_var += np.squeeze(stddev) ** 2

    return global_mean / n_items, np.sqrt(global_var)
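
The helper above accumulates squared per-image standard deviations; the linked Stack Exchange answer covers how such per-image statistics combine. For reference, here is a hedged sketch of one way to fold per-image means and standard deviations into dataset-level statistics, assuming every image contributes the same number of pixels (the helper name combine_stats is ours, not Inria.py's).

import numpy as np

def combine_stats(means, stds):
    """Combine per-image channel means/stds into dataset-level statistics,
    assuming every image contributes the same number of pixels."""
    means = np.asarray(means)   # shape (n_images, channels)
    stds = np.asarray(stds)
    dataset_mean = means.mean(axis=0)
    # law of total variance: average within-image variance plus variance of the per-image means
    dataset_var = (stds ** 2).mean(axis=0) + means.var(axis=0)
    return dataset_mean, np.sqrt(dataset_var)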
Example #5
Source File: crop_imgs.py From TENet with MIT License
def worker(path, select_folder, waste_img_folder, crop_sz, stride, thres_sz, cont_var_thresh, freq_var_thresh):
    img_name = os.path.basename(path)
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)

    h, w, c = img.shape

    h_space = np.arange(0, h - crop_sz + 1, stride)
    if h - (h_space[-1] + crop_sz) > thres_sz:
        h_space = np.append(h_space, h - crop_sz)
    w_space = np.arange(0, w - crop_sz + 1, stride)
    if w - (w_space[-1] + crop_sz) > thres_sz:
        w_space = np.append(w_space, w - crop_sz)

    index = 0
    for x in h_space:
        for y in w_space:
            index += 1
            patch_name = img_name.replace('.png', '_s{:05d}.png'.format(index))
            patch = img[x:x + crop_sz, y:y + crop_sz, :]

            im_gray = patch[:, :, 1]
            [mean, var] = cv2.meanStdDev(im_gray)
            freq_var = cv2.Laplacian(im_gray, cv2.CV_8U).var()

            if var > cont_var_thresh and freq_var > freq_var_thresh:
                cv2.imwrite(os.path.join(select_folder, patch_name), patch)
            else:
                cv2.imwrite(os.path.join(waste_img_folder, patch_name), patch)

    return 'Processing {:s} ...'.format(img_name)
Example #6
Source File: reinhard_color_normalizer.py From StainTools with MIT License
def get_mean_std(self, I):
    """
    Get mean and standard deviation of each channel.

    :param I: Image RGB uint8.
    :return:
    """
    assert is_uint8_image(I), "Should be a RGB uint8 image"
    I1, I2, I3 = self.lab_split(I)
    m1, sd1 = cv.meanStdDev(I1)
    m2, sd2 = cv.meanStdDev(I2)
    m3, sd3 = cv.meanStdDev(I3)
    means = m1, m2, m3
    stds = sd1, sd2, sd3
    return means, stds
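
For context, per-channel statistics like these are the ingredients of Reinhard-style color transfer, which shifts and scales each channel from the source statistics toward the target statistics. Below is a minimal sketch of that idea operating on whole 3-channel arrays; it is a simplified illustration with our own helper names, not StainTools' actual transform method.

import cv2
import numpy as np

def channel_stats(img):
    mean, std = cv2.meanStdDev(img)   # each is a (3, 1) column vector
    return mean.ravel(), std.ravel()

def reinhard_transfer(source_lab, target_lab):
    """Match the per-channel mean/std of source_lab to those of target_lab."""
    m_s, s_s = channel_stats(source_lab)
    m_t, s_t = channel_stats(target_lab)
    return (source_lab - m_s) * (s_t / (s_s + 1e-8)) + m_t

# stand-in float32 arrays just to exercise the function
source = np.random.rand(32, 32, 3).astype(np.float32) * 255
target = np.random.rand(32, 32, 3).astype(np.float32) * 255
matched = reinhard_transfer(source, target)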
Example #7
Source File: mytransforms.py From pytorch-planet-amazon with Apache License 2.0
def __call__(self, img):
    # This should still be a H x W x C Numpy/OpenCv compat image, not a Torch Tensor
    assert isinstance(img, np.ndarray)
    mean, std = cv2.meanStdDev(img)
    mean, std = mean.astype(np.float32), std.astype(np.float32)
    img = img.astype(np.float32)
    img = (img - np.squeeze(mean)) / (np.squeeze(std) + self.std_epsilon)
    return img
Example #8
Source File: SamplePreprocessor.py From SimpleHTR with MIT License
def preprocess(img, imgSize, dataAugmentation=False):
    "put img into target img of size imgSize, transpose for TF and normalize gray-values"

    # there are damaged files in IAM dataset - just use black image instead
    if img is None:
        img = np.zeros([imgSize[1], imgSize[0]])

    # increase dataset size by applying random stretches to the images
    if dataAugmentation:
        stretch = (random.random() - 0.5)  # -0.5 .. +0.5
        wStretched = max(int(img.shape[1] * (1 + stretch)), 1)  # random width, but at least 1
        img = cv2.resize(img, (wStretched, img.shape[0]))  # stretch horizontally by factor 0.5 .. 1.5

    # create target image and copy sample image into it
    (wt, ht) = imgSize
    (h, w) = img.shape
    fx = w / wt
    fy = h / ht
    f = max(fx, fy)
    newSize = (max(min(wt, int(w / f)), 1), max(min(ht, int(h / f)), 1))  # scale according to f (result at least 1 and at most wt or ht)
    img = cv2.resize(img, newSize)
    target = np.ones([ht, wt]) * 255
    target[0:newSize[1], 0:newSize[0]] = img

    # transpose for TF
    img = cv2.transpose(target)

    # normalize
    (m, s) = cv2.meanStdDev(img)
    m = m[0][0]
    s = s[0][0]
    img = img - m
    img = img / s if s > 0 else img
    return img
Example #9
Source File: MorseDecoder.py From LSTM_morse with MIT License
def normalize_image(img):
    # normalize
    (m, s) = cv2.meanStdDev(img)
    m = m[0][0]
    s = s[0][0]
    img = img - m
    img = img / s if s > 0 else img
    return img
Example #10
Source File: predefined.py From Yugioh-bot with MIT License
def get_image_stats(img, left=0, top=0, width=0, height=0):
    crop_img = img[top:(top + height), left:(left + width)]
    (means, stds) = cv2.meanStdDev(crop_img)
    stats = np.concatenate([means, stds]).flatten()
    return stats
Example #11
Source File: crop_mats.py From TENet with MIT License
def worker(path, select_folder, waste_folder, img_folder, waste_img_folder, crop_sz, stride, thres_sz, cont_var_thresh, freq_var_thresh):
    img_name = os.path.basename(path)
    img = loadmat(path)
    img = np.asarray(img['ps4k'])

    n_channels = len(img.shape)
    if n_channels == 2:
        h, w = img.shape
    elif n_channels == 3:
        h, w, c = img.shape
    else:
        raise ValueError('Wrong image shape - {}'.format(n_channels))

    h_space = np.arange(0, h - crop_sz + 1, stride)
    if h - (h_space[-1] + crop_sz) > thres_sz:
        h_space = np.append(h_space, h - crop_sz)
    w_space = np.arange(0, w - crop_sz + 1, stride)
    if w - (w_space[-1] + crop_sz) > thres_sz:
        w_space = np.append(w_space, w - crop_sz)

    index = 0
    for x in h_space:
        for y in w_space:
            index += 1
            patch_name = img_name.replace('.mat', '_s{:05d}.mat'.format(index))
            img_patch_name = img_name.replace('.mat', '_s{:05d}.tiff'.format(index))
            if n_channels == 2:
                patch = img[x:x + crop_sz, y:y + crop_sz]
            else:
                patch = img[x:x + crop_sz, y:y + crop_sz, :]

            # im_gray = cv2.cvtColor(patch, cv2.COLOR_RGB2GRAY)
            im_gray = patch[:, :, 1]
            [mean, var] = cv2.meanStdDev(im_gray)
            var = var / mean
            freq_var = cv2.Laplacian(im_gray, cv2.CV_16U).mean()

            if var > cont_var_thresh and freq_var > freq_var_thresh:
                savemat(os.path.join(select_folder, patch_name), {'ps': patch})

                img_patch = np.delete(patch, 2, 2).astype(float) / (2.**16)
                img_patch = img_patch ** (1 / 2.2) * 255.
                img_patch = np.clip(img_patch, 0, 255)
                cv2.imwrite(os.path.join(img_folder, img_patch_name), np.uint8(img_patch))
                # print('saving: %s' % os.path.join(select_folder, patch_name))
            else:
                savemat(os.path.join(waste_folder, patch_name), {'ps': patch})

                # img_patch = np.delete(patch, 2, 2)
                img_patch = np.delete(patch, 2, 2).astype(float) / (2.**16)
                img_patch = img_patch ** (1 / 2.2) * 255.
                img_patch = np.uint8(np.clip(img_patch, 0, 255))
                cv2.imwrite(os.path.join(waste_img_folder, img_patch_name), np.uint8(img_patch))
                # print('saving: %s' % os.path.join(select_folder, patch_name))

    return 'Processing {:s} ...'.format(img_name)
Example #12
Source File: _preprocessors.py From pytorch-widedeep with MIT License
def transform(self, df: pd.DataFrame) -> np.ndarray:
    try:
        self.aap
    except:
        raise NotFittedError(
            "This ImagePreprocessor instance is not fitted yet. "
            "Call 'fit' with appropriate arguments before using this estimator."
        )
    image_list = df[self.img_col].tolist()
    if self.verbose:
        print("Reading Images from {}".format(self.img_path))
    imgs = [cv2.imread("/".join([self.img_path, img])) for img in image_list]

    # finding images with different height and width
    aspect = [(im.shape[0], im.shape[1]) for im in imgs]
    aspect_r = [a[0] / a[1] for a in aspect]
    diff_idx = [i for i, r in enumerate(aspect_r) if r != 1.0]

    if self.verbose:
        print("Resizing")
    resized_imgs = []
    for i, img in tqdm(enumerate(imgs), total=len(imgs), disable=self.verbose != 1):
        if i in diff_idx:
            resized_imgs.append(self.aap.preprocess(img))
        else:
            resized_imgs.append(self.spp.preprocess(img))

    if self.verbose:
        print("Computing normalisation metrics")
    mean_R, mean_G, mean_B = [], [], []
    std_R, std_G, std_B = [], [], []
    for rsz_img in resized_imgs:
        (mean_b, mean_g, mean_r), (std_b, std_g, std_r) = cv2.meanStdDev(rsz_img)
        mean_R.append(mean_r)
        mean_G.append(mean_g)
        mean_B.append(mean_b)
        std_R.append(std_r)
        std_G.append(std_g)
        std_B.append(std_b)
    self.normalise_metrics = dict(
        mean={
            "R": np.mean(mean_R) / 255.0,
            "G": np.mean(mean_G) / 255.0,
            "B": np.mean(mean_B) / 255.0,
        },
        std={
            "R": np.mean(std_R) / 255.0,
            "G": np.mean(std_G) / 255.0,
            "B": np.mean(std_B) / 255.0,
        },
    )
    return np.asarray(resized_imgs)
Example #13
Source File: getBlobsFeats.py From tierpsy-tracker with MIT License
def _getBlobFeatures(blob_cnt, blob_mask, roi_image, roi_corner):
    if blob_cnt.size > 0:
        area = float(cv2.contourArea(blob_cnt))

        # use the best rotated bounding box, the fitEllipse function produces bad results quite often
        # this method is better to obtain an estimate of the worm length than eccentricity
        (CMx, CMy), (L, W), angle = cv2.minAreaRect(blob_cnt)
        # adjust CM from the ROI reference frame to the image reference
        CMx += roi_corner[0]
        CMy += roi_corner[1]
        if L == 0 or W == 0:
            return None  # something went wrong, abort

        if W > L:
            L, W = W, L  # switch if width is larger than length
        quirkiness = np.sqrt(1 - W**2 / L**2)

        hull = cv2.convexHull(blob_cnt)  # for the solidity
        solidity = area / cv2.contourArea(hull)
        perimeter = float(cv2.arcLength(blob_cnt, True))
        compactness = 4 * np.pi * area / (perimeter**2)

        # calculate the mean intensity of the worm
        intensity_mean, intensity_std = cv2.meanStdDev(roi_image, mask=blob_mask)
        intensity_mean = intensity_mean[0, 0]
        intensity_std = intensity_std[0, 0]

        # calculate hu moments, they are scale and rotation invariant
        hu_moments = cv2.HuMoments(cv2.moments(blob_cnt))

        # save everything into the proper output format
        mask_feats = (CMx, CMy, area, perimeter, L, W, quirkiness, compactness,
                      angle, solidity, intensity_mean, intensity_std,
                      *hu_moments.flatten())
    else:
        return tuple([np.nan] * 19)

    return mask_feats
Example #14
Source File: spfunctions.py From spfeas with MIT License
def feature_fourier(chBd, blk, scs, end_scale):
    rows, cols = chBd.shape
    scales_half = int(end_scale / 2.0)
    scales_blk = end_scale - blk

    out_len = 0
    pix_ctr = 0

    for i in range(0, rows-scales_blk, blk):
        for j in range(0, cols-scales_blk, blk):
            for k in scs:
                out_len += 2

    # set the output list
    out_list = np.zeros(out_len, dtype='float32')

    for i in range(0, rows-scales_blk, blk):
        for j in range(0, cols-scales_blk, blk):
            for k in scs:
                k_half = int(k / 2.0)
                ch_bd = chBd[i+scales_half-k_half:i+scales_half-k_half+k,
                             j+scales_half-k_half:j+scales_half-k_half+k]

                # get the Fourier Transform
                dft = cv2.dft(np.float32(ch_bd), flags=cv2.DFT_COMPLEX_OUTPUT)
                dft_shift = np.fft.fftshift(dft)

                # get the Power Spectrum
                magnitude_spectrum = 20.0 * np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))
                psd1D = azimuthal_avg(magnitude_spectrum)

                sts = list(cv2.meanStdDev(psd1D))

                # plt.subplot(121)
                # plt.imshow(ch_bd, cmap='gray')
                # plt.subplot(122)
                # plt.imshow(magnitude_spectrum, interpolation='nearest')
                # plt.show()
                # print psd1D
                # sys.exit()

                for st in sts:
                    if np.isnan(st[0][0]):
                        out_list[pix_ctr] = 0.0
                    else:
                        out_list[pix_ctr] = st[0][0]

                    pix_ctr += 1

    out_list[np.isnan(out_list) | np.isinf(out_list)] = 0.0

    return out_list
Example #15
Source File: __init__.py From magicwand with MIT License
def _update(self):
    """Updates an image in the already drawn window."""
    viz = self.img.copy()
    contours = _find_exterior_contours(self.mask)
    viz = cv.drawContours(viz, contours, -1, color=(255,) * 3, thickness=-1)
    viz = cv.addWeighted(self.img, 0.75, viz, 0.25, 0)
    viz = cv.drawContours(viz, contours, -1, color=(255,) * 3, thickness=1)

    self.mean, self.stddev = cv.meanStdDev(self.img, mask=self.mask)
    meanstr = "mean=({:.2f}, {:.2f}, {:.2f})".format(*self.mean[:, 0])
    stdstr = "std=({:.2f}, {:.2f}, {:.2f})".format(*self.stddev[:, 0])
    cv.imshow(self.name, viz)
    cv.displayStatusBar(self.name, ", ".join((meanstr, stdstr)))
Example #16
Source File: functional.py From albumentations with MIT License
def iso_noise(image, color_shift=0.05, intensity=0.5, random_state=None, **kwargs):
    """
    Apply Poisson noise to an image to simulate camera sensor noise.

    Args:
        image (numpy.ndarray): Input image; currently only RGB, uint8 images are supported.
        color_shift (float):
        intensity (float): Multiplication factor for noise values. Values of ~0.5 produce a
            noticeable, yet acceptable level of noise.
        random_state:
        **kwargs:

    Returns:
        numpy.ndarray: Noised image
    """
    if image.dtype != np.uint8:
        raise TypeError("Image must have uint8 channel type")
    if is_grayscale_image(image):
        raise TypeError("Image must be RGB")

    if random_state is None:
        random_state = np.random.RandomState(42)

    one_over_255 = float(1.0 / 255.0)
    image = np.multiply(image, one_over_255, dtype=np.float32)
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    _, stddev = cv2.meanStdDev(hls)

    luminance_noise = random_state.poisson(stddev[1] * intensity * 255, size=hls.shape[:2])
    color_noise = random_state.normal(0, color_shift * 360 * intensity, size=hls.shape[:2])

    hue = hls[..., 0]
    hue += color_noise
    hue[hue < 0] += 360
    hue[hue > 360] -= 360

    luminance = hls[..., 1]
    luminance += (luminance_noise / 255) * (1.0 - luminance)

    image = cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) * 255
    return image.astype(np.uint8)
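
A quick, hypothetical smoke test of the iso_noise helper above on a synthetic RGB frame; it assumes iso_noise and albumentations' is_grayscale_image are in scope, and the variable names below are ours, not albumentations'.

import numpy as np

rng = np.random.RandomState(0)
frame = rng.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # synthetic RGB image
noised = iso_noise(frame, color_shift=0.05, intensity=0.5, random_state=rng)
assert noised.shape == frame.shape and noised.dtype == np.uint8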