Python cv2.COLOR_BGR2YCrCb() Examples
The following are 5 code examples of cv2.COLOR_BGR2YCrCb(). You can go to the original project or source file by following the links above each example, or check out all the available functions and classes of the cv2 module.
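Before working through the examples, note that cv2.COLOR_BGR2YCrCb is a conversion code passed to cv2.cvtColor(): it maps an 8-bit BGR image to the YCrCb color space, with the channels returned in Y, Cr, Cb order. A minimal sketch (the file name 'input.jpg' is just a placeholder):

import cv2

bgr = cv2.imread('input.jpg')                    # OpenCV loads images in BGR order
ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCrCb)   # channels are now Y, Cr, Cb
y, cr, cb = cv2.split(ycrcb)                     # split for per-channel processing
print(ycrcb.shape, y.shape)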
Example #1
Source File: cal_histogram.py From Face-skin-hair-segmentaiton-and-skin-color-evaluation with Apache License 2.0 | 5 votes |
def describe(self, image, mask):
    image = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)
    # Per-channel histograms over the masked region (Y, Cr, Cb).
    hist_y = cv.calcHist([image], [0], mask, self.bins, [0, 256])
    hist_cr = cv.calcHist([image], [1], mask, self.bins, [0, 256])
    hist_cb = cv.calcHist([image], [2], mask, self.bins, [0, 256])
    # Normalize each histogram so it sums to 1.
    hist_y = hist_y / np.sum(hist_y)
    hist_cr = hist_cr / np.sum(hist_cr)
    hist_cb = hist_cb / np.sum(hist_cb)
    # 24 dimensions
    return np.concatenate([hist_y, hist_cr, hist_cb], axis=0).reshape(-1)
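The enclosing class is not shown above. As a hypothetical usage sketch, the method can be attached to a small wrapper whose self.bins is [8] (8 bins per channel, giving the 24-dimensional feature mentioned in the comment); the class name and the dummy data below are assumptions, not part of the project:

import cv2 as cv
import numpy as np

class HistDescriptor:
    def __init__(self, bins):
        self.bins = bins      # e.g. [8] -> 8 bins per channel -> 24-dim feature
    describe = describe       # reuse the function defined in Example #1

image = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)   # dummy BGR image
mask = np.full((64, 64), 255, dtype=np.uint8)                    # keep every pixel

feature = HistDescriptor([8]).describe(image, mask)
print(feature.shape)   # (24,)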
Example #2
Source File: Functions.py From Jalali-Lab-Implementation-of-RAISR with GNU General Public License v3.0 | 5 votes |
def make_dataset(dir):
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                images.append(path)
    return images

# The Python OpenCV library's cv2.COLOR_BGR2YCrCb uses different parameters than MATLAB's color conversion.
# In order to have a fair comparison with the benchmark, we wrote these functions ourselves.
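The MATLAB-matching conversion helpers referred to in that comment are not reproduced here. Purely as an illustration of the difference, a MATLAB-style RGB-to-YCbCr conversion (ITU-R BT.601, studio swing: Y in 16-235, Cb/Cr in 16-240) could be sketched as follows; the function name and rounding details are assumptions, not the Jalali-Lab code:

import numpy as np

def rgb2ycbcr_matlab_style(img):
    # Expects a uint8 RGB image; returns uint8 YCbCr using BT.601 studio-swing coefficients.
    rgb = img.astype(np.float64) / 255.0
    m = np.array([[ 65.481, 128.553,  24.966],
                  [-37.797, -74.203, 112.000],
                  [112.000, -93.786, -18.214]])
    offset = np.array([16.0, 128.0, 128.0])
    ycbcr = rgb @ m.T + offset
    return np.clip(np.round(ycbcr), 0, 255).astype(np.uint8)

By contrast, cv2.COLOR_BGR2YCrCb expects BGR input, returns the chroma channels in Cr, Cb order, and uses the full 0-255 range for all three channels, which is why its results differ from MATLAB's rgb2ycbcr.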
Example #3
Source File: main.py From Traffic-Sign-Detection with MIT License | 5 votes |
def constrastLimit(image):
    img_hist_equalized = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    channels = cv2.split(img_hist_equalized)
    # Equalize only the luma (Y) channel; the chroma channels are left untouched.
    channels[0] = cv2.equalizeHist(channels[0])
    img_hist_equalized = cv2.merge(channels)
    img_hist_equalized = cv2.cvtColor(img_hist_equalized, cv2.COLOR_YCrCb2BGR)
    return img_hist_equalized
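A minimal usage sketch for the function above (the file names are placeholders, not files from the project):

import cv2

image = cv2.imread('sign.jpg')                 # placeholder input path
enhanced = constrastLimit(image)               # function from Example #3
cv2.imwrite('sign_equalized.jpg', enhanced)

Equalizing only the Y channel stretches the brightness contrast while leaving the Cr and Cb chroma untouched, so the colors of the traffic sign are preserved.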
Example #4
Source File: cal_moments.py From Face-skin-hair-segmentaiton-and-skin-color-evaluation with Apache License 2.0 | 4 votes |
def color_moments(image, mask, color_space):
    """
    function: Color Moment Features
    image: raw image
    mask: image mask
    color_space: 'rgb' or 'lab' or 'ycrcb' or 'hsv'
    """
    assert image.shape[:2] == mask.shape
    assert color_space.lower() in ['lab', 'rgb', 'ycrcb', 'hsv']
    if color_space.lower() == 'rgb':
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    elif color_space.lower() == 'hsv':
        image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    elif color_space.lower() == 'lab':
        image = cv.cvtColor(image, cv.COLOR_BGR2LAB)
    elif color_space.lower() == 'ycrcb':
        image = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)
    else:
        raise ValueError("Color space error...")
    # Split image channels
    c1, c2, c3 = cv.split(image)
    color_feature = []
    # Keep only the pixels where the mask is non-zero
    c1 = c1[np.where(mask != 0)]
    c2 = c2[np.where(mask != 0)]
    c3 = c3[np.where(mask != 0)]
    # Extract mean
    mean_1 = np.mean(c1)
    mean_2 = np.mean(c2)
    mean_3 = np.mean(c3)
    # Extract standard deviation
    variance_1 = np.std(c1)
    variance_2 = np.std(c2)
    variance_3 = np.std(c3)
    # Extract skewness (third-order moment)
    skewness_1 = np.mean(np.abs(c1 - mean_1) ** 3) ** (1. / 3)
    skewness_2 = np.mean(np.abs(c2 - mean_2) ** 3) ** (1. / 3)
    skewness_3 = np.mean(np.abs(c3 - mean_3) ** 3) ** (1. / 3)
    color_feature.extend(
        [mean_1, mean_2, mean_3,
         variance_1, variance_2, variance_3,
         skewness_1, skewness_2, skewness_3])
    return color_feature
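A hypothetical call for the feature extractor above; the image and mask paths are placeholders, and the mask is assumed to be a single-channel image that is non-zero on the skin region:

import cv2 as cv
import numpy as np

image = cv.imread('face.jpg')                             # placeholder path
mask = cv.imread('face_mask.png', cv.IMREAD_GRAYSCALE)    # non-zero where skin pixels are

features = color_moments(image, mask, 'ycrcb')
print(len(features))   # 9 values: mean, std and skewness for Y, Cr and Cb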
Example #5
Source File: pipline_test.py From Face-skin-hair-segmentaiton-and-skin-color-evaluation with Apache License 2.0 | 4 votes |
def color_moments(image, mask, color_space):
    """
    function: Color Moment Features
    image: raw image
    mask: image mask
    color_space: 'rgb' or 'lab' or 'ycrcb' or 'hsv'
    """
    assert image.shape[:2] == mask.shape
    assert color_space.lower() in ['lab', 'rgb', 'ycrcb', 'hsv']
    if color_space.lower() == 'rgb':
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    elif color_space.lower() == 'hsv':
        image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    elif color_space.lower() == 'lab':
        image = cv.cvtColor(image, cv.COLOR_BGR2LAB)
    elif color_space.lower() == 'ycrcb':
        image = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)
    else:
        raise ValueError("Color space error...")
    # Split image channels
    c1, c2, c3 = cv.split(image)
    color_feature = []
    # Keep only the pixels where the mask is non-zero
    c1 = c1[np.where(mask != 0)]
    c2 = c2[np.where(mask != 0)]
    c3 = c3[np.where(mask != 0)]
    # Extract mean
    mean_1 = np.mean(c1)
    mean_2 = np.mean(c2)
    mean_3 = np.mean(c3)
    # Extract standard deviation
    variance_1 = np.std(c1)
    variance_2 = np.std(c2)
    variance_3 = np.std(c3)
    # Extract skewness (third-order moment)
    skewness_1 = np.mean(np.abs(c1 - mean_1) ** 3) ** (1. / 3)
    skewness_2 = np.mean(np.abs(c2 - mean_2) ** 3) ** (1. / 3)
    skewness_3 = np.mean(np.abs(c3 - mean_3) ** 3) ** (1. / 3)
    color_feature.extend(
        [mean_1, mean_2, mean_3,
         variance_1, variance_2, variance_3,
         skewness_1, skewness_2, skewness_3])
    return color_feature