Python cv2.COLOR_RGB2YCR_CB Examples
The following are 4 code examples of cv2.COLOR_RGB2YCR_CB. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
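Before the examples, here is a minimal sketch of what the flag does on its own; the file name 'sample.jpg' is a placeholder, and the channels are reordered to RGB first because cv2.imread returns BGR while COLOR_RGB2YCR_CB expects RGB input:

import cv2

# Load an image; OpenCV reads it in BGR order, so reorder to RGB first,
# because COLOR_RGB2YCR_CB expects an RGB input.
bgr = cv2.imread('sample.jpg')  # placeholder path
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

# Convert RGB -> YCrCb. The output channels are ordered Y, Cr, Cb.
ycrcb = cv2.cvtColor(rgb, cv2.COLOR_RGB2YCR_CB)
y, cr, cb = cv2.split(ycrcb)
print(ycrcb.shape, ycrcb.dtype)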
Example #1
Source File: skin_detector.py From SkinDetector with MIT License | 6 votes |
def get_ycrcb_mask(img, debug=False):
    assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
    assert img.ndim == 3, 'skin detection can only work on color images'

    logger.debug('getting ycrcb mask')

    lower_thresh = numpy.array([90, 100, 130], dtype=numpy.uint8)
    upper_thresh = numpy.array([230, 120, 180], dtype=numpy.uint8)

    img_ycrcb = cv2.cvtColor(img, cv2.COLOR_RGB2YCR_CB)
    msk_ycrcb = cv2.inRange(img_ycrcb, lower_thresh, upper_thresh)
    msk_ycrcb[msk_ycrcb < 128] = 0
    msk_ycrcb[msk_ycrcb >= 128] = 1

    if debug:
        scripts.display('input', img)
        scripts.display('mask_ycrcb', msk_ycrcb)

    return msk_ycrcb.astype(float)
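A hypothetical call of this helper could look as follows; 'face.jpg' is a placeholder path, and the image is converted to RGB because the function passes it straight to COLOR_RGB2YCR_CB:

import cv2
import numpy

bgr = cv2.imread('face.jpg')                    # placeholder path
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)      # the function expects RGB input
mask = get_ycrcb_mask(rgb)                      # float mask of 0.0 / 1.0 values
skin_only = rgb * mask[:, :, None].astype(numpy.uint8)  # keep only skin pixels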
Example #2
Source File: data.py From Keras-GAN-Animeface-Character with MIT License | 5 votes |
def normalize4gan(im):
    '''
    Convert colorspace and scale the input into the [-1, 1] range, as described in ganhacks.
    '''
    # im = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB).astype(np.float32)  # HSV... not helpful.
    im = im.astype(np.float32)
    im /= 128.0
    im -= 1.0  # now in [-1, 1]
    return im
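A quick sanity check of this helper might look like the following sketch; the random array stands in for a real uint8 RGB training image:

import numpy as np

im = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
im = normalize4gan(im)
print(im.dtype, im.min(), im.max())  # float32, roughly -1.0 .. 0.99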
Example #3
Source File: test_color.py From imgaug with MIT License | 4 votes |
def test_every_colorspace(self):
    def _image_to_channel(image, cspace):
        if cspace == iaa.CSPACE_YCrCb:
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)
            return image_cvt[:, :, 0:0+1]
        elif cspace == iaa.CSPACE_HSV:
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            return image_cvt[:, :, 2:2+1]
        elif cspace == iaa.CSPACE_HLS:
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            return image_cvt[:, :, 1:1+1]
        elif cspace == iaa.CSPACE_Lab:
            if hasattr(cv2, "COLOR_RGB2Lab"):
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
            else:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
            return image_cvt[:, :, 0:0+1]
        elif cspace == iaa.CSPACE_Luv:
            if hasattr(cv2, "COLOR_RGB2Luv"):
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
            else:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            return image_cvt[:, :, 0:0+1]
        else:
            assert cspace == iaa.CSPACE_YUV
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            return image_cvt[:, :, 0:0+1]

    # Max differences between input image and image after augmentation
    # when no child augmenter is used (for the given example image below).
    # For some colorspaces the conversion to input colorspace isn't perfect.
    # Values were manually checked.
    max_diff_expected = {
        iaa.CSPACE_YCrCb: 1,
        iaa.CSPACE_HSV: 0,
        iaa.CSPACE_HLS: 0,
        iaa.CSPACE_Lab: 2,
        iaa.CSPACE_Luv: 4,
        iaa.CSPACE_YUV: 1
    }

    image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))

    for cspace in self.valid_colorspaces:
        with self.subTest(colorspace=cspace):
            child = _BatchCapturingDummyAugmenter()
            aug = iaa.WithBrightnessChannels(
                children=child,
                to_colorspace=cspace)

            image_aug = aug(image=image)

            expected = _image_to_channel(image, cspace)
            diff = np.abs(
                image.astype(np.int32) - image_aug.astype(np.int32))
            assert np.all(diff <= max_diff_expected[cspace])
            assert np.array_equal(child.last_batch.images[0], expected)
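Outside of the test harness, the augmenter exercised above might be used roughly like this (a sketch assuming imgaug >= 0.4, where iaa.WithBrightnessChannels and the CSPACE_* constants are available); note that the test's reference conversion for the YCrCb colorspace uses cv2.COLOR_RGB2YCR_CB:

import numpy as np
import imgaug.augmenters as iaa

# Add a random offset to the brightness channel after converting to YCrCb.
aug = iaa.WithBrightnessChannels(iaa.Add((-30, 30)), to_colorspace=iaa.CSPACE_YCrCb)

image = np.arange(6 * 6 * 3).astype(np.uint8).reshape((6, 6, 3))
image_aug = aug(image=image)
print(image_aug.shape)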
Example #4
Source File: lbp.py From CGvsPhoto with MIT License | 4 votes |
def compute_hist(image, mode = 'ltc'):
    hist_1 = dict()
    hist_2 = dict()
    hist_error_1 = dict()
    hist_error_2 = dict()
    for i in classes.keys():
        hist_1[i] = 0
        hist_2[i] = 0
        hist_error_1[i] = 0
        hist_error_2[i] = 0

    image = cv2.cvtColor(image*255, cv2.COLOR_RGB2YCR_CB)
    # image = compute_jpeg_coef(image)
    # error_clock = time.clock()
    error = compute_error_image(image)
    # error_dur = time.clock() - error_clock
    # print('Error image computation time : ' + str(error_dur) + 'ms')

    # code_1_dur = 0
    for i in range(1, image.shape[0] - 2):
        for j in range(1, image.shape[1] - 2):
            if mode == 'lbp':
                # code_1_clock = time.clock()
                b = compute_code(image[i-1:i+2, j-1:j+2,0], mode)
                hist_1[b] += 1
                # code_1_dur += time.clock() - code_1_clock
                b = compute_code(image[i-1:i+2, j-1:j+2,1], mode)
                hist_2[b] += 1
                # b = compute_code(error[i-1:i+2, j-1:j+2,0], mode)
                # hist_error_1[b] += 1
                # b = compute_code(error[i-1:i+2, j-1:j+2,1], mode)
                # hist_error_2[b] += 1
            if mode == 'ltc':
                b = compute_code(image[i-1:i+2, j-1:j+2,0], mode)
                hist_1[b[0]] += 1
                hist_1[b[1]] += 1
                b = compute_code(image[i-1:i+2, j-1:j+2,1], mode)
                hist_2[b[0]] += 1
                hist_2[b[1]] += 1
                # b_error = compute_code(error[i-1:i+2, j-1:j+2])
                # hist_error[b_error] += 1

    # print('Code 1 computation time : ' + str(code_1_dur/((image.shape[0] - 3)*(image.shape[1] - 3))) + 'ms')
    F = []
    N = (image.shape[0] - 3)*(image.shape[1] - 3)
    for i in hist_1.keys():
        F.append(hist_1[i]/N)
        F.append(hist_2[i]/N)
        # F.append(hist_error_1[i]/N)
        # F.append(hist_error_2[i]/N)

    return(np.array(F))
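As a rough usage sketch (assuming the module-level helpers classes, compute_code and compute_error_image from lbp.py are in scope): since the function multiplies by 255 before converting, it appears to expect a float RGB image scaled to [0, 1]; 'photo.png' is a placeholder path:

import cv2
import numpy as np

bgr = cv2.imread('photo.png')                                      # placeholder path
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
features = compute_hist(rgb, mode='ltc')                           # flat feature vector
print(features.shape)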