Python cv2.COLOR_Lab2BGR() Examples
The following are three code examples of cv2.COLOR_Lab2BGR().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
cv2
, or try the search function
.
Example #1
Source File: clahe_histogram_equalization.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def equalize_clahe_color_lab(img):
    """Equalize the image splitting it after conversion to LAB and applying CLAHE
    to the L channel and merging the channels and convert back to BGR
    """
    # Work in CIELAB so equalization touches only lightness, not color.
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    lightness, chan_a, chan_b = cv2.split(lab)

    # Contrast-limited adaptive histogram equalization on the L channel only.
    clahe = cv2.createCLAHE(clipLimit=4.0)
    equalized_lightness = clahe.apply(lightness)

    merged = cv2.merge([equalized_lightness, chan_a, chan_b])
    return cv2.cvtColor(merged, cv2.COLOR_Lab2BGR)
Example #2
Source File: colorization_evaluator.py From open_model_zoo with Apache License 2.0 | 5 votes |
def postprocessing(self, res, img_l, output_blob, img_size):
    """Turn the network's color-class output back into a BGR image.

    Projects the raw output through ``self.color_coeff`` to recover the
    a/b chrominance planes, resizes them to ``img_size``, recombines them
    with the original lightness plane ``img_l``, and converts Lab -> BGR.
    Returns a single-element list holding the image clipped to [0, 1].
    """
    raw = res[output_blob]
    # Weight each predicted color class by its coefficient and sum over
    # the class axis to obtain the two chrominance channels.
    coeff = self.color_coeff.transpose()[:, :, np.newaxis, np.newaxis]
    ab_planes = (raw * coeff).sum(1)

    # CHW -> HWC float32, scaled up to the target spatial size.
    ab_planes = ab_planes.transpose((1, 2, 0)).astype(np.float32)
    ab_planes = cv2.resize(ab_planes, img_size)

    # Reattach the original L channel, then convert back to BGR.
    lab_image = np.concatenate((img_l[:, :, np.newaxis], ab_planes), axis=2)
    bgr_image = cv2.cvtColor(lab_image, cv2.COLOR_Lab2BGR)
    return [np.clip(bgr_image, 0, 1)]
Example #3
Source File: makeup.py From face-parsing.PyTorch with MIT License | 4 votes |
def hair(image, parsing, part=17, color=(230, 50, 20)):
    """Recolor the region of ``image`` selected by ``parsing == part``.

    Builds a flat target-color image, transfers its hue (and, for lip
    parts 12/13, also saturation) onto ``image`` in HSV space, sharpens
    the result for hair (part 17), and copies the recolored pixels back
    only where the parsing map matches ``part``.

    :param image: BGR uint8 image.
    :param parsing: per-pixel label map, same spatial size as ``image``.
    :param part: label id to recolor (17 = hair, 12/13 = lips).
    :param color: target color as (b, g, r). NOTE: tuple default replaces
        the original mutable-list default argument (a Python pitfall);
        unpacking behavior is identical for callers.
    :return: image with the selected region recolored.
    """
    b, g, r = color

    # Flat image filled with the target color, used as the hue source.
    tar_color = np.zeros_like(image)
    tar_color[:, :, 0] = b
    tar_color[:, :, 1] = g
    tar_color[:, :, 2] = r

    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_BGR2HSV)

    if part == 12 or part == 13:
        # Lips: transfer hue and saturation.
        image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2]
    else:
        # Other parts (e.g. hair): transfer hue only.
        image_hsv[:, :, 0:1] = tar_hsv[:, :, 0:1]

    changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)

    if part == 17:
        # Hair benefits from a sharpening pass after recoloring.
        changed = sharpen(changed)

    # Restore original pixels everywhere outside the target region.
    changed[parsing != part] = image[parsing != part]
    return changed