Python cv2.COLOR_LAB2RGB Examples
The following are 14 code examples of cv2.COLOR_LAB2RGB(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the cv2 module.
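For orientation, here is a minimal round-trip sketch (not taken from any of the projects below) showing where cv2.COLOR_LAB2RGB fits. OpenCV encodes 8-bit LAB with L scaled to [0, 255] and a/b offset by 128; the flag converts such an image back to RGB.

import cv2
import numpy as np

# Minimal illustration with a synthetic uint8 RGB image.
rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
lab = cv2.cvtColor(rgb, cv2.COLOR_RGB2LAB)    # RGB -> CIELAB (8-bit encoding)
back = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)   # CIELAB -> RGB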
Example #1
Source File: luminosity_standardizer.py From StainTools with MIT License | 6 votes |
def standardize(I, percentile=95):
    """
    Transform image I to standard brightness.
    Modifies the luminosity channel such that a fixed percentile is saturated.

    :param I: Image uint8 RGB.
    :param percentile: Percentile for luminosity saturation. At least (100 - percentile)% of pixels
        should be fully luminous (white).
    :return: Image uint8 RGB with standardized brightness.
    """
    assert is_uint8_image(I), "Image should be RGB uint8."
    I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)
    L_float = I_LAB[:, :, 0].astype(float)
    p = np.percentile(L_float, percentile)
    I_LAB[:, :, 0] = np.clip(255 * L_float / p, 0, 255).astype(np.uint8)
    I = cv.cvtColor(I_LAB, cv.COLOR_LAB2RGB)
    return I
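A possible way to call the function above, shown only as a hedged sketch: the file name is a placeholder, and since cv2.imread returns BGR, the image is converted to RGB first as the docstring requires.

import cv2 as cv

# Hypothetical usage of standardize(); "slide.png" is only a placeholder path.
img_bgr = cv.imread("slide.png")
img_rgb = cv.cvtColor(img_bgr, cv.COLOR_BGR2RGB)  # imread gives BGR; standardize expects RGB uint8
img_std = standardize(img_rgb, percentile=95)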
Example #2
Source File: omnirobot_simulator_server.py From robotics-rl-srl with MIT License | 6 votes |
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
    """
    render the different environment luminosity
    """
    # variate luminosity and color
    origin_image_LAB = cv2.cvtColor(
        origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB, cv2.CV_32F)
    origin_image_LAB[:, :, 0] = origin_image_LAB[:, :, 0] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 1] = origin_image_LAB[:, :, 1] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 2] = origin_image_LAB[:, :, 2] * (np.random.randn() * noise_var + 1.0)
    out_image = cv2.cvtColor(
        origin_image_LAB, cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR, cv2.CV_8UC3)
    return out_image
Example #3
Source File: image.py From ColorHistogram with MIT License | 5 votes |
def Lab2rgb(img):
    rgb = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
    return rgb
Example #4
Source File: image.py From GuidedFilter with MIT License | 5 votes |
def Lab2rgb(img):
    rgb = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
    return rgb
Example #5
Source File: reinhard_color_normalizer.py From StainTools with MIT License | 5 votes |
def merge_back(I1, I2, I3):
    """
    Take separate LAB channels and merge back to give RGB uint8.

    :param I1: L
    :param I2: A
    :param I3: B
    :return: Image RGB uint8.
    """
    I1 *= 2.55  # should now be in range [0,255]
    I2 += 128.0  # should now be in range [0,255]
    I3 += 128.0  # should now be in range [0,255]
    I = np.clip(cv.merge((I1, I2, I3)), 0, 255).astype(np.uint8)
    return cv.cvtColor(I, cv.COLOR_LAB2RGB)
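merge_back reverses a channel split that maps OpenCV's 8-bit LAB encoding (L in [0, 255], a/b offset by +128) to L in [0, 100] and signed a/b. The sketch below is an illustrative inverse written for this page, not StainTools' own helper:

import cv2 as cv
import numpy as np

def lab_split_sketch(I):
    """Illustrative split of a uint8 RGB image into float LAB channels matching merge_back's scaling."""
    lab = cv.cvtColor(I, cv.COLOR_RGB2LAB).astype(np.float32)
    L, A, B = cv.split(lab)
    L /= 2.55      # OpenCV stores L * 255 / 100 for 8-bit images
    A -= 128.0     # a and b are offset by 128 in the 8-bit encoding
    B -= 128.0
    return L, A, B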
Example #6
Source File: train154_9ch_fold.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def clahe(img, clipLimit=2.0, tileGridSize=(5, 5)):
    # Note: despite the variable name, the conversion is to LAB, not YUV.
    img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
    return img_output
Example #7
Source File: train50_9ch_fold.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def clahe(img, clipLimit=2.0, tileGridSize=(5, 5)):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
    return img_output
Example #8
Source File: train101_9ch_fold.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def clahe(img, clipLimit=2.0, tileGridSize=(5, 5)):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
    return img_output
Example #9
Source File: train92_9ch_fold.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 5 votes |
def clahe(img, clipLimit=2.0, tileGridSize=(5, 5)):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
    return img_output
Example #10
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def clahe(img, clipLimit=2.0, tileGridSize=(8, 8)):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
    return img_output
Example #11
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def clahe(img, clipLimit=2.0, tileGridSize=(8, 8)):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
    return img_output
Example #12
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def clahe(img, clipLimit=2.0, tileGridSize=(8, 8)):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
    return img_output
Example #13
Source File: functional.py From albumentations with MIT License | 5 votes |
def clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)):
    if img.dtype != np.uint8:
        raise TypeError("clahe supports only uint8 inputs")

    clahe_mat = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)

    if len(img.shape) == 2 or img.shape[2] == 1:
        img = clahe_mat.apply(img)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
        img[:, :, 0] = clahe_mat.apply(img[:, :, 0])
        img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)

    return img
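A short usage sketch for the clahe function above, using synthetic uint8 arrays (the shapes and values here are purely illustrative):

import numpy as np

rgb = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)   # 3-channel input: LAB round trip
rgb_eq = clahe(rgb, clip_limit=2.0, tile_grid_size=(8, 8))

gray = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)     # 2-D input takes the grayscale branch
gray_eq = clahe(gray)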
Example #14
Source File: arm_resnet.py From craves.ai with GNU General Public License v3.0 | 4 votes |
def __getitem__(self, index):
    if self.is_train:
        ids = self.train[index]
    else:
        ids = self.valid[index]

    images = self.dataset.get_image([self.cam_name], [ids])
    img_path = images[0]
    img = load_image(img_path)  # CxHxW
    target = self.load_angles(img_path)
    original_size = np.array((img.shape[2], img.shape[1]))

    segmasks = self.dataset.get_seg([self.cam_name], [ids])
    segmask = io.imread(segmasks[0])
    binary_arm = vdb.get_obj_mask(segmask, self.color)
    bb = vdb.seg2bb(binary_arm)
    x0, x1, y0, y1 = bb
    c = np.array([(x0 + x1), (y0 + y1)]) / 2
    # s = np.sqrt((y1-y0)*(x1-x0))/120.0
    s = np.sqrt((y1 - y0) * (x1 - x0)) / 60.0
    r = 0
    # s = max(x1-x0, y1-y0)/125

    if self.is_train:
        c = c + np.array([-30 + 60 * random.random(), -30 + 60 * random.random()])  # random move
        s *= 0.6 * (1 + 2 * random.random())  # random scale
        rf = 15
        r = -rf + 2 * random.random() * rf  # random rotation
        # r = torch.randn(1).mul_(rf).clamp(-2*rf, 2*rf)[0] if random.random() <= 0.6 else 0

        # Color: jitter luminosity in LAB space, then convert back to RGB
        im_rgb = im_to_numpy(img)
        im_lab = cv2.cvtColor(im_rgb, cv2.COLOR_RGB2LAB)
        im_lab[:, :, 0] = np.clip(im_lab[:, :, 0] * (random.uniform(0.3, 1.3)), 0, 255)
        img = im_to_torch(cv2.cvtColor(im_lab, cv2.COLOR_LAB2RGB))

        if random.random() <= 0.5:
            img = torch.from_numpy(fliplr(img.numpy())).float()

    inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r)
    inp = color_normalize(inp, self.mean, self.std)

    return inp, target