Python cv2.COLOR_LAB2BGR Examples
The following are 18 code examples of cv2.COLOR_LAB2BGR(), each taken from an open-source project. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the cv2 module.
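Before the project examples below, here is a minimal round-trip sketch, assuming an 8-bit BGR image on disk (the file name input.jpg is a placeholder):

    import cv2
    import numpy as np

    img = cv2.imread('input.jpg')  # OpenCV loads images in BGR channel order

    # BGR -> LAB: for uint8 input, OpenCV scales L to 0..255 and offsets a/b by 128
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # ... modify the L, a or b channels here ...

    # LAB -> BGR: the inverse conversion
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    assert bgr.shape == img.shape and bgr.dtype == np.uint8

Most of the examples that follow use this same pattern: convert to LAB, operate on the lightness channel, then convert back with cv2.COLOR_LAB2BGR.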
Example #1
Source File: neural_style.py From neural-style-tf with GNU General Public License v3.0
def convert_to_original_colors(content_img, stylized_img):
    content_img = postprocess(content_img)
    stylized_img = postprocess(stylized_img)
    if args.color_convert_type == 'yuv':
        cvt_type = cv2.COLOR_BGR2YUV
        inv_cvt_type = cv2.COLOR_YUV2BGR
    elif args.color_convert_type == 'ycrcb':
        cvt_type = cv2.COLOR_BGR2YCR_CB
        inv_cvt_type = cv2.COLOR_YCR_CB2BGR
    elif args.color_convert_type == 'luv':
        cvt_type = cv2.COLOR_BGR2LUV
        inv_cvt_type = cv2.COLOR_LUV2BGR
    elif args.color_convert_type == 'lab':
        cvt_type = cv2.COLOR_BGR2LAB
        inv_cvt_type = cv2.COLOR_LAB2BGR
    content_cvt = cv2.cvtColor(content_img, cvt_type)
    stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
    # keep the stylized image's luminance channel and the content image's two color channels
    c1, _, _ = cv2.split(stylized_cvt)
    _, c2, c3 = cv2.split(content_cvt)
    merged = cv2.merge((c1, c2, c3))
    dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
    dst = preprocess(dst)
    return dst
Example #2
Source File: utils.py From PHiSeg-code with Apache License 2.0
def histogram_equalization(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channels----------
    limg = cv2.merge((cl, a, b))

    # -----Converting image from LAB color model back to BGR---------------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final
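A usage sketch for the function above, assuming an 8-bit BGR image (the file names are placeholders):

    img = cv2.imread('photo.jpg')            # 8-bit BGR input
    equalized = histogram_equalization(img)  # CLAHE applied to the L channel only
    cv2.imwrite('photo_equalized.jpg', equalized)

Running CLAHE on the L channel alone boosts local contrast without shifting hues, which is why the a and b channels are passed through untouched.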
Example #3
Source File: omnirobot_simulator_server.py From robotics-rl-srl with MIT License
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
    """
    Render the environment with randomly varied luminosity and color.
    """
    origin_image_LAB = cv2.cvtColor(
        origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB).astype(np.float32)
    # scale each LAB channel by a random factor centered on 1.0
    origin_image_LAB[:, :, 0] = origin_image_LAB[:, :, 0] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 1] = origin_image_LAB[:, :, 1] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 2] = origin_image_LAB[:, :, 2] * (np.random.randn() * noise_var + 1.0)
    # clip back to the valid 8-bit range before converting out of LAB
    origin_image_LAB = np.clip(origin_image_LAB, 0, 255).astype(np.uint8)
    out_image = cv2.cvtColor(
        origin_image_LAB, cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR)
    return out_image
Example #4
Source File: phiseg_makegif_samples.py From PHiSeg-code with Apache License 2.0
def histogram_equalization(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channels----------
    limg = cv2.merge((cl, a, b))

    # -----Converting image from LAB color model back to BGR---------------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final
Example #5
Source File: image.py From surface-crack-detection with MIT License
def equalize_light(image, limit=3, grid=(7, 7), gray=False):
    if len(image.shape) == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        gray = True

    clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=grid)

    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)

    cl = clahe.apply(l)
    limg = cv2.merge((cl, a, b))

    image = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    if gray:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    return np.uint8(image)
Example #6
Source File: image.py From OverwatchDataAnalysis with GNU General Public License v3.0
def increase_contrast(img):
    """
    Increase contrast of a BGR image.
    @Author: Appcell
    @param img: image to be processed
    @return: a numpy.ndarray object of this image
    """
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(4, 4))
    cl = clahe.apply(l)
    limg = cv2.merge((cl, a, b))
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final
Example #7
Source File: image_processing.py From kaggle-dsb2018 with Apache License 2.0
def rgb_clahe(in_rgb_img):
    bgr = in_rgb_img[:, :, [2, 1, 0]]  # flip r and b
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    lab[:, :, 0] = clahe.apply(lab[:, :, 0])
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return bgr[:, :, [2, 1, 0]]
Example #8
Source File: color_transfer.py From pyimagesearch with GNU General Public License v3.0
def color_transfer(source, target):
    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")

    (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
    (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)

    (l, a, b) = cv2.split(target)
    l -= lMeanTar
    a -= aMeanTar
    b -= bMeanTar

    l = (lStdTar / lStdSrc) * l
    a = (aStdTar / aStdSrc) * a
    b = (bStdTar / bStdSrc) * b

    l += lMeanSrc
    a += aMeanSrc
    b += bMeanSrc

    l = np.clip(l, 0, 255)
    a = np.clip(a, 0, 255)
    b = np.clip(b, 0, 255)

    transfer = cv2.merge([l, a, b])
    transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)

    return transfer
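A usage sketch, assuming both inputs are 8-bit BGR images and that image_stats is the helper from the same source file that returns per-channel LAB means and standard deviations (the file names are placeholders):

    source = cv2.imread('sunset.jpg')  # image whose color statistics we borrow
    target = cv2.imread('beach.jpg')   # image to be recolored
    result = color_transfer(source, target)
    cv2.imwrite('beach_with_sunset_colors.jpg', result)

Each LAB channel of the target is centered on its own mean, scaled by the ratio of the two standard deviations, and re-centered on the source's mean, so the output keeps the target's content with the source's overall color cast.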
Example #9
Source File: preprocess.py From pytorch-segmentation with MIT License
def clahe(img, clip=2, grid=8):
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    _clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(grid, grid))
    img_lab[:, :, 0] = _clahe.apply(img_lab[:, :, 0])
    img_equ = cv2.cvtColor(img_lab, cv2.COLOR_LAB2BGR)
    return img_equ
Example #10
Source File: inference_utils.py From rpg_e2vid with GNU General Public License v3.0
def upsample_color_image(grayscale_highres, color_lowres_bgr, colorspace='LAB'):
    """
    Generate a high res color image from a high res grayscale image,
    and a low res color image, using the trick described in:
    http://www.planetary.org/blogs/emily-lakdawalla/2013/04231204-image-processing-colorizing-images.html
    """
    assert len(grayscale_highres.shape) == 2
    assert len(color_lowres_bgr.shape) == 3 and color_lowres_bgr.shape[2] == 3

    if colorspace == 'LAB':
        # convert color image to LAB space
        lab = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2LAB)
        # replace lightness channel with the highres image
        lab[:, :, 0] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=lab, code=cv2.COLOR_LAB2BGR)
    elif colorspace == 'HSV':
        # convert color image to HSV space
        hsv = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HSV)
        # replace value channel with the highres image
        hsv[:, :, 2] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hsv, code=cv2.COLOR_HSV2BGR)
    elif colorspace == 'HLS':
        # convert color image to HLS space
        hls = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HLS)
        # replace lightness channel with the highres image
        hls[:, :, 1] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hls, code=cv2.COLOR_HLS2BGR)
    else:
        raise ValueError('Unsupported colorspace: {}'.format(colorspace))

    return color_highres_bgr
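A usage sketch under the assumption that the low-resolution color image has already been resized to match the high-resolution grayscale frame; the function overwrites the lightness channel in place, so the two inputs must share the same height and width (the file names are placeholders):

    gray_high = cv2.imread('frame_highres.png', cv2.IMREAD_GRAYSCALE)
    color_low = cv2.imread('frame_lowres.png')  # 8-bit BGR
    color_low = cv2.resize(color_low, (gray_high.shape[1], gray_high.shape[0]),
                           interpolation=cv2.INTER_CUBIC)
    result = upsample_color_image(gray_high, color_low, colorspace='LAB')
    cv2.imwrite('frame_colorized.png', result)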
Example #11
Source File: color_transfer.py From DeepFaceLab with GNU General Public License v3.0
def color_transfer_mix(img_src, img_trg):
    img_src = np.clip(img_src * 255.0, 0, 255).astype(np.uint8)
    img_trg = np.clip(img_trg * 255.0, 0, 255).astype(np.uint8)

    img_src_lab = cv2.cvtColor(img_src, cv2.COLOR_BGR2LAB)
    img_trg_lab = cv2.cvtColor(img_trg, cv2.COLOR_BGR2LAB)

    # transfer the lightness statistics separately
    rct_light = np.clip(linear_color_transfer(
        img_src_lab[..., 0:1].astype(np.float32) / 255.0,
        img_trg_lab[..., 0:1].astype(np.float32) / 255.0)[..., 0] * 255.0,
        0, 255).astype(np.uint8)

    # flatten the lightness of both images so the transfer below
    # acts only on the color channels
    img_src_lab[..., 0] = (np.ones_like(rct_light) * 100).astype(np.uint8)
    img_src_lab = cv2.cvtColor(img_src_lab, cv2.COLOR_LAB2BGR)

    img_trg_lab[..., 0] = (np.ones_like(rct_light) * 100).astype(np.uint8)
    img_trg_lab = cv2.cvtColor(img_trg_lab, cv2.COLOR_LAB2BGR)

    img_rct = color_transfer_sot(img_src_lab.astype(np.float32),
                                 img_trg_lab.astype(np.float32))
    img_rct = np.clip(img_rct, 0, 255).astype(np.uint8)

    # put the separately transferred lightness back in
    img_rct = cv2.cvtColor(img_rct, cv2.COLOR_BGR2LAB)
    img_rct[..., 0] = rct_light
    img_rct = cv2.cvtColor(img_rct, cv2.COLOR_LAB2BGR)

    return (img_rct / 255.0).astype(np.float32)
Example #12
Source File: merge_faces_larger.py From df with Mozilla Public License 2.0
def transfer_avg_color(img_old, img_new):
    assert img_old.shape == img_new.shape
    source = cv2.cvtColor(img_old, cv2.COLOR_BGR2LAB).astype("float32")
    target = cv2.cvtColor(img_new, cv2.COLOR_BGR2LAB).astype("float32")

    (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
    (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)

    (l, a, b) = cv2.split(target)
    l -= lMeanTar
    a -= aMeanTar
    b -= bMeanTar

    l = (lStdTar / lStdSrc) * l
    a = (aStdTar / aStdSrc) * a
    b = (bStdTar / bStdSrc) * b

    l += lMeanSrc
    a += aMeanSrc
    b += bMeanSrc

    l = numpy.clip(l, 0, 255)
    a = numpy.clip(a, 0, 255)
    b = numpy.clip(b, 0, 255)

    transfer = cv2.merge([l, a, b])
    transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
    return transfer
Example #13
Source File: common.py From pytorch-faster-rcnn with MIT License
def light(im1_name, im2_name):
    # im1
    im = cv2.imread(im1_name)
    im = im.astype(np.float32)
    im /= 255.
    im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
    l = im_lab[:, :, 0]
    L1_mean = np.mean(l)
    L1_std = np.std(l)

    # im2
    im = cv2.imread(im2_name)
    im = im.astype(np.float32)
    im /= 255.
    im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
    l = im_lab[:, :, 0]
    L2_mean = np.mean(l)
    L2_std = np.std(l)

    if L2_std != 0:
        l = (l - L2_mean) / L2_std * L1_std + L1_mean
    l = l[:, :, np.newaxis]
    im_lab = np.concatenate((l, im_lab[:, :, 1:]), axis=2)
    im = cv2.cvtColor(im_lab, cv2.COLOR_LAB2BGR)
    im *= 255.
    return im
Example #14
Source File: img_util.py From CvStudio with MIT License
def correct_lightness(img: np.ndarray):
    if len(np.shape(img)) == 3:
        img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(img_lab)
        clahe = cv2.createCLAHE(clipLimit=40.0, tileGridSize=(4, 4))
        l = clahe.apply(l)
        img = cv2.merge((l, a, b))
        img = cv2.cvtColor(img, cv2.COLOR_LAB2BGR)
    return img
Example #15
Source File: image.py From BirdCLEF-Baseline with MIT License
def lightness(img, amount=0.25):
    try:
        # Only works with BGR images
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype('float32')
        lab[:, :, 0] *= RANDOM.uniform(1 - amount, 1 + amount)
        lab[:, :, 0] = lab[:, :, 0].clip(0, 255)
        img = cv2.cvtColor(lab.astype('uint8'), cv2.COLOR_LAB2BGR)
    except:
        pass
    return img
Example #16
Source File: Demo_OpenCV_Simple_GUI.py From PySimpleGUI with GNU Lesser General Public License v3.0
def main():
    sg.theme('LightGreen')

    # define the window layout
    layout = [
        [sg.Text('OpenCV Demo', size=(60, 1), justification='center')],
        [sg.Image(filename='', key='-IMAGE-')],
        [sg.Radio('None', 'Radio', True, size=(10, 1))],
        [sg.Radio('threshold', 'Radio', size=(10, 1), key='-THRESH-'),
         sg.Slider((0, 255), 128, 1, orientation='h', size=(40, 15), key='-THRESH SLIDER-')],
        [sg.Radio('canny', 'Radio', size=(10, 1), key='-CANNY-'),
         sg.Slider((0, 255), 128, 1, orientation='h', size=(20, 15), key='-CANNY SLIDER A-'),
         sg.Slider((0, 255), 128, 1, orientation='h', size=(20, 15), key='-CANNY SLIDER B-')],
        [sg.Radio('blur', 'Radio', size=(10, 1), key='-BLUR-'),
         sg.Slider((1, 11), 1, 1, orientation='h', size=(40, 15), key='-BLUR SLIDER-')],
        [sg.Radio('hue', 'Radio', size=(10, 1), key='-HUE-'),
         sg.Slider((0, 225), 0, 1, orientation='h', size=(40, 15), key='-HUE SLIDER-')],
        [sg.Radio('enhance', 'Radio', size=(10, 1), key='-ENHANCE-'),
         sg.Slider((1, 255), 128, 1, orientation='h', size=(40, 15), key='-ENHANCE SLIDER-')],
        [sg.Button('Exit', size=(10, 1))]
    ]

    # create the window and show it without the plot
    window = sg.Window('OpenCV Integration', layout, location=(800, 400))

    cap = cv2.VideoCapture(0)

    while True:
        event, values = window.read(timeout=20)
        if event == 'Exit' or event == sg.WIN_CLOSED:
            break

        ret, frame = cap.read()

        if values['-THRESH-']:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)[:, :, 0]
            frame = cv2.threshold(frame, values['-THRESH SLIDER-'], 255, cv2.THRESH_BINARY)[1]
        elif values['-CANNY-']:
            frame = cv2.Canny(frame, values['-CANNY SLIDER A-'], values['-CANNY SLIDER B-'])
        elif values['-BLUR-']:
            frame = cv2.GaussianBlur(frame, (21, 21), values['-BLUR SLIDER-'])
        elif values['-HUE-']:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            frame[:, :, 0] += int(values['-HUE SLIDER-'])
            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
        elif values['-ENHANCE-']:
            enh_val = values['-ENHANCE SLIDER-'] / 40
            clahe = cv2.createCLAHE(clipLimit=enh_val, tileGridSize=(8, 8))
            lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
            lab[:, :, 0] = clahe.apply(lab[:, :, 0])
            frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

        imgbytes = cv2.imencode('.png', frame)[1].tobytes()
        window['-IMAGE-'].update(data=imgbytes)

    window.close()
Example #17
Source File: Demo_OpenCV_Simple_GUI.py From PySimpleGUI with GNU Lesser General Public License v3.0
def main():
    sg.ChangeLookAndFeel('LightGreen')

    # define the window layout
    layout = [[sg.Text('OpenCV Demo', size=(40, 1), justification='center')],
              [sg.Image(filename='', key='image')],
              [sg.Radio('None', 'Radio', True, size=(10, 1))],
              [sg.Radio('threshold', 'Radio', size=(10, 1), key='thresh'),
               sg.Slider((0, 255), 128, 1, orientation='h', size=(40, 15), key='thresh_slider')],
              [sg.Radio('canny', 'Radio', size=(10, 1), key='canny'),
               sg.Slider((0, 255), 128, 1, orientation='h', size=(20, 15), key='canny_slider_a'),
               sg.Slider((0, 255), 128, 1, orientation='h', size=(20, 15), key='canny_slider_b')],
              [sg.Radio('contour', 'Radio', size=(10, 1), key='contour'),
               sg.Slider((0, 255), 128, 1, orientation='h', size=(20, 15), key='contour_slider'),
               sg.Slider((0, 255), 80, 1, orientation='h', size=(20, 15), key='base_slider')],
              [sg.Radio('blur', 'Radio', size=(10, 1), key='blur'),
               sg.Slider((1, 11), 1, 1, orientation='h', size=(40, 15), key='blur_slider')],
              [sg.Radio('hue', 'Radio', size=(10, 1), key='hue'),
               sg.Slider((0, 225), 0, 1, orientation='h', size=(40, 15), key='hue_slider')],
              [sg.Radio('enhance', 'Radio', size=(10, 1), key='enhance'),
               sg.Slider((1, 255), 128, 1, orientation='h', size=(40, 15), key='enhance_slider')],
              [sg.Button('Exit', size=(10, 1))]]

    # create the window and show it without the plot
    window = sg.Window('Demo Application - OpenCV Integration', location=(800, 400))
    window.Layout(layout).Finalize()

    cap = cv2.VideoCapture(0)
    while True:
        event, values = window.Read(timeout=0, timeout_key='timeout')
        if event == 'Exit' or event is None:
            sys.exit(0)

        ret, frame = cap.read()

        if values['thresh']:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)[:, :, 0]
            _, frame = cv2.threshold(frame, values['thresh_slider'], 255, cv2.THRESH_BINARY)
        if values['canny']:
            frame = cv2.Canny(frame, values['canny_slider_a'], values['canny_slider_b'])
        if values['blur']:
            frame = cv2.GaussianBlur(frame, (21, 21), values['blur_slider'])
        if values['hue']:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            frame[:, :, 0] += int(values['hue_slider'])
            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
        if values['enhance']:
            enh_val = values['enhance_slider'] / 40
            clahe = cv2.createCLAHE(clipLimit=enh_val, tileGridSize=(8, 8))
            lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
            lab[:, :, 0] = clahe.apply(lab[:, :, 0])
            frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
        if values['contour']:
            hue = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hue = cv2.GaussianBlur(hue, (21, 21), 1)
            hue = cv2.inRange(hue,
                              np.array([values['contour_slider'], values['base_slider'], 40]),
                              np.array([values['contour_slider'] + 30, 255, 220]))
            _, cnts, _ = cv2.findContours(hue, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(frame, cnts, -1, (0, 0, 255), 2)

        imgbytes = cv2.imencode('.png', frame)[1].tobytes()  # ditto
        window.FindElement('image').Update(data=imgbytes)
Example #18
Source File: fixed_size.py From Detectron-PYTORCH with Apache License 2.0
def distort_color(im):
    # distort brightness
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    h_, s_, v_ = cv2.split(hsv)
    # work in a signed dtype so the random shift can go negative
    v_ = v_.astype(np.int32) + np.random.randint(-16, 16)
    v_ = np.clip(v_, 0, 255).astype(np.uint8)
    hsv = cv2.merge((h_, s_, v_))
    im = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # distort contrast
    # """
    # From TF source code
    # For each channel, this Op computes the mean of the image pixels in the
    # channel and then adjusts each component `x` of each pixel to
    # `(x - mean) * contrast_factor + mean`.
    # """
    im = im.astype(np.float32)
    b, g, r = cv2.split(im)
    # factor = (np.random.rand() + 0.5)
    factor = np.random.uniform(0.75, 1.25)
    b = (b - b.mean()) * factor + b.mean()
    b = np.clip(b, 0, 255)
    # factor = (np.random.rand() + 0.5)
    g = (g - g.mean()) * factor + g.mean()
    g = np.clip(g, 0, 255)
    # factor = (np.random.rand() + 0.5)
    r = (r - r.mean()) * factor + r.mean()
    r = np.clip(r, 0, 255)
    im = cv2.merge((b, g, r))

    # im = im.astype(np.uint8)
    # clip_value = np.random.rand() * 3.0
    # clahe = cv2.createCLAHE(clipLimit=clip_value, tileGridSize=(8, 8))
    # lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)  # convert from BGR to LAB color space
    # l, a, b = cv2.split(lab)  # split on 3 different channels
    # l2 = clahe.apply(l)  # apply CLAHE to the L-channel
    # lab = cv2.merge((l2, a, b))  # merge channels
    # im = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)  # convert from LAB to BGR
    # im = im.astype(np.float32)
    return im