Python cv2.COLOR_BGR2Lab() Examples
The following are 5 code examples of cv2.COLOR_BGR2Lab(). Each example notes its original project and source file above the code. You may also want to check out the other available functions and classes of the cv2 module.
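For orientation before the examples, here is a minimal sketch of a round trip through the Lab color space (the file name is a placeholder):

import cv2

img_bgr = cv2.imread("input.jpg")  # hypothetical 8-bit BGR image
# BGR -> Lab. For uint8 images OpenCV rescales the channels to fit
# [0, 255]: L <- L * 255/100, a <- a + 128, b <- b + 128.
img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2Lab)
# ... process individual Lab channels here ...
img_back = cv2.cvtColor(img_lab, cv2.COLOR_Lab2BGR)  # Lab -> BGR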
Example #1
Source File: imutils.py From craves.ai with GNU General Public License v3.0
# Class method; requires cv2 and numpy (np) imported at module level.
def create_mask(self, img, color):
    # Threshold the image in Lab space to build a binary color mask.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    if color == 'green':
        threshold = [(20, 0, 128), (235, 128, 255)]
    elif color == 'white':
        threshold = [(100, 110, 110), (200, 140, 140)]
    else:
        raise Exception('Color undefined')
    mask = cv2.inRange(img, threshold[0], threshold[1])
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
    # mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask = mask > 0
    # img = cv2.cvtColor(img, cv2.COLOR_YCR_CB2BGR)
    # thres_img = np.zeros_like(img, np.uint8)
    # thres_img[mask] = img[mask]
    binary_img = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    binary_img[mask] = 255
    # cv2.imshow('img', binary_img)
    # cv2.waitKey(0)
    # exit(0)
    return mask
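To exercise the same Lab-range thresholding outside the class, here is a minimal standalone sketch; the file name is a placeholder and the thresholds are the 'green' pair from above:

import cv2

img = cv2.imread("frame.png")  # hypothetical BGR input
lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
# Keep pixels whose (L, a, b) values fall inside the 'green' range.
mask = cv2.inRange(lab, (20, 0, 128), (235, 128, 255))
bool_mask = mask > 0  # boolean mask, the same form create_mask() returns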
Example #2
Source File: dat.py From pyCFTrackers with MIT License
# Class method from the DAT tracker; requires cv2, numpy (np), copy, and
# the module's helpers (pos2rect, get_sub_window, get_bin_mapping,
# get_foreground_background_probs, get_adaptive_threshold).
def init(self, first_frame, bbox):
    bbox = np.array(bbox).astype(np.int64)
    x, y, w, h = tuple(bbox)
    # Downscale the frame so the target diagonal matches the configured size.
    self._scale_factor = min(1, round(10 * self.config.img_scale_target_diagonal / cv2.norm(np.array([w, h]))) / 10.)
    self._center = (self._scale_factor * (x + (w - 1) / 2), self._scale_factor * (y + (h - 1) / 2))
    self.w, self.h = int(w * self._scale_factor), int(h * self._scale_factor)
    self._target_sz = (self.w, self.h)
    img = cv2.resize(first_frame, None, fx=self._scale_factor, fy=self._scale_factor)
    # Convert to the configured color space before building color histograms.
    if self.config.color_space == 'lab':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    elif self.config.color_space == 'hsv':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img[:, :, 0] = (img[:, :, 0] * 256 / 180)  # stretch hue from [0, 179] to [0, 255]
        img = img.astype(np.uint8)
    else:
        pass
    surr_sz = (int(np.floor(self.config.surr_win_factor * self.w)), int(np.floor(self.config.surr_win_factor * self.h)))
    surr_rect = pos2rect(self._center, surr_sz, (img.shape[1], img.shape[0]))
    obj_rect_surr = pos2rect(self._center, self._target_sz, (img.shape[1], img.shape[0]))
    obj_rect_surr = (obj_rect_surr[0] - surr_rect[0],
                     obj_rect_surr[1] - surr_rect[1],
                     obj_rect_surr[2], obj_rect_surr[3])
    surr_win = get_sub_window(img, self._center, surr_sz)
    self.bin_mapping = get_bin_mapping(self.config.num_bins)
    # Build foreground/background color probability lookup tables.
    self.prob_lut_, prob_map = get_foreground_background_probs(surr_win, obj_rect_surr,
                                                               self.config.num_bins, self.bin_mapping)
    self._prob_lut_distractor = copy.deepcopy(self.prob_lut_)
    self._prob_lut_masked = copy.deepcopy(self.prob_lut_)
    self.adaptive_threshold_ = get_adaptive_threshold(prob_map, obj_rect_surr)
    self.target_pos_history.append((self._center[0] / self._scale_factor, self._center[1] / self._scale_factor))
    self.target_sz_history.append((self._target_sz[0] / self._scale_factor, self._target_sz[1] / self._scale_factor))
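The 'hsv' branch above stretches OpenCV's 8-bit hue range of [0, 179] to [0, 255] so that all three channels share one histogram bin mapping. A standalone sketch of that normalization step, with a hypothetical input file:

import cv2
import numpy as np

img = cv2.imread("frame.png")  # hypothetical BGR input
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float32)
# Rescale hue so it spans the same [0, 255] range as saturation and value.
hsv[:, :, 0] = hsv[:, :, 0] * 256 / 180
hsv = np.clip(hsv, 0, 255).astype(np.uint8)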
Example #3
Source File: clahe_histogram_equalization.py From Mastering-OpenCV-4-with-Python with MIT License
import cv2

def equalize_clahe_color_lab(img):
    """Equalize an image by converting it to LAB, applying CLAHE to the
    L (lightness) channel, merging the channels back, and converting the
    result to BGR."""
    cla = cv2.createCLAHE(clipLimit=4.0)
    L, a, b = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2Lab))
    eq_L = cla.apply(L)
    eq_image = cv2.cvtColor(cv2.merge([eq_L, a, b]), cv2.COLOR_Lab2BGR)
    return eq_image
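A minimal usage sketch for the helper above (file names are placeholders). Applying CLAHE to L alone adjusts local contrast in lightness while leaving the a and b chroma channels untouched, so colors do not shift:

import cv2

img = cv2.imread("photo.jpg")       # hypothetical BGR input
eq = equalize_clahe_color_lab(img)  # helper defined above
cv2.imwrite("photo_clahe.jpg", eq)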
Example #4
Source File: img_proc.py From dlupi-heteroscedastic-dropout with MIT License
import cv2
import numpy as np
import torch

def convert_rgb2lab(images, batch_size):  # [128, 3, 32, 32]
    """
    INPUT: images should be NCHW
    AB channel values are in the range [-128,128]
    L channel values are in the range [0,100]
    """
    # Note: batch_size is unused, and despite the rgb2lab name the
    # conversion flag assumes BGR channel order.
    images_np = images.numpy()
    images_np_nhwc = np.rollaxis(images_np, 1, 4)  # NCHW to NHWC
    images_LAB = torch.FloatTensor(images.size()).zero_()  # empty NCHW tensor to hold LAB
    for i in range(images_np_nhwc.shape[0]):
        img_lab = cv2.cvtColor(images_np_nhwc[i], cv2.COLOR_BGR2Lab)  # HWC
        images_LAB[i] = torch.from_numpy(np.rollaxis(img_lab, 2, 0))  # to CHW
    images_L = images_LAB[:, 0, :, :].contiguous().view(images.size(0), 1, images.size(2), images.size(3))  # channel 0
    images_AB = images_LAB[:, 1:, :, :]  # channels 1 and 2
    return images_L, images_AB
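One caveat: the docstring ranges hold for floating-point input. For uint8 input OpenCV rescales Lab to fit [0, 255] (L * 255/100, a + 128, b + 128), while float32 input in [0, 1] keeps the natural ranges. A standalone sketch with a fabricated float image:

import cv2
import numpy as np

img_hwc = np.random.rand(32, 32, 3).astype(np.float32)  # hypothetical float32 HWC image in [0, 1]
lab = cv2.cvtColor(img_hwc, cv2.COLOR_BGR2Lab)
# L is now in [0, 100]; a and b are roughly in [-127, 127].
chw = np.rollaxis(lab, 2, 0)  # HWC -> CHW, matching the loop above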
Example #5
Source File: makeup.py From face-parsing.PyTorch with MIT License
import cv2
import numpy as np

def hair(image, parsing, part=17, color=[230, 50, 20]):
    b, g, r = color  # [10, 50, 250] # [10, 250, 10]
    tar_color = np.zeros_like(image)
    tar_color[:, :, 0] = b
    tar_color[:, :, 1] = g
    tar_color[:, :, 2] = r

    # Recolor in HSV: copy the target hue (and saturation for lip parts).
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_BGR2HSV)

    if part == 12 or part == 13:
        image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2]
    else:
        image_hsv[:, :, 0:1] = tar_hsv[:, :, 0:1]

    changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)

    if part == 17:
        changed = sharpen(changed)  # sharpen() is defined elsewhere in makeup.py

    # Restore the original pixels everywhere outside the selected part.
    changed[parsing != part] = image[parsing != part]
    # changed = cv2.resize(changed, (512, 512))
    return changed

# def lip(image, parsing, part=17, color=[230, 50, 20]):
#     b, g, r = color  # [10, 50, 250] # [10, 250, 10]
#     tar_color = np.zeros_like(image)
#     tar_color[:, :, 0] = b
#     tar_color[:, :, 1] = g
#     tar_color[:, :, 2] = r
#
#     image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
#     il, ia, ib = cv2.split(image_lab)
#
#     tar_lab = cv2.cvtColor(tar_color, cv2.COLOR_BGR2Lab)
#     tl, ta, tb = cv2.split(tar_lab)
#
#     image_lab[:, :, 0] = np.clip(il - np.mean(il) + tl, 0, 100)
#     image_lab[:, :, 1] = np.clip(ia - np.mean(ia) + ta, -127, 128)
#     image_lab[:, :, 2] = np.clip(ib - np.mean(ib) + tb, -127, 128)
#
#     changed = cv2.cvtColor(image_lab, cv2.COLOR_Lab2BGR)
#
#     if part == 17:
#         changed = sharpen(changed)
#
#     changed[parsing != part] = image[parsing != part]
#     # changed = cv2.resize(changed, (512, 512))
#     return changed
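A minimal usage sketch for hair(); the image and parsing map here are fabricated, and the call assumes the sharpen() helper from makeup.py is importable:

import cv2
import numpy as np

img = cv2.imread("face.png")                       # hypothetical BGR input
parsing = np.zeros(img.shape[:2], dtype=np.int64)  # hypothetical parsing map
parsing[100:300, 100:300] = 17                     # pretend this region is hair
recolored = hair(img, parsing, part=17, color=[230, 50, 20])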