Python cv2.COLOR_YUV2BGR Examples
The following are 8 code examples of cv2.COLOR_YUV2BGR().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module cv2, or try the search function.
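Before the examples, here is a minimal, self-contained sketch of the conversion itself. cv2.COLOR_YUV2BGR is the inverse of cv2.COLOR_BGR2YUV, so the common pattern in the examples below is to round-trip through YUV, edit only the Y (luminance) channel, and convert back. The file name here is just a placeholder:

import cv2

# Round trip: BGR -> YUV -> BGR. OpenCV loads images in BGR channel order.
img_bgr = cv2.imread('photo.jpg')  # placeholder path
img_yuv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YUV)
img_back = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)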
Example #1
Source File: neural_style.py From neural-style-tf with GNU General Public License v3.0
def convert_to_original_colors(content_img, stylized_img):
    content_img = postprocess(content_img)
    stylized_img = postprocess(stylized_img)
    if args.color_convert_type == 'yuv':
        cvt_type = cv2.COLOR_BGR2YUV
        inv_cvt_type = cv2.COLOR_YUV2BGR
    elif args.color_convert_type == 'ycrcb':
        cvt_type = cv2.COLOR_BGR2YCR_CB
        inv_cvt_type = cv2.COLOR_YCR_CB2BGR
    elif args.color_convert_type == 'luv':
        cvt_type = cv2.COLOR_BGR2LUV
        inv_cvt_type = cv2.COLOR_LUV2BGR
    elif args.color_convert_type == 'lab':
        cvt_type = cv2.COLOR_BGR2LAB
        inv_cvt_type = cv2.COLOR_LAB2BGR
    content_cvt = cv2.cvtColor(content_img, cvt_type)
    stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
    # Keep the stylized luminance (c1) but restore the content image's chrominance (c2, c3).
    c1, _, _ = cv2.split(stylized_cvt)
    _, c2, c3 = cv2.split(content_cvt)
    merged = cv2.merge((c1, c2, c3))
    dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
    dst = preprocess(dst)
    return dst
Example #2
Source File: visualization.py From End-to-End-Learning-for-Self-Driving-Cars with Apache License 2.0
def visualize(image, mask):
    # Convert the image from YUV back to BGR.
    image = cv2.cvtColor(image, cv2.COLOR_YUV2BGR)
    # Normalize the mask to [0, 255] and overlay it on the green channel.
    max_val = np.max(mask)
    min_val = np.min(mask)
    mask = (mask - min_val) / (max_val - min_val)
    mask = (mask * 255.0).astype(np.uint8)
    overlay = np.copy(image)
    overlay[:, :, 1] = cv2.add(image[:, :, 1], mask)
    return image, mask, overlay
Example #3
Source File: transforms.py From kaggle_carvana_segmentation with MIT License
def __call__(self, im):
    img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    clahe = cv2.createCLAHE(clipLimit=self.clipLimit, tileGridSize=self.tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return img_output
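Note that __call__ above is a method of a transform class; its __init__ (not shown in this excerpt) stores the CLAHE parameters. A hypothetical minimal wrapper, with an assumed class name and default values, might look like:

class CLAHETransform:
    # Hypothetical skeleton; the real class is defined in transforms.py.
    def __init__(self, clipLimit=2.0, tileGridSize=(8, 8)):
        self.clipLimit = clipLimit
        self.tileGridSize = tileGridSize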
Example #4
Source File: img_util.py From CvStudio with MIT License
def histogram_equalization2(img: np.ndarray):
    if len(np.shape(img)) == 3:
        img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
        # equalize the histogram of the Y channel
        img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
        # convert the YUV image back to BGR format
        img = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return img
Example #5
Source File: clahe_histogram_equalization.py From Mastering-OpenCV-4-with-Python with MIT License
def equalize_clahe_color_yuv(img):
    """Equalize a color image by converting it to YUV, applying CLAHE
    to the Y channel, then merging the channels and converting back to BGR."""
    cla = cv2.createCLAHE(clipLimit=4.0)
    Y, U, V = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2YUV))
    eq_Y = cla.apply(Y)
    eq_image = cv2.cvtColor(cv2.merge([eq_Y, U, V]), cv2.COLOR_YUV2BGR)
    return eq_image
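A quick usage sketch for the function above (file paths are placeholders):

import cv2

img = cv2.imread('input.jpg')           # any 8-bit BGR image; placeholder path
result = equalize_clahe_color_yuv(img)
cv2.imwrite('equalized.jpg', result)    # placeholder output path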
Example #6
Source File: augmentations.py From segmentation-networks-benchmark with MIT License
def __call__(self, im):
    img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    clahe = cv2.createCLAHE(clipLimit=self.clipLimit, tileGridSize=self.tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return img_output
Example #7
Source File: testNet.py From calc with BSD 3-Clause "New" or "Revised" License
def computeForwardPasses(nets, alexnet, im, transformer, transformer_alex, resize_net):
    """Compute the forward passes for CALC and optionally AlexNet."""
    # Histogram-equalize the Y channel in YUV space, then convert back to BGR.
    img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
    im = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    alex_conv3 = None
    t_alex = -1
    imcp = np.copy(im)  # for AlexNet
    if im.shape[2] > 1:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    if not resize_net:
        im = cv2.resize(im, (160, 120), interpolation=cv2.INTER_CUBIC)
    else:
        transformer = caffe.io.Transformer({'X1': (1, 1, im.shape[0], im.shape[1])})
        transformer.set_raw_scale('X1', 1. / 255)
        for net in nets:
            x1 = net.blobs['X1']
            x1.reshape(1, 1, im.shape[0], im.shape[1])
            net.reshape()
    descr = []
    t_calc = []
    for net in nets:
        t0 = time()
        net.blobs['X1'].data[...] = transformer.preprocess('X1', im)
        net.forward()
        d = np.copy(net.blobs['descriptor'].data[...])
        t_calc.append(time() - t0)
        d /= np.linalg.norm(d)
        descr.append(d)
    if alexnet is not None:
        im2 = cv2.resize(imcp, (227, 227), interpolation=cv2.INTER_CUBIC)
        t0 = time()
        alexnet.blobs['data'].data[...] = transformer_alex.preprocess('data', im2)
        alexnet.forward()
        alex_conv3 = np.copy(alexnet.blobs['conv3'].data[...])
        alex_conv3 = np.reshape(alex_conv3, (alex_conv3.size, 1))
        global first_it
        global A
        if first_it:
            np.random.seed(0)
            A = np.random.randn(descr[0].size, alex_conv3.size)  # for Gaussian random projection
            first_it = False
        alex_conv3 = np.matmul(A, alex_conv3)
        alex_conv3 = np.reshape(alex_conv3, (1, alex_conv3.size))
        t_alex = time() - t0
        alex_conv3 /= np.linalg.norm(alex_conv3)
    return descr, alex_conv3, t_calc, t_alex
Example #8
Source File: BlindWatermark.py From BlindWatermark with GNU General Public License v3.0
def embed(self, filename):
    embed_ha_Y_block = self.ha_Y_block.copy()
    embed_ha_U_block = self.ha_U_block.copy()
    embed_ha_V_block = self.ha_V_block.copy()

    self.random_dct = np.random.RandomState(self.random_seed_dct)
    index = np.arange(self.block_shape[0] * self.block_shape[1])

    # Embed the watermark into each selected block of the Y, U and V planes.
    for i in range(self.length):
        self.random_dct.shuffle(index)
        embed_ha_Y_block[self.block_add_index0[i], self.block_add_index1[i]] = self.block_add_wm(
            embed_ha_Y_block[self.block_add_index0[i], self.block_add_index1[i]], index, i)
        embed_ha_U_block[self.block_add_index0[i], self.block_add_index1[i]] = self.block_add_wm(
            embed_ha_U_block[self.block_add_index0[i], self.block_add_index1[i]], index, i)
        embed_ha_V_block[self.block_add_index0[i], self.block_add_index1[i]] = self.block_add_wm(
            embed_ha_V_block[self.block_add_index0[i], self.block_add_index1[i]], index, i)

    embed_ha_Y_part = np.concatenate(embed_ha_Y_block, 1)
    embed_ha_Y_part = np.concatenate(embed_ha_Y_part, 1)
    embed_ha_U_part = np.concatenate(embed_ha_U_block, 1)
    embed_ha_U_part = np.concatenate(embed_ha_U_part, 1)
    embed_ha_V_part = np.concatenate(embed_ha_V_block, 1)
    embed_ha_V_part = np.concatenate(embed_ha_V_part, 1)

    embed_ha_Y = self.ha_Y.copy()
    embed_ha_Y[:self.part_shape[0], :self.part_shape[1]] = embed_ha_Y_part
    embed_ha_U = self.ha_U.copy()
    embed_ha_U[:self.part_shape[0], :self.part_shape[1]] = embed_ha_U_part
    embed_ha_V = self.ha_V.copy()
    embed_ha_V[:self.part_shape[0], :self.part_shape[1]] = embed_ha_V_part

    for i in range(self.dwt_deep):
        (cH, cV, cD) = self.coeffs_Y[-1 * (i + 1)]
        embed_ha_Y = idwt2((embed_ha_Y.copy(), (cH, cV, cD)), "haar")  # idwt2 recovers the parent level's approximation (ha)
        (cH, cV, cD) = self.coeffs_U[-1 * (i + 1)]
        embed_ha_U = idwt2((embed_ha_U.copy(), (cH, cV, cD)), "haar")  # idwt2 recovers the parent level's approximation (ha)
        (cH, cV, cD) = self.coeffs_V[-1 * (i + 1)]
        embed_ha_V = idwt2((embed_ha_V.copy(), (cH, cV, cD)), "haar")  # idwt2 recovers the parent level's approximation (ha)
    # The top-level ha is the watermarked image, i.e. ha after the loop finishes.

    embed_img_YUV = np.zeros(self.ori_img_YUV.shape, dtype=np.float32)
    embed_img_YUV[:, :, 0] = embed_ha_Y
    embed_img_YUV[:, :, 1] = embed_ha_U
    embed_img_YUV[:, :, 2] = embed_ha_V
    embed_img_YUV = embed_img_YUV[:self.ori_img_shape[0], :self.ori_img_shape[1]]

    if self.color_mod == 'RGB':
        embed_img = embed_img_YUV
    elif self.color_mod == 'YUV':
        embed_img = cv2.cvtColor(embed_img_YUV, cv2.COLOR_YUV2BGR)
    # Clamp to the valid 8-bit range before writing.
    embed_img[embed_img > 255] = 255
    embed_img[embed_img < 0] = 0
    cv2.imwrite(filename, embed_img)