Python cv2.COLOR_BGR2YUV Examples
The following are 13 code examples of cv2.COLOR_BGR2YUV(), collected from open-source projects. The source file, project, and license for each example are noted above its code. You may also want to check out all available functions/classes of the cv2 module.
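
Before diving into the examples, here is a minimal sketch of the conversion itself; the image path is a placeholder:

    import cv2

    img_bgr = cv2.imread('input.jpg')                    # placeholder path; OpenCV loads images in BGR order
    img_yuv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YUV)   # channel 0 is luma (Y); channels 1 and 2 are chroma (U, V)
    img_back = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)  # inverse conversion; lossy only up to rounding for uint8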
Example #1
Source File: neural_style.py From neural-style-tf with GNU General Public License v3.0
def convert_to_original_colors(content_img, stylized_img):
    content_img = postprocess(content_img)
    stylized_img = postprocess(stylized_img)
    if args.color_convert_type == 'yuv':
        cvt_type = cv2.COLOR_BGR2YUV
        inv_cvt_type = cv2.COLOR_YUV2BGR
    elif args.color_convert_type == 'ycrcb':
        cvt_type = cv2.COLOR_BGR2YCR_CB
        inv_cvt_type = cv2.COLOR_YCR_CB2BGR
    elif args.color_convert_type == 'luv':
        cvt_type = cv2.COLOR_BGR2LUV
        inv_cvt_type = cv2.COLOR_LUV2BGR
    elif args.color_convert_type == 'lab':
        cvt_type = cv2.COLOR_BGR2LAB
        inv_cvt_type = cv2.COLOR_LAB2BGR
    content_cvt = cv2.cvtColor(content_img, cvt_type)
    stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
    # Keep the stylized luminance channel and the content image's two chroma channels.
    c1, _, _ = cv2.split(stylized_cvt)
    _, c2, c3 = cv2.split(content_cvt)
    merged = cv2.merge((c1, c2, c3))
    dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
    dst = preprocess(dst)
    return dst
Example #2
Source File: train_yadav.py From robust_physical_perturbations with MIT License
def transform_image(image, ang_range, shear_range, trans_range):
    # Rotation
    ang_rot = np.random.uniform(ang_range) - ang_range/2
    rows, cols, ch = image.shape
    Rot_M = cv2.getRotationMatrix2D((cols/2, rows/2), ang_rot, 1)

    # Translation
    tr_x = trans_range*np.random.uniform() - trans_range/2
    tr_y = trans_range*np.random.uniform() - trans_range/2
    Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])

    # Shear
    pts1 = np.float32([[5, 5], [20, 5], [5, 20]])
    pt1 = 5 + shear_range*np.random.uniform() - shear_range/2
    pt2 = 20 + shear_range*np.random.uniform() - shear_range/2
    pts2 = np.float32([[pt1, 5], [pt2, pt1], [5, pt2]])
    shear_M = cv2.getAffineTransform(pts1, pts2)

    image = cv2.warpAffine(image, Rot_M, (cols, rows))
    image = cv2.warpAffine(image, Trans_M, (cols, rows))
    image = cv2.warpAffine(image, shear_M, (cols, rows))
    image = pre_process_image(image.astype(np.uint8))

    #image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    #image = image[:,:,0]
    #image = cv2.resize(image, (img_resize, img_resize), interpolation=cv2.INTER_CUBIC)
    return image
Example #3
Source File: calc_accuracy_yadav.py From robust_physical_perturbations with MIT License
def pre_process_image(image):
    #image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    #print(image)
    # Histogram-equalize each channel independently (the YUV path above is disabled).
    image[:,:,0] = cv2.equalizeHist(image[:,:,0])
    image[:,:,1] = cv2.equalizeHist(image[:,:,1])
    image[:,:,2] = cv2.equalizeHist(image[:,:,2])
    image = image/255. - 0.5
    return image
Example #4
Source File: train_yadav.py From robust_physical_perturbations with MIT License
def pre_process_image(image):
    #image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    #print(image)
    #image[:,:,0] = cv2.equalizeHist(image[:,:,0])
    #image[:,:,1] = cv2.equalizeHist(image[:,:,1])
    #image[:,:,2] = cv2.equalizeHist(image[:,:,2])
    image = image/255. - 0.5
    return image
Example #5
Source File: split_data.py From End-to-End-Learning-for-Self-Driving-Cars with Apache License 2.0
def read_an_image(filename):
    img = cv2.imread(filename)
    img = cv2.resize(img[-150:], (200, 66))  # keep the bottom 150 rows, then resize to 200x66
    # BGR space to YUV space
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    return img
Example #6
Source File: transforms.py From kaggle_carvana_segmentation with MIT License
def __call__(self, im):
    img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    clahe = cv2.createCLAHE(clipLimit=self.clipLimit, tileGridSize=self.tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return img_output
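
The __call__ above comes from a transform class whose constructor is not shown on this page. A minimal sketch of how such a class might be wired up, assuming it simply stores clipLimit and tileGridSize; the class name and default values here are assumptions:

    import cv2

    class CLAHETransform:
        # Hypothetical constructor; the real class may use different defaults.
        def __init__(self, clipLimit=2.0, tileGridSize=(8, 8)):
            self.clipLimit = clipLimit
            self.tileGridSize = tileGridSize

        def __call__(self, im):
            img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
            clahe = cv2.createCLAHE(clipLimit=self.clipLimit, tileGridSize=self.tileGridSize)
            img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
            return cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)

    # Usage: transform = CLAHETransform(); enhanced = transform(image)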
Example #7
Source File: img_util.py From CvStudio with MIT License
def histogram_equalization2(img: np.ndarray):
    if len(np.shape(img)) == 3:
        img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
        # equalize the histogram of the Y channel
        img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
        # convert the YUV image back to BGR format
        img = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return img
Example #8
Source File: clahe_histogram_equalization.py From Mastering-OpenCV-4-with-Python with MIT License
def equalize_clahe_color_yuv(img):
    """Equalize a color image by converting it to YUV, applying CLAHE to
    the Y (luma) channel only, merging the channels, and converting the
    result back to BGR."""
    cla = cv2.createCLAHE(clipLimit=4.0)
    Y, U, V = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2YUV))
    eq_Y = cla.apply(Y)
    eq_image = cv2.cvtColor(cv2.merge([eq_Y, U, V]), cv2.COLOR_YUV2BGR)
    return eq_image
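
A minimal usage sketch for the function above; the file path is a placeholder:

    img = cv2.imread('photo.jpg')           # placeholder path; cv2.imread returns BGR
    eq_img = equalize_clahe_color_yuv(img)  # contrast-enhanced, colors preserved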
Example #9
Source File: augmentations.py From segmentation-networks-benchmark with MIT License
def __call__(self, im):
    img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    clahe = cv2.createCLAHE(clipLimit=self.clipLimit, tileGridSize=self.tileGridSize)
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return img_output
Example #10
Source File: mosaicer.py From Mosaicer with MIT License
def frame_pre_processing(img):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
    # Note: COLOR_YUV2RGB returns the frame in RGB order, not OpenCV's usual BGR.
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB)
    return img_output
Example #11
Source File: testNet.py From calc with BSD 3-Clause "New" or "Revised" License
def computeForwardPasses(nets, alexnet, im, transformer, transformer_alex, resize_net):
    """Compute the forward passes for CALC and, optionally, AlexNet."""
    img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
    im = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    alex_conv3 = None
    t_alex = -1
    imcp = np.copy(im)  # for AlexNet
    if im.shape[2] > 1:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    if not resize_net:
        im = cv2.resize(im, (160, 120), interpolation=cv2.INTER_CUBIC)
    else:
        transformer = caffe.io.Transformer({'X1': (1, 1, im.shape[0], im.shape[1])})
        transformer.set_raw_scale('X1', 1./255)
        for net in nets:
            x1 = net.blobs['X1']
            x1.reshape(1, 1, im.shape[0], im.shape[1])
            net.reshape()
    descr = []
    t_calc = []
    for net in nets:
        t0 = time()
        net.blobs['X1'].data[...] = transformer.preprocess('X1', im)
        net.forward()
        d = np.copy(net.blobs['descriptor'].data[...])
        t_calc.append(time() - t0)
        d /= np.linalg.norm(d)
        descr.append(d)
    if alexnet is not None:
        im2 = cv2.resize(imcp, (227, 227), interpolation=cv2.INTER_CUBIC)
        t0 = time()
        alexnet.blobs['data'].data[...] = transformer_alex.preprocess('data', im2)
        alexnet.forward()
        alex_conv3 = np.copy(alexnet.blobs['conv3'].data[...])
        alex_conv3 = np.reshape(alex_conv3, (alex_conv3.size, 1))
        global first_it
        global A
        if first_it:
            np.random.seed(0)
            A = np.random.randn(descr[0].size, alex_conv3.size)  # for Gaussian random projection
            first_it = False
        alex_conv3 = np.matmul(A, alex_conv3)
        alex_conv3 = np.reshape(alex_conv3, (1, alex_conv3.size))
        t_alex = time() - t0
        alex_conv3 /= np.linalg.norm(alex_conv3)
    return descr, alex_conv3, t_calc, t_alex
Example #12
Source File: BlindWatermark copy.py From BlindWatermark with GNU General Public License v3.0
def read_ori_img(self, filename):
    # OpenCV preserves the array dtype: uint8 in means uint8 out, but the U/V
    # channels can be negative and uint8 would drop the fractional part,
    # so convert to float32 first.
    ori_img = cv2.imread(filename).astype(np.float32)
    self.ori_img_shape = ori_img.shape[:2]
    if self.color_mod == 'RGB':
        self.ori_img_YUV = ori_img
    elif self.color_mod == 'YUV':
        self.ori_img_YUV = cv2.cvtColor(ori_img, cv2.COLOR_BGR2YUV)
    # Zero-pad so both dimensions are divisible by 2**dwt_deep.
    if not self.ori_img_YUV.shape[0] % (2**self.dwt_deep) == 0:
        temp = (2**self.dwt_deep) - self.ori_img_YUV.shape[0] % (2**self.dwt_deep)
        self.ori_img_YUV = np.concatenate(
            (self.ori_img_YUV, np.zeros((temp, self.ori_img_YUV.shape[1], 3))), axis=0)
    if not self.ori_img_YUV.shape[1] % (2**self.dwt_deep) == 0:
        temp = (2**self.dwt_deep) - self.ori_img_YUV.shape[1] % (2**self.dwt_deep)
        self.ori_img_YUV = np.concatenate(
            (self.ori_img_YUV, np.zeros((self.ori_img_YUV.shape[0], temp, 3))), axis=1)
    assert self.ori_img_YUV.shape[0] % (2**self.dwt_deep) == 0
    assert self.ori_img_YUV.shape[1] % (2**self.dwt_deep) == 0
    if self.dwt_deep == 1:
        coeffs_Y = dwt2(self.ori_img_YUV[:,:,0], 'haar')
        ha_Y = coeffs_Y[0]
        coeffs_U = dwt2(self.ori_img_YUV[:,:,1], 'haar')
        ha_U = coeffs_U[0]
        coeffs_V = dwt2(self.ori_img_YUV[:,:,2], 'haar')
        ha_V = coeffs_V[0]
        self.coeffs_Y = [coeffs_Y[1]]
        self.coeffs_U = [coeffs_U[1]]
        self.coeffs_V = [coeffs_V[1]]
    elif self.dwt_deep >= 2:
        # Avoid too many DWT levels; 2 or 3 is enough.
        coeffs_Y = dwt2(self.ori_img_YUV[:,:,0], 'haar')
        ha_Y = coeffs_Y[0]
        coeffs_U = dwt2(self.ori_img_YUV[:,:,1], 'haar')
        ha_U = coeffs_U[0]
        coeffs_V = dwt2(self.ori_img_YUV[:,:,2], 'haar')
        ha_V = coeffs_V[0]
        self.coeffs_Y = [coeffs_Y[1]]
        self.coeffs_U = [coeffs_U[1]]
        self.coeffs_V = [coeffs_V[1]]
        for i in range(self.dwt_deep - 1):
            coeffs_Y = dwt2(ha_Y, 'haar')
            ha_Y = coeffs_Y[0]
            coeffs_U = dwt2(ha_U, 'haar')
            ha_U = coeffs_U[0]
            coeffs_V = dwt2(ha_V, 'haar')
            ha_V = coeffs_V[0]
            self.coeffs_Y.append(coeffs_Y[1])
            self.coeffs_U.append(coeffs_U[1])
            self.coeffs_V.append(coeffs_V[1])
    self.ha_Y = ha_Y
    self.ha_U = ha_U
    self.ha_V = ha_V
    self.ha_block_shape = (int(self.ha_Y.shape[0]/self.block_shape[0]),
                           int(self.ha_Y.shape[1]/self.block_shape[1]),
                           self.block_shape[0], self.block_shape[1])
    strides = self.ha_Y.itemsize * np.array([self.ha_Y.shape[1]*self.block_shape[0],
                                             self.block_shape[1], self.ha_Y.shape[1], 1])
    self.ha_Y_block = np.lib.stride_tricks.as_strided(self.ha_Y.copy(), self.ha_block_shape, strides)
    self.ha_U_block = np.lib.stride_tricks.as_strided(self.ha_U.copy(), self.ha_block_shape, strides)
    self.ha_V_block = np.lib.stride_tricks.as_strided(self.ha_V.copy(), self.ha_block_shape, strides)
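
The float32 cast in read_ori_img is worth demonstrating; a quick, self-contained sketch of the dtype behavior the comment describes (the input values are illustrative):

    import cv2
    import numpy as np

    bgr_u8 = np.full((2, 2, 3), 10, dtype=np.uint8)

    # uint8 in -> uint8 out: chroma is offset and rounded to fit the type.
    print(cv2.cvtColor(bgr_u8, cv2.COLOR_BGR2YUV).dtype)                     # uint8
    # float32 in -> float32 out: chroma keeps its sign and fractional part.
    print(cv2.cvtColor(bgr_u8.astype(np.float32), cv2.COLOR_BGR2YUV).dtype)  # float32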
Example #13
Source File: BlindWatermark.py From BlindWatermark with GNU General Public License v3.0
def read_ori_img(self, filename):
    # OpenCV preserves the array dtype: uint8 in means uint8 out, but the U/V
    # channels can be negative and uint8 would drop the fractional part,
    # so convert to float32 first.
    ori_img = cv2.imread(filename).astype(np.float32)
    self.ori_img_shape = ori_img.shape[:2]
    if self.color_mod == 'RGB':
        self.ori_img_YUV = ori_img
    elif self.color_mod == 'YUV':
        self.ori_img_YUV = cv2.cvtColor(ori_img, cv2.COLOR_BGR2YUV)
    # Zero-pad so both dimensions are divisible by 2**dwt_deep.
    if not self.ori_img_YUV.shape[0] % (2**self.dwt_deep) == 0:
        temp = (2**self.dwt_deep) - self.ori_img_YUV.shape[0] % (2**self.dwt_deep)
        self.ori_img_YUV = np.concatenate(
            (self.ori_img_YUV, np.zeros((temp, self.ori_img_YUV.shape[1], 3))), axis=0)
    if not self.ori_img_YUV.shape[1] % (2**self.dwt_deep) == 0:
        temp = (2**self.dwt_deep) - self.ori_img_YUV.shape[1] % (2**self.dwt_deep)
        self.ori_img_YUV = np.concatenate(
            (self.ori_img_YUV, np.zeros((self.ori_img_YUV.shape[0], temp, 3))), axis=1)
    assert self.ori_img_YUV.shape[0] % (2**self.dwt_deep) == 0
    assert self.ori_img_YUV.shape[1] % (2**self.dwt_deep) == 0
    if self.dwt_deep == 1:
        coeffs_Y = dwt2(self.ori_img_YUV[:,:,0], 'haar')
        ha_Y = coeffs_Y[0]
        coeffs_U = dwt2(self.ori_img_YUV[:,:,1], 'haar')
        ha_U = coeffs_U[0]
        coeffs_V = dwt2(self.ori_img_YUV[:,:,2], 'haar')
        ha_V = coeffs_V[0]
        self.coeffs_Y = [coeffs_Y[1]]
        self.coeffs_U = [coeffs_U[1]]
        self.coeffs_V = [coeffs_V[1]]
    elif self.dwt_deep >= 2:
        # Avoid too many DWT levels; 2 or 3 is enough.
        coeffs_Y = dwt2(self.ori_img_YUV[:,:,0], 'haar')
        ha_Y = coeffs_Y[0]
        coeffs_U = dwt2(self.ori_img_YUV[:,:,1], 'haar')
        ha_U = coeffs_U[0]
        coeffs_V = dwt2(self.ori_img_YUV[:,:,2], 'haar')
        ha_V = coeffs_V[0]
        self.coeffs_Y = [coeffs_Y[1]]
        self.coeffs_U = [coeffs_U[1]]
        self.coeffs_V = [coeffs_V[1]]
        for i in range(self.dwt_deep - 1):
            coeffs_Y = dwt2(ha_Y, 'haar')
            ha_Y = coeffs_Y[0]
            coeffs_U = dwt2(ha_U, 'haar')
            ha_U = coeffs_U[0]
            coeffs_V = dwt2(ha_V, 'haar')
            ha_V = coeffs_V[0]
            self.coeffs_Y.append(coeffs_Y[1])
            self.coeffs_U.append(coeffs_U[1])
            self.coeffs_V.append(coeffs_V[1])
    self.ha_Y = ha_Y
    self.ha_U = ha_U
    self.ha_V = ha_V
    self.ha_block_shape = (int(self.ha_Y.shape[0]/self.block_shape[0]),
                           int(self.ha_Y.shape[1]/self.block_shape[1]),
                           self.block_shape[0], self.block_shape[1])
    strides = self.ha_Y.itemsize * np.array([self.ha_Y.shape[1]*self.block_shape[0],
                                             self.block_shape[1], self.ha_Y.shape[1], 1])
    self.ha_Y_block = np.lib.stride_tricks.as_strided(self.ha_Y.copy(), self.ha_block_shape, strides)
    self.ha_U_block = np.lib.stride_tricks.as_strided(self.ha_U.copy(), self.ha_block_shape, strides)
    self.ha_V_block = np.lib.stride_tricks.as_strided(self.ha_V.copy(), self.ha_block_shape, strides)