Python cv2.COLOR_RGB2HSV Examples
The following are 30 code examples of cv2.COLOR_RGB2HSV(), each taken from an open-source project; the original project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the cv2 module.
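Before the project examples, here is a minimal sketch of the basic round trip (not taken from any of the projects below; the input image and scale factor are made up for illustration). Note OpenCV's conventions: for uint8 images, hue is stored in [0, 179] and saturation/value in [0, 255], while for float32 images hue is in [0, 360] and saturation/value in [0, 1]. Several examples below rely on one convention or the other.

import cv2
import numpy as np

# Made-up input: a solid orange uint8 RGB image. Note that cv2.imread returns
# BGR, so images loaded with OpenCV usually need cv2.COLOR_BGR2RGB first.
rgb = np.full((240, 320, 3), (255, 128, 0), dtype=np.uint8)

# RGB -> HSV: for uint8 images, H lies in [0, 179], S and V in [0, 255].
hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)

# Edit a channel (here: darken by scaling V) and convert back to RGB.
hsv[:, :, 2] = (hsv[:, :, 2] * 0.8).astype(np.uint8)
rgb_back = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)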
Example #1
Source File: batch_generator.py From fcn8s_tensorflow with GNU General Public License v3.0 | 7 votes |
def _brightness(image, min=0.5, max=2.0):
    ''' Randomly changes the brightness of the input image. Protected against overflow. '''
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    random_br = np.random.uniform(min, max)

    # To protect against overflow: Calculate a mask for all pixels
    # where adjustment of the brightness would exceed the maximum
    # brightness value and set the value to the maximum at those pixels.
    mask = hsv[:, :, 2] * random_br > 255
    v_channel = np.where(mask, 255, hsv[:, :, 2] * random_br)
    hsv[:, :, 2] = v_channel

    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
Example #2
Source File: object_detection_2d_photometric_ops.py From data_generator_object_detection_2d with GNU General Public License v3.0 | 7 votes |
def __call__(self, image, labels=None):
    if self.current == 'RGB' and self.to == 'HSV':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    elif self.current == 'RGB' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    elif self.current == 'HSV' and self.to == 'RGB':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
    elif self.current == 'HSV' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    if labels is None:
        return image
    else:
        return image, labels
Example #3
Source File: imgproc.py From rps-cv with MIT License | 6 votes |
def hueDistance(img, hueValue):
    """Returns an image where the pixel values correspond to the distance
    between the hue value of the source image pixels and the hueValue argument."""

    # Convert image to HSV colorspace
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    hChannel = hsv[:, :, 0].astype(int)

    # Calculate hue distance
    dist = np.abs(hsv[:, :, 0] - hueValue)

    if hueValue < 90:
        hueOffset = 180
    else:
        hueOffset = -180

    dist = np.minimum(np.abs(hChannel - hueValue),
                      np.abs(hChannel - (hueValue + hueOffset)))

    return dist
Example #4
Source File: pil_aug_transforms.py From openseg.pytorch with MIT License | 6 votes |
def __call__(self, img, labelmap=None, maskmap=None):
    assert isinstance(img, Image.Image)
    assert labelmap is None or isinstance(labelmap, Image.Image)
    assert maskmap is None or isinstance(maskmap, Image.Image)

    if random.random() > self.ratio:
        return img, labelmap, maskmap

    img = np.array(img).astype(np.float32)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)

    img[:, :, 0] += random.uniform(-self.delta, self.delta)
    img[:, :, 0][img[:, :, 0] > 360] -= 360
    img[:, :, 0][img[:, :, 0] < 0] += 360

    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    img = np.clip(img, 0, 255)

    return Image.fromarray(img.astype(np.uint8)), labelmap, maskmap
Example #5
Source File: image_process.py From Advanced_Lane_Lines with MIT License | 6 votes |
def test_yellow_white_thresh_images(src, dst, y_low=(10,50,0), y_high=(30,255,255), w_low=(180,180,180), w_high=(255,255,255)):
    """ Apply the thresholds to the images in the src folder and write the results to the dst folder. """
    image_files = glob.glob(src + "*.jpg")
    for idx, file in enumerate(image_files):
        print(file)
        img = mpimg.imread(file)
        image_threshed = yellow_white_thresh(img, y_low, y_high, w_low, w_high)

        file_name = file.split("\\")[-1]
        print(file_name)
        out_image = dst + file_name
        print(out_image)

        # Convert the binary image to RGB and scale by 255 for visualization;
        # a raw value of 1 would not be visible after writing to file.
        image_threshed = cv2.cvtColor(image_threshed * 255, cv2.COLOR_GRAY2RGB)

        # HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        # V = HSV[:,:,2]
        # brightness = np.mean(V)
        # info_str = "brightness is: {}".format(int(brightness))
        # cv2.putText(image_threshed, info_str, (50,700), cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,255),2)

        cv2.imwrite(out_image, image_threshed)
Example #6
Source File: chineselib.py From ctw-baseline with MIT License | 6 votes |
def cv_preprocess_image(img, output_height, output_width, is_training):
    assert output_height == output_width
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    img[:, :, 0] = np.uint8((np.int32(img[:, :, 0]) + (180 + random.randrange(-9, 10))) % 180)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    rows, cols, ch = img.shape
    output_size = output_width

    def r():
        return (random.random() - 0.5) * 0.1 * output_size

    pts1 = np.float32([[0, 0], [cols, rows], [0, rows]])
    pts2 = np.float32([[r(), r()], [output_size + r(), output_size + r()], [r(), output_size + r()]])
    M = cv2.getAffineTransform(pts1, pts2)
    noize = np.random.normal(0, random.random() * (0.05 * 255), size=img.shape)
    img = np.array(img, dtype=np.float32) + noize
    img = cv2.warpAffine(img, M, (output_size, output_size), flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)
    return img
Example #7
Source File: generator.py From keras-image-segmentation with MIT License | 6 votes |
def pre_processing(img):
    # Random exposure and saturation (0.9 ~ 1.1 scale)
    rand_s = random.uniform(0.9, 1.1)
    rand_v = random.uniform(0.9, 1.1)

    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)

    tmp = np.ones_like(img[:, :, 1]) * 255
    img[:, :, 1] = np.where(img[:, :, 1] * rand_s > 255, tmp, img[:, :, 1] * rand_s)
    img[:, :, 2] = np.where(img[:, :, 2] * rand_v > 255, tmp, img[:, :, 2] * rand_v)

    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)

    # Centering normalizes the image to the (-1 ~ 1) value range
    return img / 127.5 - 1


# Get ImageDataGenerator arguments (options) depending on mode - (train, val, test)
Example #8
Source File: dp.py From pytorch_stacked_hourglass with BSD 3-Clause "New" or "Revised" License | 6 votes |
def preprocess(self, data):
    # random hue and saturation
    data = cv2.cvtColor(data, cv2.COLOR_RGB2HSV)
    delta = (np.random.random() * 2 - 1) * 0.2
    data[:, :, 0] = np.mod(data[:, :, 0] + (delta * 360 + 360.), 360.)

    delta_sature = np.random.random() + 0.5
    data[:, :, 1] *= delta_sature
    data[:, :, 1] = np.maximum(np.minimum(data[:, :, 1], 1), 0)
    data = cv2.cvtColor(data, cv2.COLOR_HSV2RGB)

    # adjust brightness
    delta = (np.random.random() * 2 - 1) * 0.3
    data += delta

    # adjust contrast
    mean = data.mean(axis=2, keepdims=True)
    data = (data - mean) * (np.random.random() + 0.5) + mean
    data = np.minimum(np.maximum(data, 0), 1)
    return data
Example #9
Source File: images_data_augmenter_seqaware.py From RecurrentGaze with MIT License | 6 votes |
def modify_illumination(images: list, ilrange: list, random_bright: float = None):
    """
    Convert images to HSV color space, modify the Value channel according to a random brightness
    value and convert back to RGB. If random_bright is None, the random brightness value is
    uniformly sampled from the ilrange tuple, otherwise random_bright is used directly. This
    brightness value is multiplied with the original Value channel.
    :param images: list of images
    :param ilrange: illumination range (min, max) from which the brightness value is uniformly
        sampled if random_bright is None.
    :param random_bright: optional value specifying the brightness multiplier.
    :return: transformed images, random_bright value
    """
    if random_bright is None:
        random_bright = np.random.uniform(ilrange[0], ilrange[1])
    new_images = []
    for image in images:
        image1 = cv.cvtColor(image, cv.COLOR_RGB2HSV)
        image1[:, :, 2] = image1[:, :, 2] * random_bright
        image1[:, :, 2] = np.clip(image1[:, :, 2], 0., 255.)
        image1 = cv.cvtColor(image1, cv.COLOR_HSV2RGB)
        new_images.append(image1)
    return new_images, random_bright
Example #10
Source File: image.py From ImageAI with MIT License | 6 votes |
def random_distort_image(image, hue=18, saturation=1.5, exposure=1.5):
    # determine scale factors
    dhue = np.random.uniform(-hue, hue)
    dsat = _rand_scale(saturation)
    dexp = _rand_scale(exposure)

    # convert RGB space to HSV space
    image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV).astype('float')

    # change saturation and exposure
    image[:, :, 1] *= dsat
    image[:, :, 2] *= dexp

    # change hue
    image[:, :, 0] += dhue
    image[:, :, 0] -= (image[:, :, 0] > 180) * 180
    image[:, :, 0] += (image[:, :, 0] < 0) * 180

    # convert back to RGB from HSV
    return cv2.cvtColor(image.astype('uint8'), cv2.COLOR_HSV2RGB)
Example #11
Source File: dp.py From pose-ae-train with BSD 3-Clause "New" or "Revised" License | 6 votes |
def preprocess(self, data):
    # random hue and saturation
    data = cv2.cvtColor(data, cv2.COLOR_RGB2HSV)
    delta = (np.random.random() * 2 - 1) * 0.2
    data[:, :, 0] = np.mod(data[:, :, 0] + (delta * 360 + 360.), 360.)

    delta_sature = np.random.random() + 0.5
    data[:, :, 1] *= delta_sature
    data[:, :, 1] = np.maximum(np.minimum(data[:, :, 1], 1), 0)
    data = cv2.cvtColor(data, cv2.COLOR_HSV2RGB)

    # adjust brightness
    delta = (np.random.random() * 2 - 1) * 0.3
    data += delta

    # adjust contrast
    mean = data.mean(axis=2, keepdims=True)
    data = (data - mean) * (np.random.random() + 0.5) + mean
    data = np.minimum(np.maximum(data, 0), 1)

    # cv2.imwrite('x.jpg', (data * 255).astype(np.uint8))
    return data
Example #12
Source File: test_color.py From imgaug with MIT License | 6 votes |
def _add_hue_saturation(cls, img, value=None, value_hue=None, value_saturation=None):
    if value is not None:
        assert value_hue is None and value_saturation is None
    else:
        assert value_hue is not None or value_saturation is not None

    if value is not None:
        value_hue = value
        value_saturation = value
    else:
        value_hue = value_hue or 0
        value_saturation = value_saturation or 0

    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    img_hsv = img_hsv.astype(np.int32)
    img_hsv[..., 0] = np.mod(
        img_hsv[..., 0] + int((value_hue / 255.0) * (360 / 2)), 180)
    img_hsv[..., 1] = np.clip(
        img_hsv[..., 1] + value_saturation, 0, 255)
    img_hsv = img_hsv.astype(np.uint8)
    return cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB)
Example #13
Source File: transforms.py From SSD-variants with MIT License | 6 votes |
def __call__(self, img):
    assert img.ndim == 3 and img.shape[2] == 3

    if self.random.random_sample() >= self.prob:
        return img

    var = self.random.uniform(-self.var, self.var)
    to_HSV, from_HSV = [(cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2RGB),
                        (cv2.COLOR_BGR2HSV, cv2.COLOR_HSV2BGR)][self.random.randint(2)]
    hsv = cv2.cvtColor(img, to_HSV).astype(np.float32)

    hue = hsv[:, :, 0] / 179. + var
    hue = hue - np.floor(hue)
    hsv[:, :, 0] = hue * 179.

    img = cv2.cvtColor(hsv.astype('uint8'), from_HSV)
    return img
Example #14
Source File: object_detection_2d_photometric_ops.py From Tensorflow-quantization-test with Apache License 2.0 | 6 votes |
def __call__(self, image, labels=None):
    if self.current == 'RGB' and self.to == 'HSV':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    elif self.current == 'RGB' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    elif self.current == 'HSV' and self.to == 'RGB':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
    elif self.current == 'HSV' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    if labels is None:
        return image
    else:
        return image, labels
Example #15
Source File: diff.py From dvt with GNU General Public License v2.0 | 6 votes |
def _l1_quantile(batch, quantile=50, size=32):
    """Compute differences between subsequent frames in a batch."""
    bsize = batch.bsize
    msize = bsize + 1
    assert msize <= batch.get_frames().shape[0]

    simg = zeros((msize, size, size, 3))
    for iran in range(msize):
        fsmall = resize(batch.get_frames()[iran, :, :, :], (size, size))
        fsmall_hsv = cvtColor(fsmall, COLOR_RGB2HSV)
        simg[iran, :, :, :] = fsmall_hsv

    norm = simg[slice(0, bsize), :, :, :] - simg[slice(1, bsize + 1), :, :, :]

    return percentile(npabs(norm), q=quantile, axis=(1, 2, 3))
Example #16
Source File: image.py From keras-yolo3 with MIT License | 6 votes |
def random_distort_image(image, hue=18, saturation=1.5, exposure=1.5):
    # determine scale factors
    dhue = np.random.uniform(-hue, hue)
    dsat = _rand_scale(saturation)
    dexp = _rand_scale(exposure)

    # convert RGB space to HSV space
    image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV).astype('float')

    # change saturation and exposure
    image[:, :, 1] *= dsat
    image[:, :, 2] *= dexp

    # change hue
    image[:, :, 0] += dhue
    image[:, :, 0] -= (image[:, :, 0] > 180) * 180
    image[:, :, 0] += (image[:, :, 0] < 0) * 180

    # convert back to RGB from HSV
    return cv2.cvtColor(image.astype('uint8'), cv2.COLOR_HSV2RGB)
Example #17
Source File: object_detection_2d_photometric_ops.py From keras-FP16-test with Apache License 2.0 | 6 votes |
def __call__(self, image, labels=None):
    if self.current == 'RGB' and self.to == 'HSV':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    elif self.current == 'RGB' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    elif self.current == 'HSV' and self.to == 'RGB':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
    elif self.current == 'HSV' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    if labels is None:
        return image
    else:
        return image, labels
Example #18
Source File: color_histogram.py From RoboND-Perception-Intro with MIT License | 6 votes |
def color_hist(img, nbins=32, bins_range=(0, 256)):
    # Convert from RGB to HSV using cv2.cvtColor()
    hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # Compute the histogram of the HSV channels separately
    h_hist = np.histogram(hsv_img[:, :, 0], bins=nbins, range=bins_range)
    s_hist = np.histogram(hsv_img[:, :, 1], bins=nbins, range=bins_range)
    v_hist = np.histogram(hsv_img[:, :, 2], bins=nbins, range=bins_range)
    # Concatenate the histograms into a single feature vector
    hist_features = np.concatenate((h_hist[0], s_hist[0], v_hist[0])).astype(np.float64)
    # Normalize the result
    norm_features = hist_features / np.sum(hist_features)
    # Return the feature vector
    return norm_features
Example #19
Source File: transform.py From deeplabv3plus-pytorch with MIT License | 6 votes |
def __call__(self, sample):
    image = sample['image']
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    h = hsv[:, :, 0].astype(np.int32)
    s = hsv[:, :, 1].astype(np.int32)
    v = hsv[:, :, 2].astype(np.int32)
    delta_h = np.random.randint(-self.h_r, self.h_r)
    delta_s = np.random.randint(-self.s_r, self.s_r)
    delta_v = np.random.randint(-self.v_r, self.v_r)
    h = (h + delta_h) % 180
    s = s + delta_s
    s[s > 255] = 255
    s[s < 0] = 0
    v = v + delta_v
    v[v > 255] = 255
    v[v < 0] = 0
    hsv = np.stack([h, s, v], axis=-1).astype(np.uint8)
    image = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB).astype(np.uint8)
    sample['image'] = image
    return sample
Example #20
Source File: functional.py From albumentations with MIT License | 6 votes |
def _shift_hsv_uint8(img, hue_shift, sat_shift, val_shift):
    dtype = img.dtype
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    hue, sat, val = cv2.split(img)

    lut_hue = np.arange(0, 256, dtype=np.int16)
    lut_hue = np.mod(lut_hue + hue_shift, 180).astype(dtype)

    lut_sat = np.arange(0, 256, dtype=np.int16)
    lut_sat = np.clip(lut_sat + sat_shift, 0, 255).astype(dtype)

    lut_val = np.arange(0, 256, dtype=np.int16)
    lut_val = np.clip(lut_val + val_shift, 0, 255).astype(dtype)

    hue = cv2.LUT(hue, lut_hue)
    sat = cv2.LUT(sat, lut_sat)
    val = cv2.LUT(val, lut_val)

    img = cv2.merge((hue, sat, val)).astype(dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img
Example #21
Source File: car_nocar.py From RoboND-Perception-Intro with MIT License | 6 votes |
def color_hist(img, nbins=32, bins_range=(0, 256)):
    # Convert from RGB to HSV using cv2.cvtColor()
    hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # Compute the histogram of the HSV channels separately
    h_hist = np.histogram(hsv_img[:, :, 0], bins=nbins, range=bins_range)
    s_hist = np.histogram(hsv_img[:, :, 1], bins=nbins, range=bins_range)
    v_hist = np.histogram(hsv_img[:, :, 2], bins=nbins, range=bins_range)
    # Concatenate the histograms into a single feature vector
    hist_features = np.concatenate((h_hist[0], s_hist[0], v_hist[0])).astype(np.float64)
    # Normalize the result
    norm_features = hist_features / np.sum(hist_features)
    # Return the feature vector
    return norm_features


# Define a function to extract features from a list of images
# Have this function call color_hist()
Example #22
Source File: functional.py From albumentations with MIT License | 6 votes |
def _shift_hsv_non_uint8(img, hue_shift, sat_shift, val_shift):
    dtype = img.dtype
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    hue, sat, val = cv2.split(img)

    if hue_shift != 0:
        hue = cv2.add(hue, hue_shift)
        hue = np.where(hue < 0, hue + 180, hue)
        hue = np.where(hue > 180, hue - 180, hue)
        hue = hue.astype(dtype)

    if sat_shift != 0:
        sat = clip(cv2.add(sat, sat_shift), dtype, 255 if dtype == np.uint8 else 1.0)

    if val_shift != 0:
        val = clip(cv2.add(val, val_shift), dtype, 255 if dtype == np.uint8 else 1.0)

    img = cv2.merge((hue, sat, val)).astype(dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img
Example #23
Source File: skin_detector.py From SkinDetector with MIT License | 6 votes |
def get_hsv_mask(img, debug=False):
    assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
    assert img.ndim == 3, 'skin detection can only work on color images'
    logger.debug('getting hsv mask')

    lower_thresh = numpy.array([0, 50, 0], dtype=numpy.uint8)
    upper_thresh = numpy.array([120, 150, 255], dtype=numpy.uint8)

    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    msk_hsv = cv2.inRange(img_hsv, lower_thresh, upper_thresh)

    msk_hsv[msk_hsv < 128] = 0
    msk_hsv[msk_hsv >= 128] = 1

    if debug:
        scripts.display('input', img)
        scripts.display('mask_hsv', msk_hsv)

    return msk_hsv.astype(float)
Example #24
Source File: object_detection_2d_photometric_ops.py From ssd_keras with Apache License 2.0 | 6 votes |
def __call__(self, image, labels=None):
    if self.current == 'RGB' and self.to == 'HSV':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    elif self.current == 'RGB' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    elif self.current == 'HSV' and self.to == 'RGB':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
    elif self.current == 'HSV' and self.to == 'GRAY':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2GRAY)
        if self.keep_3ch:
            image = np.stack([image] * 3, axis=-1)
    if labels is None:
        return image
    else:
        return image, labels
Example #25
Source File: augmentationske2e.py From DewarpNet with MIT License | 5 votes |
def color_jitter(im, brightness=0, contrast=0, saturation=0, hue=0):
    f = random.uniform(1 - contrast, 1 + contrast)
    im = np.clip(im * f, 0., 1.)
    f = random.uniform(-brightness, brightness)
    im = np.clip(im + f, 0., 1.).astype(np.float32)

    hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV)
    f = random.uniform(-hue, hue) * 360.
    hsv[:, :, 0] = np.clip(hsv[:, :, 0] + f, 0., 360.)
    f = random.uniform(-saturation, saturation)
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] + f, 0., 1.)
    im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    return im
Example #26
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def shift_hsv(img, hue_shift, sat_shift, val_shift):
    dtype = img.dtype
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
    h, s, v = cv2.split(img)
    h = cv2.add(h, hue_shift)
    h = np.where(h < 0, 255 - h, h)
    h = np.where(h > 255, h - 255, h)
    h = h.astype(dtype)
    s = clip(cv2.add(s, sat_shift), dtype, 255 if dtype == np.uint8 else 1.)
    v = clip(cv2.add(v, val_shift), dtype, 255 if dtype == np.uint8 else 1.)
    img = cv2.merge((h, s, v)).astype(dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img
Example #27
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def shift_hsv(img, hue_shift, sat_shift, val_shift):
    dtype = img.dtype
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
    h, s, v = cv2.split(img)
    h = cv2.add(h, hue_shift)
    h = np.where(h < 0, 255 - h, h)
    h = np.where(h > 255, h - 255, h)
    h = h.astype(dtype)
    s = clip(cv2.add(s, sat_shift), dtype, 255 if dtype == np.uint8 else 1.)
    v = clip(cv2.add(v, val_shift), dtype, 255 if dtype == np.uint8 else 1.)
    img = cv2.merge((h, s, v)).astype(dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img
Example #28
Source File: colorize.py From SRNet-Datagen with Apache License 2.0 | 5 votes |
def mean_color(self, arr):
    col = cv2.cvtColor(arr, cv2.COLOR_RGB2HSV)
    col = np.reshape(col, (np.prod(col.shape[:2]), 3))
    col = np.mean(col, axis=0).astype(np.uint8)
    return np.squeeze(cv2.cvtColor(col[None, None, :], cv2.COLOR_HSV2RGB))
Example #29
Source File: colorize.py From SRNet-Datagen with Apache License 2.0 | 5 votes |
def triangle_color(self, col1, col2):
    col1, col2 = np.array(col1), np.array(col2)
    col1 = np.squeeze(cv2.cvtColor(col1[None, None, :], cv2.COLOR_RGB2HSV))
    col2 = np.squeeze(cv2.cvtColor(col2[None, None, :], cv2.COLOR_RGB2HSV))
    h1, h2 = col1[0], col2[0]
    if h2 < h1:
        h1, h2 = h2, h1  # swap
    dh = h2 - h1
    if dh < 127:
        dh = 255 - dh
    col1[0] = h1 + dh / 2
    return np.squeeze(cv2.cvtColor(col1[None, None, :], cv2.COLOR_HSV2RGB))
Example #30
Source File: colorize.py From SRNet-Datagen with Apache License 2.0 | 5 votes |
def complement(self, rgb_color):
    col_hsv = np.squeeze(cv2.cvtColor(rgb_color[None, None, :], cv2.COLOR_RGB2HSV))
    col_hsv[0] = col_hsv[0] + 128  # uint8 addition wraps around (mod 256)
    col_comp = np.squeeze(cv2.cvtColor(col_hsv[None, None, :], cv2.COLOR_HSV2RGB))
    return col_comp