Python cv2.COLOR_GRAY2BGR Examples
The following are 30 code examples of cv2.COLOR_GRAY2BGR(), drawn from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
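cv2.COLOR_GRAY2BGR tells cv2.cvtColor() to replicate a single-channel grayscale image into three identical B, G, and R channels — the usual prerequisite for drawing colored annotations on a grayscale image, as many of the examples below do. A minimal sketch (the file names are placeholders):

    # Minimal sketch; 'input.png' and 'annotated.png' are placeholder paths.
    import cv2

    gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)    # H x W, uint8
    bgr = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)            # H x W x 3, all channels equal
    cv2.rectangle(bgr, (10, 10), (60, 60), (0, 0, 255), 2)  # red box now renders in color
    cv2.imwrite('annotated.png', bgr)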
Example #1
Source File: thresholding.py From smashscan with MIT License
def contour_filter(self, frame):
    _, contours, _ = cv2.findContours(frame, cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)

    new_frame = np.zeros(frame.shape, np.uint8)
    for i, contour in enumerate(contours):
        c_area = cv2.contourArea(contour)
        if self.contour_min_area <= c_area <= self.contour_max_area:
            mask = np.zeros(frame.shape, np.uint8)
            cv2.drawContours(mask, contours, i, 255, cv2.FILLED)
            mask = cv2.bitwise_and(frame, mask)
            new_frame = cv2.bitwise_or(new_frame, mask)
    frame = new_frame

    if self.contour_disp_flag:
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        cv2.drawContours(frame, contours, -1, (255, 0, 0), 1)

    return frame


# A number of methods corresponding to the various trackbars available.
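Note that the three-value unpacking of cv2.findContours() above follows the OpenCV 3.x API; in OpenCV 4.x the function returns only (contours, hierarchy). A version-agnostic sketch (the helper name is our own):

    # Hypothetical compatibility helper (name is ours): cv2.findContours
    # returns (image, contours, hierarchy) in OpenCV 3.x but
    # (contours, hierarchy) in OpenCV 4.x; the contours list is the
    # second-to-last element in both versions.
    import cv2

    def find_contours_compat(binary, mode=cv2.RETR_EXTERNAL,
                             method=cv2.CHAIN_APPROX_SIMPLE):
        result = cv2.findContours(binary, mode, method)
        return result[-2]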
Example #2
Source File: utils.py From tf2-yolo3 with Apache License 2.0
def draw_labels(x, y, class_names=None):
    img = x.numpy()
    if img.ndim == 2 or img.shape[2] == 1:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    boxes, classes = tf.split(y, (4, 1), axis=-1)
    classes = classes[..., 0]
    wh = np.flip(img.shape[0:2])
    min_wh = np.amin(wh)
    if min_wh <= 100:
        font_size = 0.5
    else:
        font_size = 1
    for i in range(len(boxes)):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 1)
        if class_names:
            img = cv2.putText(img, class_names[classes[i]], x1y1,
                              cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size,
                              (0, 0, 255), 1)
        else:
            img = cv2.putText(img, str(classes[i]), x1y1,
                              cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                              (0, 0, 255), 1)
    return img
Example #3
Source File: utils.py From tf2-yolo3 with Apache License 2.0
def draw_outputs(img, outputs, class_names=None):
    boxes, objectness, classes = outputs
    #boxes, objectness, classes = boxes[0], objectness[0], classes[0]
    wh = np.flip(img.shape[0:2])
    if img.ndim == 2 or img.shape[2] == 1:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    min_wh = np.amin(wh)
    if min_wh <= 100:
        font_size = 0.5
    else:
        font_size = 1
    for i in range(classes.shape[0]):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 1)
        img = cv2.putText(img, '{}'.format(int(classes[i])), x1y1,
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size,
                          (0, 0, 255), 1)
    return img
Example #4
Source File: util.py From smashscan with MIT License
def get_image_and_mask(img_location, gray_flag):

    # Load image from file with alpha channel (UNCHANGED flag). If an alpha
    # channel does not exist, just return the base image.
    img = cv2.imread(img_location, cv2.IMREAD_UNCHANGED)
    if img.shape[2] <= 3:
        return img, None

    # Create an alpha channel matrix with values between 0-255. Then
    # threshold the alpha channel to create a binary mask.
    channels = cv2.split(img)
    mask = np.array(channels[3])
    _, mask = cv2.threshold(mask, 250, 255, cv2.THRESH_BINARY)

    # Convert image and mask to grayscale or BGR based on input flag.
    if gray_flag:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    return img, mask


# Resize an image and mask based on an input scale ratio.
Example #5
Source File: helpers.py From songoku with MIT License
def blend_non_transparent(face_img, overlay_img):
    # Let's find a mask covering all the non-black (foreground) pixels
    # NB: We need to do this on grayscale version of the image
    gray_overlay = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    # Let's shrink and blur it a little to make the transitions smoother...
    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    # And the inverse mask, that covers all the black (background) pixels
    background_mask = 255 - overlay_mask

    # Turn the masks into three channel, so we can use them as weights
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    # Create a masked out face image, and masked out overlay
    # We convert the images to floating point in range 0.0 - 1.0
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    # And finally just add them together, and rescale it back to an 8bit integer image
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
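A hypothetical call site for the helper above (file names are assumptions): both inputs must be same-size 8-bit BGR images, and black pixels in the overlay are treated as transparent:

    # Hypothetical usage; 'frame.png' and 'overlay.png' are placeholder files.
    import cv2

    frame = cv2.imread('frame.png')      # 8-bit BGR background
    overlay = cv2.imread('overlay.png')  # 8-bit BGR, black pixels = transparent
    overlay = cv2.resize(overlay, (frame.shape[1], frame.shape[0]))
    blended = blend_non_transparent(frame, overlay)
    cv2.imwrite('blended.png', blended)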
Example #6
Source File: 05_cartoonizing.py From OpenCV-3-x-with-Python-By-Example with MIT License
def cartoonize_image(img, ksize=5, sketch_mode=False):
    num_repetitions, sigma_color, sigma_space, ds_factor = 10, 5, 7, 4
    # Convert image to grayscale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median filter to the grayscale image
    img_gray = cv2.medianBlur(img_gray, 7)

    # Detect edges in the image and threshold it
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=ksize)
    ret, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV)

    # 'mask' is the sketch of the image
    if sketch_mode:
        return cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    # Resize the image to a smaller size for faster computation
    img_small = cv2.resize(img, None, fx=1.0/ds_factor, fy=1.0/ds_factor,
                           interpolation=cv2.INTER_AREA)

    # Apply the bilateral filter to the image multiple times
    for i in range(num_repetitions):
        img_small = cv2.bilateralFilter(img_small, ksize, sigma_color, sigma_space)

    img_output = cv2.resize(img_small, None, fx=ds_factor, fy=ds_factor,
                            interpolation=cv2.INTER_LINEAR)

    dst = np.zeros(img_gray.shape)

    # Add the thick boundary lines to the image using 'AND' operator
    dst = cv2.bitwise_and(img_output, img_output, mask=mask)
    return dst
Example #7
Source File: CThermal.py From Thermal_Image_Analysis with MIT License
def generate_colorbar(self, min_temp=None, max_temp=None, cmap=cv.COLORMAP_JET, height=None):
    if min_temp is None:
        min_temp = self.global_min_temp
    if max_temp is None:
        max_temp = self.global_max_temp
    cb_gray = np.arange(255, 0, -1, dtype=np.uint8).reshape((255, 1))
    if cmap is not None:
        cb_color = cv.applyColorMap(cb_gray, cmap)
    else:
        cb_color = cv.cvtColor(cb_gray, cv.COLOR_GRAY2BGR)
    for i in range(1, 6):
        cb_color = np.concatenate((cb_color, cb_color), axis=1)

    if height is None:
        append_img = np.zeros((self.thermal_image.shape[0], cb_color.shape[1] + 30, 3), dtype=np.uint8)
    else:
        append_img = np.zeros((height, cb_color.shape[1] + 30, 3), dtype=np.uint8)

    append_img[append_img.shape[0]//2 - cb_color.shape[0]//2:
               append_img.shape[0]//2 - (cb_color.shape[0]//2) + cb_color.shape[0],
               10:10 + cb_color.shape[1]] = cb_color
    cv.putText(append_img, str(min_temp),
               (5, append_img.shape[0]//2 - (cb_color.shape[0]//2) + cb_color.shape[0] + 30),
               cv.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1, 8)
    cv.putText(append_img, str(max_temp),
               (5, append_img.shape[0]//2 - cb_color.shape[0]//2 - 20),
               cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 1, 8)
    return append_img
Example #8
Source File: utils.py From QuickDraw with MIT License
def get_overlay(bg_image, fg_image, sizes=(40, 40)):
    fg_image = cv2.resize(fg_image, sizes)
    fg_mask = fg_image[:, :, 3:]
    fg_image = fg_image[:, :, :3]
    bg_mask = 255 - fg_mask
    bg_image = bg_image / 255
    fg_image = fg_image / 255
    fg_mask = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR) / 255
    bg_mask = cv2.cvtColor(bg_mask, cv2.COLOR_GRAY2BGR) / 255
    image = cv2.addWeighted(bg_image * bg_mask, 255, fg_image * fg_mask, 255, 0.).astype(np.uint8)
    return image

# if __name__ == '__main__':
#     images = get_images("../images", ["apple", "star"])
#     print(images[0].shape)
#     print(np.max(images[0]))
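A hypothetical call site (file names are assumptions): bg_image is the 40x40 BGR region of the frame being drawn on, and fg_image is a 4-channel BGRA sprite whose alpha plane drives the blend:

    # Hypothetical usage; file names are placeholders. The sprite must be
    # loaded with IMREAD_UNCHANGED so its alpha plane survives.
    import cv2

    frame = cv2.imread('frame.png')                          # 8-bit BGR
    sprite = cv2.imread('sprite.png', cv2.IMREAD_UNCHANGED)  # H x W x 4 BGRA
    frame[0:40, 0:40] = get_overlay(frame[0:40, 0:40], sprite, sizes=(40, 40))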
Example #9
Source File: system.py From sips2_open with GNU General Public License v3.0
def render(self, n_max=0, fallback_im=None):
    if self.image_scores is not None:
        im = cv2.applyColorMap((self.image_scores * 255).astype(np.uint8),
                               cv2.COLORMAP_JET)
    else:
        assert fallback_im is not None
        im = cv2.cvtColor(fallback_im, cv2.COLOR_GRAY2BGR)
    if n_max == 0:
        n_max = self.ips_rc.shape[1]
    for i in range(n_max):
        thickness_relevant_score = \
            np.clip(self.ip_scores[i], 0.2, 0.6) - 0.2
        thickness = int(thickness_relevant_score * 20)
        if type(self.scales) == np.ndarray:
            radius = int(self.scales[i] * 10)
        else:
            radius = 10
        cv2.circle(im, tuple(self.ips_rc[[1, 0], i]), radius,
                   (0, 255, 0), thickness, cv2.LINE_AA)
    return im
Example #10
Source File: image_functions.py From niryo_one_ros with GNU General Public License v3.0
def concat_imgs(tuple_imgs, axis=1):
    """
    Concat multiple images along 1 axis

    :param tuple_imgs: tuple of images
    :param axis: 0 means vertically and 1 means horizontally
    :return: Concat image
    """
    new_list_imgs = []
    for image in tuple_imgs:
        if len(image.shape) == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        new_list_imgs.append(image)
    concat_im = np.concatenate(tuple(new_list_imgs), axis=axis)
    return concat_im

# ROS
Example #11
Source File: helpers.py From hazymaze with Apache License 2.0
def blend_non_transparent(sprite, background_img):
    gray_overlay = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    background_mask = 255 - overlay_mask

    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    sprite_part = (sprite * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (background_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    return np.uint8(cv2.addWeighted(sprite_part, 255.0, overlay_part, 255.0, 0.0))
Example #12
Source File: digits.py From OpenCV-Python-Tutorial with MIT License
def evaluate_model(model, digits, samples, labels):
    resp = model.predict(samples)
    err = (labels != resp).mean()
    print('error: %.2f %%' % (err * 100))

    confusion = np.zeros((10, 10), np.int32)
    for i, j in zip(labels, resp):
        confusion[i, int(j)] += 1
    print('confusion matrix:')
    print(confusion)
    print()

    vis = []
    for img, flag in zip(digits, resp == labels):
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if not flag:
            img[..., :2] = 0
        vis.append(img)
    return mosaic(25, vis)
Example #13
Source File: Emojinator_V2.py From Emojinator with MIT License
def blend_transparent(face_img, overlay_t_img):
    # Split out the transparency mask from the colour info
    overlay_img = overlay_t_img[:, :, :3]   # Grab the BGR planes
    overlay_mask = overlay_t_img[:, :, 3:]  # And the alpha plane

    # Again calculate the inverse mask
    background_mask = 255 - overlay_mask

    # Turn the masks into three channel, so we can use them as weights
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    # Create a masked out face image, and masked out overlay
    # We convert the images to floating point in range 0.0 - 1.0
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    # And finally just add them together, and rescale it back to an 8bit integer image
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
Example #14
Source File: evaluate.py From imips_open with GNU General Public License v3.0
def renderTrainSample(pair, fps, corr_rc, inl):
    ims = [cv2.cvtColor(i, cv2.COLOR_GRAY2BGR) for i in pair.im]
    rc = system.renderColors()
    for fp, corr, im in zip(fps, corr_rc, ims):
        for i in range(128):
            cv2.circle(im, tuple(fp.ips_rc[[1, 0], i]), 6, tuple(rc[i]), 1,
                       cv2.LINE_AA)
            if inl[i]:
                thck = -1
            else:
                thck = 1
            cv2.circle(im, tuple(corr[[1, 0], i]), 2, tuple(rc[i]), thck,
                       cv2.LINE_AA)
    renderings = [i.render(with_points=False) for i in fps]
    gray_ims = np.concatenate(ims, axis=0)
    rend = np.concatenate(renderings, axis=0)
    full_im = np.concatenate([gray_ims, rend], axis=1)
    outdir = os.path.join(
        'results', 'train_samples', hyperparams.methodEvalString())
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    outfile = os.path.join(outdir, pair.name() + '.png')
    cv2.imwrite(outfile, full_im)
Example #15
Source File: sips_system.py From imips_open with GNU General Public License v3.0
def render(self, n_max=0, fallback_im=None):
    if self.image_scores is not None:
        im = cv2.applyColorMap((self.image_scores * 255).astype(np.uint8),
                               cv2.COLORMAP_JET)
    else:
        assert fallback_im is not None
        im = cv2.cvtColor(fallback_im, cv2.COLOR_GRAY2BGR)
    if n_max == 0:
        n_max = self.ips_rc.shape[1]
    for i in range(n_max):
        thickness_relevant_score = \
            np.clip(self.ip_scores[i], 0.2, 0.6) - 0.2
        thickness = int(thickness_relevant_score * 20)
        if type(self.scales) == np.ndarray:
            radius = int(self.scales[i] * 10)
        else:
            radius = 10
        cv2.circle(im, tuple(self.ips_rc[[1, 0], i]), radius,
                   (0, 255, 0), thickness, cv2.LINE_AA)
    return im
Example #16
Source File: classification.py From Traffic-Sign-Detection with MIT License
def evaluate_model(model, data, samples, labels):
    resp = model.predict(samples)
    print(resp)
    err = (labels != resp).mean()
    print('Accuracy: %.2f %%' % ((1 - err) * 100))

    confusion = np.zeros((10, 10), np.int32)
    for i, j in zip(labels, resp):
        confusion[int(i), int(j)] += 1
    print('confusion matrix:')
    print(confusion)

    vis = []
    for img, flag in zip(data, resp == labels):
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if not flag:
            img[..., :2] = 0
        vis.append(img)
    return mosaic(16, vis)
Example #17
Source File: convert_to_3_channel.py From image_utility with MIT License
def main():
    # Read in image list to be converted.
    with open('gray.json', 'r') as fp:
        img_list = json.load(fp)
    logging.debug("Total files to be converted: {}".format(len(img_list)))

    # Convert them into 3 channel images.
    for each_file in tqdm(img_list):
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        if len(img.shape) == 3:
            print("Not a gray image: {}".format(each_file))
            continue
        cv2.imshow('preview', img)
        if cv2.waitKey(30) == 27:
            break

        # Do conversion
        img_converted = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        # Write to file.
        cv2.imwrite(each_file, img_converted)

        # Check if conversion failed.
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        assert len(img.shape) == 3, "Conversion failed: {}".format(each_file)
Example #18
Source File: viz.py From petridishnn with MIT License
def draw_boxes(im, boxes, labels=None, color=None):
    """
    Args:
        im (np.ndarray): a BGR image in range [0,255]. It will not be modified.
        boxes (np.ndarray): a numpy array of shape Nx4 where each row is [x1, y1, x2, y2].
        labels: (list[str] or None)
        color: a 3-tuple BGR color (in range [0, 255])

    Returns:
        np.ndarray: a new image.
    """
    boxes = np.asarray(boxes, dtype='int32')
    if labels is not None:
        assert len(labels) == len(boxes), "{} != {}".format(len(labels), len(boxes))
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    sorted_inds = np.argsort(-areas)    # draw large ones first
    assert areas.min() > 0, areas.min()
    # allow equal, because we are not very strict about rounding error here
    assert boxes[:, 0].min() >= 0 and boxes[:, 1].min() >= 0 \
        and boxes[:, 2].max() <= im.shape[1] and boxes[:, 3].max() <= im.shape[0], \
        "Image shape: {}\n Boxes:\n{}".format(str(im.shape), str(boxes))

    im = im.copy()
    if color is None:
        color = (15, 128, 15)
    if im.ndim == 2 or (im.ndim == 3 and im.shape[2] == 1):
        im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    for i in sorted_inds:
        box = boxes[i, :]
        if labels is not None:
            im = draw_text(im, (box[0], box[1]), labels[i], color=color)
        cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]),
                      color=color, thickness=1)
    return im
Example #19
Source File: dcfnet.py From open-vot with MIT License
def init(self, image, init_rect):
    self.model.eval()
    self.target_pos = init_rect[:2] + init_rect[2:] / 2 - 1
    self.target_sz = init_rect[2:]
    self.min_sz = np.maximum(self.cfg.min_scale_factor * self.target_sz, 4)
    self.max_sz = np.minimum(
        image.shape[:2], self.cfg.max_scale_factor * self.target_sz)
    self.padded_sz = self.target_sz * (1 + self.cfg.padding)

    # get feature size and initialize hanning window
    if image.ndim == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    elif image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    target = warp_cv2(image, self.target_pos, self.padded_sz,
                      self.cfg.net_input_size, (0, 0, 0))
    target = target - self.cfg.net_average_image
    # cv2.imshow('response', target)
    # cv2.waitKey(0)
    target = torch.from_numpy(target).cuda().permute(
        2, 0, 1).unsqueeze(0).float()
    self.model.update(target)
    self.patch_crop = torch.zeros(
        self.cfg.num_scale, target.shape[1],
        target.shape[2], target.shape[3]).cuda()  # buff
Example #20
Source File: transforms.py From kaggle_carvana_segmentation with MIT License
def __call__(self, img):
    # dont work :(
    if random.random() < self.prob:
        maxval = np.max(img[..., :3])
        dtype = img.dtype
        alpha = 1.0 + random.uniform(-self.limit, self.limit)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        img[..., :3] = alpha * img[..., :3] + (1.0 - alpha) * gray
        img[..., :3] = clip(img[..., :3], dtype, maxval)
    return img
Example #21
Source File: pdf-to-csv-cv.py From pdf-to-csv-table-extactor with Do What The F*ck You Want To Public License
def extract_main_table(gray_image):
    inverted = cv2.bitwise_not(gray_image)
    blurred = cv2.GaussianBlur(inverted, (5, 5), 0)
    thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    if DEBUG:
        show_wait_destroy("thresholded", thresholded)

    cnts = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[1]  # if imutils.is_cv2() else cnts[1]
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    rect = cv2.minAreaRect(cnts[0])
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    extracted = four_point_transform(gray_image.copy(), box.reshape(4, 2))

    if DEBUG:
        color_image = cv2.cvtColor(gray_image.copy(), cv2.COLOR_GRAY2BGR)
        cv2.drawContours(color_image, [box], 0, (0, 0, 255), 2)
        cv2.drawContours(color_image, [cnts[0]], -1, (0, 255, 0), 2)

    return extracted
Example #22
Source File: dcf.py From open-vot with MIT License
def update(self, image):
    if image.ndim == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    if self.resize_image:
        size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
        image = cv2.resize(image, size)

    # locate target
    x = self._crop(image, self.t_center, self.padded_sz)
    x = self.hann_window * fast_hog(np.float32(x), self.cfg.cell_size)
    kf = self._linear_correlation(fft2(x), self.zf)
    score = real(ifft2(complex_mul(self.alphaf, kf)))
    offset = self._locate_target(score)
    self.t_center += offset * self.cfg.cell_size
    # limit the estimated bounding box to be overlapped with the image
    self.t_center = np.clip(
        self.t_center, -self.t_sz / 2 + 2,
        image.shape[1::-1] + self.t_sz / 2 - 1)

    # update model
    new_z = self._crop(image, self.t_center, self.padded_sz)
    new_z = fast_hog(np.float32(new_z), self.cfg.cell_size)
    new_zf = fft2(new_z * self.hann_window)
    kf = self._linear_correlation(new_zf, new_zf)
    new_alphaf = complex_div(self.yf, complex_add(kf, self.cfg.lambda_))
    self.alphaf = (1 - self.cfg.interp_factor) * self.alphaf + \
        self.cfg.interp_factor * new_alphaf
    self.zf = (1 - self.cfg.interp_factor) * self.zf + \
        self.cfg.interp_factor * new_zf

    bndbox = np.concatenate([
        self.t_center - self.t_sz / 2, self.t_sz])
    if self.resize_image:
        bndbox = bndbox * 2

    return bndbox
Example #23
Source File: eval.py From kaggle_carvana_segmentation with MIT License
def visualize(self, show_light=False, show_base=True):
    dsize = None
    hmap = heatmap(self.full_pred)
    if self.full_image is not None and show_light:
        light_heat = cv2.addWeighted(self.full_image[:, :, :3], 0.6, hmap, 0.4, 0)
        if dsize:
            light_heat = cv2.resize(light_heat, (dsize, dsize))
        cv2.imshow('light heat', light_heat)
        if self.full_mask is not None and self.show_mask:
            light_mask = cv2.addWeighted(
                self.full_image[:, :, :3], 0.6,
                cv2.cvtColor(self.full_mask, cv2.COLOR_GRAY2BGR), 0.4, 0)
            if dsize:
                light_mask = cv2.resize(light_mask, (dsize, dsize))
            cv2.imshow('light mask', light_mask)
    if self.full_image is not None and show_base:
        if dsize:
            cv2.imshow('image', cv2.resize(self.full_image[:, :, :3], (dsize, dsize)))
        else:
            cv2.imshow('image', self.full_image[:, :, :3])
    if dsize:
        hmap = cv2.resize(hmap, (dsize, dsize))
    cv2.imshow('heatmap', hmap)
    if self.full_mask is not None and self.show_mask:
        if dsize:
            cv2.imshow('mask', cv2.resize(self.full_mask, (dsize, dsize)))
        else:
            cv2.imshow('mask', self.full_mask)
    if show_light or show_base:
        cv2.waitKey()
Example #24
Source File: util.py From EDVR with Apache License 2.0
def channel_convert(in_c, tar_type, img_list):
    """conversion among BGR, gray and y"""
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in gray_list]
    elif in_c == 3 and tar_type == 'y':  # BGR to y
        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in y_list]
    elif in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    else:
        return img_list
Example #25
Source File: util.py From IKC with Apache License 2.0
def channel_convert(in_c, tar_type, img_list):
    # conversion among BGR, gray and y
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in gray_list]
    elif in_c == 3 and tar_type == 'y':  # BGR to y
        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in y_list]
    elif in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    else:
        return img_list
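A hypothetical usage of channel_convert (the file name is a placeholder): a 3-channel BGR image converted to grayscale comes back with an explicit channel axis of size 1, and the 'RGB' branch uses COLOR_GRAY2BGR to expand it back to three channels:

    # Hypothetical usage; 'input.png' is a placeholder path.
    import cv2

    bgr = cv2.imread('input.png')                     # H x W x 3, BGR
    gray_list = channel_convert(3, 'gray', [bgr])     # list of H x W x 1 arrays
    bgr_again = channel_convert(1, 'RGB', gray_list)  # back to H x W x 3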
Example #26
Source File: input.py From FaceLock with MIT License
def read_image(file_path):
    image = cv2.imread(file_path)
    if GRAY_MODE == True:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    image = resize_with_pad(image, IMAGE_SIZE, IMAGE_SIZE)
    return image
Example #27
Source File: system.py From imips_open with GNU General Public License v3.0
def render(self, with_points=True, fallback_im=None):
    if self.image_scores is not None:
        assert len(self.image_scores.shape) == 3
        assert self.image_scores.shape[2] == 128
        dom_channel = np.argmax(self.image_scores, axis=2)
        max_score = np.max(self.image_scores, axis=2)
        hsv = np.stack((dom_channel * renderHueMultiplier(),
                        255 * np.ones_like(dom_channel),
                        (np.clip(max_score, 0, 0.5)) * 2 * 255),
                       axis=2).astype(np.uint8)
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    else:
        bgr = cv2.cvtColor(fallback_im, cv2.COLOR_GRAY2BGR)

    if with_points:
        rc = renderColors()
        for i in range(128):
            if i == len(self.ip_scores):
                assert FLAGS.baseline != ''
                break
            thickness_relevant_score = \
                np.clip(self.ip_scores[i], 0.4, 0.6) - 0.4
            thickness = int(thickness_relevant_score * 40)
            radius = int(self.scales[i] * 10)
            cv2.circle(bgr, tuple(self.ips_rc[[1, 0], i]), radius,
                       tuple(rc[i]), thickness, cv2.LINE_AA)
    return bgr
Example #28
Source File: image_functions.py From niryo_one_ros with GNU General Public License v3.0
def draw_contours(img, contours):
    """
    Draw a list of contours on an image and return the drawing image

    :param img: Image
    :param contours: contours list
    :return: Image with drawing
    """
    if len(img.shape) == 2:
        img_bgr = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    else:
        img_bgr = img.copy()
    cv2.drawContours(img_bgr, contours, -1, (255, 0, 0), 3)
    return img_bgr
Example #29
Source File: evaluate.py From imips_open with GNU General Public License v3.0
def renderMatching(pair, fps):
    """ returns true inlier count, R_err and t_err """
    _, true_count, mask, R_err, t_err = evaluatePair(pair, fps)
    renderings = [i.render(fallback_im=im) for i, im in zip(fps, pair.im)]
    ims = [cv2.cvtColor(i, cv2.COLOR_GRAY2BGR) for i in pair.im]
    roffsets = [i * ims[0].shape[0] for i in [1, 2]]
    full_im = np.concatenate(
        [ims[0], renderings[0], renderings[1], ims[1]], axis=0)
    inl_ips = [fp.ips_rc[:, mask] for fp in fps]
    endpoints = [inl_ips[i] + np.array([[roffsets[i], 0]]).T for i in [0, 1]]
    rc = system.renderColors()
    inl_colors = rc[np.nonzero(mask)[0]]
    for i in range(endpoints[0].shape[1]):
        cv2.line(full_im, tuple(endpoints[0][[1, 0], i]),
                 tuple(endpoints[1][[1, 0], i]), tuple(inl_colors[i]), 2,
                 cv2.LINE_AA)
    outdir = os.path.join(
        'results', 'match_render', hyperparams.methodEvalString())
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    outfile = os.path.join(outdir, pair.name() + '.png')
    cv2.imwrite(outfile, full_im)
    return true_count, R_err, t_err
Example #30
Source File: dead.py From IkaLog with Apache License 2.0
def recoginize_and_vote_death_reason(self, context):
    if self.deadly_weapon_recoginizer is None:
        return False

    lang_short = Localization.get_game_languages()[0][0:2]
    try:
        c = self.choordinates[lang_short]
    except KeyError:
        c = self.choordinates['en']

    img_weapon = context['engine']['frame'][
        c['top']:c['top'] + 51, c['left']:c['left'] + 410]

    img_weapon_gray = cv2.cvtColor(img_weapon, cv2.COLOR_BGR2GRAY)
    img_weapon_hsv = cv2.cvtColor(img_weapon, cv2.COLOR_BGR2HSV)
    img_weapon_gray[img_weapon_hsv[:, :, 1] > 32] = 0
    ret, img_weapon_b = cv2.threshold(
        img_weapon_gray, 220, 255, cv2.THRESH_BINARY)

    # (Note) This is the data to save for training. Change to 1 for training.
    if 0:  # (self.time_last_write + 5000 < context['engine']['msec']):
        import time
        filename = os.path.join(
            # training/ directory must already exist
            'training', '_deadly_weapons.%s.png' % time.time())
        cv2.imwrite(filename, img_weapon_b)
        self.time_last_write = context['engine']['msec']

    # Workaround for languages the deadly-weapons recognizer is not trained on
    if not Localization.get_game_languages()[0] in ['ja', 'en_NA']:
        return

    img_weapon_b_bgr = cv2.cvtColor(img_weapon_b, cv2.COLOR_GRAY2BGR)
    weapon_id = self.deadly_weapon_recoginizer.match(img_weapon_b_bgr)

    # Cast a vote (votes are tallied later)
    votes = self._cause_of_death_votes
    votes[weapon_id] = votes.get(weapon_id, 0) + 1