Python cv2.COLOR_RGB2BGR Examples
The following are 30 code examples of cv2.COLOR_RGB2BGR(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
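cv2.COLOR_RGB2BGR is a conversion code for cv2.cvtColor: it reorders an image's channels from RGB (the order produced by PIL, matplotlib, and most deep-learning pipelines) to BGR (the order OpenCV's I/O and drawing functions expect). Before the project examples, here is a minimal, self-contained sketch of the typical round trip; the file names are placeholder paths.

import cv2
import numpy as np
from PIL import Image

# PIL (like matplotlib) yields RGB; 'input.jpg' is a placeholder path.
rgb = np.array(Image.open('input.jpg').convert('RGB'))

# Reorder the channels to BGR so OpenCV interprets the colors correctly.
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
cv2.imwrite('output.jpg', bgr)  # cv2.imwrite assumes BGR input

# For a 3-channel uint8 image the conversion is just a channel reversal,
# so the NumPy slice rgb[:, :, ::-1] yields the same array.
assert np.array_equal(bgr, rgb[:, :, ::-1])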
Example #1
Source File: generate_coco_json.py From coco-json-converter with GNU General Public License v3.0 | 14 votes |
def __get_annotation__(self, mask, image=None):
    # OpenCV 3.x findContours returns (image, contours, hierarchy);
    # OpenCV 4.x returns only (contours, hierarchy)
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    segmentation = []
    for contour in contours:
        # Valid polygons have >= 6 coordinates (3 points)
        if contour.size >= 6:
            segmentation.append(contour.flatten().tolist())
    RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
    RLE = cocomask.merge(RLEs)
    # RLE = cocomask.encode(np.asfortranarray(mask))
    area = cocomask.area(RLE)
    [x, y, w, h] = cv2.boundingRect(mask)

    if image is not None:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow("", image)
        cv2.waitKey(1)

    return segmentation, [x, y, w, h], area
Example #2
Source File: model.py From CVPR2019-DeepTreeLearningForZeroShotFaceAntispoofing with MIT License | 6 votes |
def plotResults(fname, result_list):
    columm = []
    for fig in result_list:
        shape = fig.shape
        fig = fig.numpy()
        row = []
        for idx in range(shape[0]):
            item = fig[idx, :, :, :]
            if item.shape[2] == 1:
                item = np.concatenate([item, item, item], axis=2)
            item = cv2.cvtColor(cv2.resize(item, (128, 128)), cv2.COLOR_RGB2BGR)
            row.append(item)
        row = np.concatenate(row, axis=1)
        columm.append(row)
    columm = np.concatenate(columm, axis=0)
    img = np.uint8(columm * 255)
    cv2.imwrite(fname, img)


############################################################
#  Deep Tree Network
############################################################
Example #3
Source File: helpers.py From Advanced_Lane_Lines with MIT License | 6 votes |
def undistort_images(src, dst):
    """Undistort the images in the src folder and save them to the dst folder."""
    # load mtx, dist from the camera calibration pickle
    pickle_file = open("../camera_cal/camera_cal.p", "rb")
    dist_pickle = pickle.load(pickle_file)
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    pickle_file.close()

    # loop over the image folder
    image_files = glob.glob(src + "*.jpg")
    for idx, file in enumerate(image_files):
        print(file)
        img = mpimg.imread(file)
        image_dist = cv2.undistort(img, mtx, dist, None, mtx)
        file_name = file.split("\\")[-1]
        print(file_name)
        out_image = dst + file_name
        print(out_image)
        # mpimg reads RGB; convert to BGR before cv2.imwrite
        image_dist = cv2.cvtColor(image_dist, cv2.COLOR_RGB2BGR)
        cv2.imwrite(out_image, image_dist)
Example #4
Source File: gui_draw.py From interactive-deep-colorization with MIT License | 6 votes |
def save_result(self):
    path = os.path.abspath(self.image_file)
    path, ext = os.path.splitext(path)
    suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    save_path = "_".join([path, self.method, suffix])
    print('saving result to <%s>\n' % save_path)
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    np.save(os.path.join(save_path, 'im_l.npy'), self.model.img_l)
    np.save(os.path.join(save_path, 'im_ab.npy'), self.im_ab0)
    np.save(os.path.join(save_path, 'im_mask.npy'), self.im_mask0)

    result_bgr = cv2.cvtColor(self.result, cv2.COLOR_RGB2BGR)
    mask = self.im_mask0.transpose((1, 2, 0)).astype(np.uint8) * 255
    cv2.imwrite(os.path.join(save_path, 'input_mask.png'), mask)
    cv2.imwrite(os.path.join(save_path, 'ours.png'), result_bgr)
    cv2.imwrite(os.path.join(save_path, 'ours_fullres.png'), self.model.get_img_fullres()[:, :, ::-1])
    cv2.imwrite(os.path.join(save_path, 'input_fullres.png'), self.model.get_input_img_fullres()[:, :, ::-1])
    cv2.imwrite(os.path.join(save_path, 'input.png'), self.model.get_input_img()[:, :, ::-1])
    cv2.imwrite(os.path.join(save_path, 'input_ab.png'), self.model.get_sup_img()[:, :, ::-1])
Example #5
Source File: utils.py From AdaIN-TF with MIT License | 6 votes |
def preserve_colors_np(style_rgb, content_rgb):
    coraled = coral_numpy(style_rgb / 255., content_rgb / 255.)
    coraled = np.uint8(np.clip(coraled, 0, 1) * 255.)
    return coraled

# def preserve_colors_pytorch(style_rgb, content_rgb):
#     coraled = coral_pytorch(style_rgb/255., content_rgb/255.)
#     coraled = np.uint8(np.clip(coraled, 0, 1) * 255.)
#     return coraled

# def preserve_colors_color_transfer(style_rgb, content_rgb):
#     style_bgr = cv2.cvtColor(style_rgb, cv2.COLOR_RGB2BGR)
#     content_bgr = cv2.cvtColor(content_rgb, cv2.COLOR_RGB2BGR)
#     transferred = color_transfer(content_bgr, style_bgr)
#     return cv2.cvtColor(transferred, cv2.COLOR_BGR2RGB)


### Video/Webcam helpers ###
# Borrowed from https://github.com/jrosebr1/imutils/
Example #6
Source File: helpers.py From Advanced_Lane_Lines with MIT License | 6 votes |
def wrap_images(src, dst):
    """Apply the perspective warp to images."""
    # load M, Minv
    img_size = (1280, 720)
    pickle_file = open("../helper/trans_pickle.p", "rb")
    trans_pickle = pickle.load(pickle_file)
    M = trans_pickle["M"]
    Minv = trans_pickle["Minv"]

    # loop over the file folder
    image_files = glob.glob(src + "*.jpg")
    for idx, file in enumerate(image_files):
        print(file)
        img = mpimg.imread(file)
        image_wraped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
        file_name = file.split("\\")[-1]
        print(file_name)
        out_image = dst + file_name
        print(out_image)
        # mpimg reads RGB; convert to BGR before cv2.imwrite
        image_wraped = cv2.cvtColor(image_wraped, cv2.COLOR_RGB2BGR)
        cv2.imwrite(out_image, image_wraped)
Example #7
Source File: RvMediaUtility.py From MNIST-Deep-Learning with GNU General Public License v3.0 | 6 votes |
def RGB_To_CvBGR(img):
    return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

# def saveAsPNG(_2dArray, filename):
#     if any([len(row) != len(_2dArray[0]) for row in _2dArray]):
#         raise ValueError, "_2dArray should have elements of equal size"
#
#     # First row becomes top row of image.
#     flat = []; map(flat.extend, reversed(_2dArray))
#     # Big-endian, unsigned 32-bit integer.
#     buf = b''.join([struct.pack('>I', ((0xffFFff & i32) << 8) | (i32 >> 24))
#                     for i32 in flat])  # Rotate from ARGB to RGBA.
#
#     data = write_png(buf, len(_2dArray[0]), len(_2dArray))
#     f = open(filename, 'wb')
#     f.write(data)
#     f.close()
Example #8
Source File: view_perspective.py From Advanced_Lane_Lines with MIT License | 6 votes |
def test():
    pickle_file = open("trans_pickle.p", "rb")
    trans_pickle = pickle.load(pickle_file)
    M = trans_pickle["M"]
    Minv = trans_pickle["Minv"]

    img_size = (1280, 720)
    image_files = glob.glob("../output_images/undistort/*.jpg")
    for idx, file in enumerate(image_files):
        print(file)
        img = mpimg.imread(file)
        warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
        file_name = file.split("\\")[-1]
        print(file_name)
        out_image = "../output_images/perspect_trans/" + file_name
        print(out_image)
        # convert to OpenCV BGR format
        warped = cv2.cvtColor(warped, cv2.COLOR_RGB2BGR)
        cv2.imwrite(out_image, warped)
Example #9
Source File: pymini_yolo.py From vrequest with MIT License | 6 votes |
def drawrect(img, rect, text):
    cv2.rectangle(img, tuple(rect[:2]), tuple(rect[2:]), (10, 250, 10), 2, 1)
    x, y = rect[:2]

    def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
        from PIL import Image, ImageDraw, ImageFont
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(img)
        fontText = ImageFont.truetype("font/simsun.ttc", textSize, encoding="utf-8")
        draw.text((left, top), text, textColor, font=fontText)
        return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

    import re
    if re.findall('[\u4e00-\u9fa5]', text):
        # if the text contains Chinese characters, render it through PIL,
        # since cv2.putText cannot draw CJK glyphs
        img = cv2ImgAddText(img, text, x, y - 12, (10, 10, 250), 12)
    else:
        cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (10, 10, 250), 1)
    return img
Example #10
Source File: gui.py From DeepWarp with Apache License 2.0 | 6 votes |
def generate(path):
    global cur_rgb_image
    if cur_rgb_image is not None:
        print('process......')
        el_img, er_img, angle, re_angle, os_l, os_r = get_input_from_image()
        el, er = get_output_from_sess(el_img, er_img, angle, re_angle)
        new_image = np.copy(cur_rgb_image)
        new_image = helper.replace(new_image, el, os_l)
        rgb_new_image = helper.replace(new_image, er, os_r)
        # bgr_new_image = cv2.cvtColor(rgb_new_image, cv2.COLOR_RGB2BGR)
        # cv2.imshow('deepwarp', bgr_new_image)
        # if chk_btn.get() == True:
        #     rgb_new_image = cv2.medianBlur(rgb_new_image, 3)
        global label_img
        img_wapper = ImageTk.PhotoImage(Image.fromarray(rgb_new_image))
        label_img.configure(image=img_wapper)
        label_img.image = img_wapper
        return rgb_new_image
    else:
        print('no image......')
        return None
Example #11
Source File: eval.py From cvml_project with MIT License | 6 votes |
def do_demo(self, folder):
    save_dir = os.path.join(self.output_dir, 'predictions')
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    folder_dir = os.path.join(self.dataset_dir, folder, 'image_02', 'data', '*.*')
    images_files = sorted(glob.glob(folder_dir))
    print(f'doing demo on {self.demo_set}... ')
    print(f'saving prediction to {save_dir}...')

    for i, img_path in enumerate(images_files):
        img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (self.params.input_w, self.params.input_h))
        img_input = tf.expand_dims(tf.convert_to_tensor(img, tf.float32) / 255., 0)
        outputs = self.val_step(img_input)

        disp = np.squeeze(outputs['disparity0'].numpy())
        disp = visualize_colormap(disp)

        save_path = os.path.join(save_dir, f'{i}.png')
        big_image = np.zeros(shape=(self.params.input_h * 2, self.params.input_w, 3))
        big_image[:self.params.input_h, ...] = img
        big_image[self.params.input_h:, ...] = disp
        cv2.imwrite(save_path, cv2.cvtColor(big_image.astype(np.uint8), cv2.COLOR_RGB2BGR))
    print("\n-> Done!\n")
Example #12
Source File: input_video_stream_paste_mask.py From face-detection-induction-course with MIT License | 6 votes |
def start(self):
    """
    Start the program.
    :return:
    """
    self.console("Program started successfully.")
    self.init_mask()
    while self.listener:
        frame = self.read_data()
        frame = resize(frame, width=self.max_width)
        img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = self.detector(img_gray, 0)
        faces = self.orientation(rects, img_gray)
        draw_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if self.doing:
            self.drawing(draw_img, faces)
            self.animation_time += self.speed
            self.save_data(draw_img)
            if self.animation_time > self.duration:
                self.doing = False
                self.animation_time = 0
            else:
                frame = cv2.cvtColor(np.asarray(draw_img), cv2.COLOR_RGB2BGR)
        cv2.imshow("hello mask", frame)
        self.listener_keys()
Example #13
Source File: train.py From keras-image-segmentation with MIT License | 6 votes |
def train_generator(self, image_generator, mask_generator):
    # cv2.namedWindow('show', 0)
    # cv2.resizeWindow('show', 1280, 640)
    while True:
        image = next(image_generator)
        mask = next(mask_generator)
        label = self.make_regressor_label(mask).astype(np.float32)
        # print (image.dtype, label.dtype)
        # print (image.shape, label.shape)
        # exit()
        # cv2.imshow('show', image[0].astype(np.uint8))
        # cv2.imshow('label', label[0].astype(np.uint8))
        # mask = self.select_labels(mask)
        # print (image.shape)
        # print (mask.shape)
        # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        # mask = (mask.astype(np.float32)*255/33).astype(np.uint8)
        # mask_color = cv2.applyColorMap(mask, cv2.COLORMAP_JET)
        # print (mask_color.shape)
        # show = cv2.addWeighted(image, 0.5, mask_color, 0.5, 0.0)
        # cv2.imshow("show", show)
        # key = cv2.waitKey()
        # if key == 27:
        #     exit()
        yield (image, label)
Example #14
Source File: container.py From visual_dynamics with MIT License | 6 votes |
def add_datum(self, *inds, **datum_dict):
    other_dict = dict([item for item in datum_dict.items() if not item[0].endswith('image')])
    super(ImageDataContainer, self).add_datum(*inds, **other_dict)
    image_dict = dict([item for item in datum_dict.items() if item[0].endswith('image')])
    for image_name, image in image_dict.items():
        if image_name in self.datum_shapes_dict and self.datum_shapes_dict[image_name] != image.shape:
            raise ValueError('unable to add datum %s with shape %s since the shape %s was expected' %
                             (image_name, image.shape, self.datum_shapes_dict[image_name]))
        self.datum_shapes_dict[image_name] = image.shape
        image_fname = self._get_image_fname(*(inds + (image_name,)))
        if image.dtype == np.uint8:
            if image.ndim == 3 and image.shape[2] == 3:
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        else:
            image = math_utils.pack_image(image)
        cv2.imwrite(image_fname, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
Example #15
Source File: icub.py From pyERA with MIT License | 6 votes |
def return_left_camera_image(self, mode='RGB'):
    """Return a numpy array with the LEFT camera image.

    @param mode the image to return (default RGB)
        RGB: Red Green Blue image
        BGR: Blue Green Red (OpenCV)
        GRAY: Grayscale image
    """
    self.port_left_camera.read(self.yarp_image)
    if mode == 'BGR':
        return cv2.cvtColor(self.img_array, cv2.COLOR_RGB2BGR)
    elif mode == 'RGB':
        return self.img_array
    elif mode == 'GRAY':
        return cv2.cvtColor(self.img_array, cv2.COLOR_BGR2GRAY)
    else:
        return self.img_array
Example #16
Source File: icub.py From pyERA with MIT License | 6 votes |
def return_right_camera_image(self, mode='RGB'):
    """Return a numpy array with the RIGHT camera image.

    @param mode the image to return (default RGB)
        RGB: Red Green Blue image
        BGR: Blue Green Red (OpenCV)
        GRAY: Grayscale image
    """
    self.port_right_camera.read(self.yarp_image)
    if mode == 'BGR':
        return cv2.cvtColor(self.img_array, cv2.COLOR_RGB2BGR)
    elif mode == 'RGB':
        return self.img_array
    elif mode == 'GRAY':
        return cv2.cvtColor(self.img_array, cv2.COLOR_BGR2GRAY)
    else:
        return self.img_array
Example #17
Source File: mx2tfrecords.py From InsightFace_TF with MIT License | 6 votes |
def mx2tfrecords_old(imgidx, imgrec, args):
    output_path = os.path.join(args.tfrecords_file_path, 'tran.tfrecords')
    writer = tf.python_io.TFRecordWriter(output_path)
    for i in imgidx:
        img_info = imgrec.read_idx(i)
        header, img = mx.recordio.unpack(img_info)
        encoded_jpg_io = io.BytesIO(img)
        image = PIL.Image.open(encoded_jpg_io)
        np_img = np.array(image)
        img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
        img_raw = img.tobytes()
        label = int(header.label)
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
        }))
        writer.write(example.SerializeToString())  # serialize to string
        if i % 10000 == 0:
            print('%d num image processed' % i)
    writer.close()
Example #18
Source File: eval_data_reader.py From InsightFace_TF with MIT License | 6 votes |
def mx2tfrecords(imgidx, imgrec, args):
    output_path = os.path.join(args.tfrecords_file_path, 'tran.tfrecords')
    writer = tf.python_io.TFRecordWriter(output_path)
    for i in imgidx:
        img_info = imgrec.read_idx(i)
        header, img = mx.recordio.unpack(img_info)
        encoded_jpg_io = io.BytesIO(img)
        image = PIL.Image.open(encoded_jpg_io)
        np_img = np.array(image)
        img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
        img_raw = img.tobytes()
        label = int(header.label)
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
        }))
        writer.write(example.SerializeToString())  # serialize to string
        if i % 10000 == 0:
            print('%d num image processed' % i)
    writer.close()
Example #19
Source File: eval_data_reader.py From InsightFace_TF with MIT License | 6 votes |
def load_bin(db_name, image_size, args):
    bins, issame_list = pickle.load(
        open(os.path.join(args.eval_db_path, db_name + '.bin'), 'rb'), encoding='bytes')
    data_list = []
    for _ in [0, 1]:
        data = np.empty((len(issame_list) * 2, image_size[0], image_size[1], 3))
        data_list.append(data)
    for i in range(len(issame_list) * 2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin).asnumpy()
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        for flip in [0, 1]:
            if flip == 1:
                img = np.fliplr(img)
            data_list[flip][i, ...] = img
        i += 1
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return data_list, issame_list
Example #20
Source File: seg_parser.py From openseg.pytorch with MIT License | 6 votes |
def colorize(self, label_map, image_canvas=None):
    height, width = label_map.shape
    color_dst = np.zeros((height, width, 3), dtype=np.uint8)
    color_list = self.configer.get('details', 'color_list')
    for i in range(self.configer.get('data', 'num_classes')):
        color_dst[label_map == i] = color_list[i % len(color_list)]

    color_img_rgb = np.array(color_dst, dtype=np.uint8)
    color_img_bgr = cv2.cvtColor(color_img_rgb, cv2.COLOR_RGB2BGR)
    if image_canvas is not None:
        image_canvas = cv2.addWeighted(image_canvas, 0.6, color_img_bgr, 0.4, 0)
        return image_canvas
    else:
        return color_img_bgr
Example #21
Source File: extract_pool5.py From zero-shot-gcn with MIT License | 6 votes |
def preprocess_inception(image_name):
    image = cv2.imread(image_name)
    if image is None:
        return None
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    target_size = 256
    crop_size = 224
    im_size_min = np.min(image.shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    image = cv2.resize(image, None, None, fx=im_scale, fy=im_scale,
                       interpolation=cv2.INTER_LINEAR)

    height = image.shape[0]
    width = image.shape[1]
    x = int((width - crop_size) / 2)
    y = int((height - crop_size) / 2)
    image = image[y: y + crop_size, x: x + crop_size]

    save_dir = '/nfs.yoda/xiaolonw/judy_folder/transfer/debug/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    cv2.imwrite(save_dir + '1.jpg', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))

    image = image.astype(np.float32)
    image /= 255
    image = 2 * image - 1
    image = image[np.newaxis, :, :, :]
    return image
Example #22
Source File: loader_factory.py From Pytorch-Networks with MIT License | 5 votes |
def inverse_preprocess(image):
    image = image.numpy().transpose((1, 2, 0)) * 255
    image = image.astype(np.uint8)
    if image.shape[2] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image
Example #23
Source File: FileTools.py From TENet with MIT License | 5 votes |
def _pil2cv(img):
    im = np.asarray(img)
    if im.shape[2] == 3:
        return cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    else:
        return im
Example #24
Source File: image.py From GuidedFilter with MIT License | 5 votes |
def rgb2bgr(img):
    bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return bgr


## RGB to Lab.
Example #25
Source File: utils.py From Airtest with Apache License 2.0 | 5 votes |
def pil_2_cv2(pil_image):
    open_cv_image = np.array(pil_image)
    # Convert RGB to BGR (method 1):
    open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_RGB2BGR)
    # Convert RGB to BGR (method 2):
    # b, g, r = cv2.split(open_cv_image)
    # open_cv_image = cv2.merge([r, g, b])
    return open_cv_image
Example #26
Source File: streamtest.py From telloCV with Apache License 2.0 | 5 votes |
def main():
    # Set up tello streaming
    drone = tellopy.Tello()
    drone.log.set_level(2)
    drone.connect()
    drone.start_video()

    # container for processing the packets into frames
    container = av.open(drone.get_video_stream())
    video_st = container.streams.video[0]

    # stream and output file for video
    output = av.open('archive.mp4', 'w')
    ovstream = output.add_stream('mpeg4', video_st.rate)
    ovstream.pix_fmt = 'yuv420p'
    ovstream.width = video_st.width
    ovstream.height = video_st.height

    counter = 0
    save = True
    for packet in container.demux((video_st,)):
        for frame in packet.decode():
            # convert frame to cv2 image and show
            image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
            cv2.imshow('frame', image)
            key = cv2.waitKey(1) & 0xFF
            # save the first 300 frames
            if save:
                new_frame = av.VideoFrame(width=frame.width, height=frame.height,
                                          format=frame.format.name)
                for i in range(len(frame.planes)):
                    new_frame.planes[i].update(frame.planes[i])
                encode(new_frame, ovstream, output)
                counter += 1
                print("Frames encoded:", counter)
                if counter > 300:
                    output.close()
                    save = False  # stop saving once 300 frames are written
Example #27
Source File: demo_test.py From SCNN_Pytorch with MIT License | 5 votes |
def main():
    args = parse_args()

    img_path = args.img_path
    weight_path = args.weight_path

    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = transform_img({'img': img})['img']
    x = transform_to_net({'img': img})['img']
    x.unsqueeze_(0)

    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()

    seg_pred, exist_pred = net(x)[:2]
    seg_pred = seg_pred.detach().cpu().numpy()
    exist_pred = exist_pred.detach().cpu().numpy()
    seg_pred = seg_pred[0]
    exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]

    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    lane_img = np.zeros_like(img)
    color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
    coord_mask = np.argmax(seg_pred, axis=0)
    for i in range(0, 4):
        if exist_pred[0, i] > 0.5:
            lane_img[coord_mask == (i + 1)] = color[i]
    img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
    cv2.imwrite("demo/demo_result.jpg", img)

    for x in getLane.prob2lines_CULane(seg_pred, exist):
        print(x)

    if args.visualize:
        print([1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)])
        cv2.imshow("", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #28
Source File: trump.py From Trusty-cogs-archive with MIT License | 5 votes |
async def generateText(self, text):
    # global impact, textFont
    # image = Image.open('text.png')
    txtColor = (20, 20, 20)
    bgColor = (224, 233, 237)
    # bgColor = (100, 0, 0)
    imgSize = (160, 200)

    # Create image
    image = Image.new("RGB", imgSize, bgColor)
    # Draw text on top
    draw = ImageDraw.Draw(image)
    # Load font for text
    if self.textFont is None:
        self.textFont = await self.computeAndLoadTextFontForSize(draw, text, imgSize[0])

    w, h = draw.textsize(text, font=self.textFont)
    xCenter = (imgSize[0] - w) / 2
    yCenter = (50 - h) / 2
    draw.text((xCenter, 10 + yCenter), text, font=self.textFont, fill=txtColor)
    draw.text((12, 70), "IS NOW", font=self.impact, fill=txtColor)
    draw.text((10, 130), "ILLEGAL", font=self.impact, fill=txtColor)

    # Convert to CV2
    cvImage = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # cv2.imshow('text', cvImage)
    return cvImage
Example #29
Source File: realtime_demo.py From Convolutional-Pose-Machine-tf with GNU Lesser General Public License v3.0 | 5 votes |
def run(self, debug=False):
    """Run a live demo."""
    cap = cv2.VideoCapture(0)
    # Fit the capture resolution to the net's input size to boost performance
    # (3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT)
    cap.set(3, self.model.in_size)
    cap.set(4, self.model.in_size)
    while cap.isOpened():
        # get a frame
        ret, frame = cap.read()
        if ret:
            if debug:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                ret_img, t_pred, t_postp = self.__estimate__(frame)
                cv2.imshow("Real Time Demo", cv2.cvtColor(ret_img, cv2.COLOR_RGB2BGR))
            else:
                try:
                    # show a frame
                    t1 = time.time()
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    # this is the general part of the real-time demo
                    ret_img, t_pred, t_postp = self.__estimate__(frame)
                    print("Cost: predict %.2f ms. project %.2f ms." % ((t_pred - t1) * 1000, (t_postp - t_pred) * 1000))
                    ret_img = cv2.putText(ret_img,
                                          "Cost: %.2f ms %.2f FPS. Press q to quit." % ((t_pred - t1) * 1000, 1 / (t_pred - t1)),
                                          (10, 10), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.imshow("Benchmark", cv2.cvtColor(ret_img, cv2.COLOR_RGB2BGR))
                except Exception as e:
                    # won't interrupt the real-time demo
                    print("[!] ERROR occurred!! " + str(e))
        else:
            print(ret)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #30
Source File: demo.py From SiamFC-tf with MIT License | 5 votes |
def postprocess(img):
    res = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return res