Python cv2.COLOR_BGR2GRAY Examples
The following are 30 code examples showing how the cv2.COLOR_BGR2GRAY conversion flag is used with cv2.cvtColor().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module cv2, or try the search function.
Example #1
Source File: motion.py From object-detection with MIT License | 10 votes |
def prediction(self, image):
    """Detect motion regions in a BGR frame against a running-average background.

    Maintains ``self.avg`` (float running average, seeded from the first
    frame) and returns the contours of regions whose difference from the
    background exceeds ``DELTA_THRESH``.
    """
    # Grayscale + heavy blur suppress sensor noise before differencing.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.GaussianBlur(image, (21, 21), 0)
    if self.avg is None:
        # First frame ever: seed the background model.
        self.avg = image.copy().astype(float)
    cv2.accumulateWeighted(image, self.avg, 0.5)
    # Absolute difference between current frame and (8-bit) background.
    frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
    thresh = cv2.threshold(
        frameDelta, DELTA_THRESH, 255, cv2.THRESH_BINARY)[1]
    # Dilate to close small gaps so each moving object yields one contour.
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(
        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # grab_contours hides the OpenCV 3.x / 4.x return-arity difference.
    cnts = imutils.grab_contours(cnts)
    # NOTE(review): resetting avg to the current frame discards the
    # accumulated history each call — presumably intentional; confirm.
    self.avg = image.copy().astype(float)
    return cnts
Example #2
Source File: calibrate_camera.py From derplearning with MIT License | 9 votes |
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """Show a live three-way comparison for a calibrated camera.

    Each frame is displayed as the raw (grayscale) capture, the
    undistorted version, and the ROI crop of the undistorted version.
    Loops forever; stops only when a frame read fails.
    """
    new_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ok, raw = camera.cap.read()
        assert ok
        gray = cv2.cvtColor(raw, cv2.COLOR_BGR2GRAY)
        undistorted = cv2.undistort(
            gray,
            camera_matrix,
            distortion_coefficients,
            None,
            new_matrix,
        )
        x, y, w, h = roi
        # Crop away the black border introduced by undistortion.
        cropped = undistorted[y : y + h, x : x + w]
        cv2.imshow("distorted %s" % (gray.shape,), gray)
        cv2.imshow("undistorted %s" % (undistorted.shape,), undistorted)
        cv2.imshow("cropped %s" % (cropped.shape,), cropped)
        cv2.waitKey(10)
Example #3
Source File: tkinter_functions.py From simba with GNU Lesser General Public License v3.0 | 8 votes |
def clahe(filename):
    """Apply CLAHE contrast enhancement to every frame of *filename*.

    Writes the enhanced (grayscale) video next to the input as
    ``CLAHE_<stem>.avi`` and returns that file name.
    """
    os.chdir(os.path.dirname(filename))
    print('Applying CLAHE, this might take awhile...')
    currentVideo = os.path.basename(filename)
    # rsplit keeps the full stem when the name contains dots; the old
    # split('.', 2) raised ValueError on names like "a.b.mp4".
    fileName, fileEnding = currentVideo.rsplit('.', 1)
    saveName = 'CLAHE_' + fileName + '.avi'
    cap = cv2.VideoCapture(currentVideo)
    imageWidth = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
    imageHeight = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # isColor=0: frames written are single-channel grayscale.
    out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
    # Filter parameters never change — build it once, not once per frame.
    claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
    try:
        while True:
            ret, image = cap.read()
            if ret:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                out.write(claheFilter.apply(im))
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print('Completed video ' + str(saveName))
                break
    except Exception as err:
        # Was a bare except that hid the reason; keep best-effort behavior
        # but surface the actual error.
        print('clahe not applied')
        print(err)
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return saveName
Example #4
Source File: histcomparison.py From pedestrian-haar-based-detector with GNU General Public License v2.0 | 8 votes |
def main():
    """Histogram-equalize img.jpg in grayscale.

    Saves the grayscale image before and after equalization and plots a
    histogram for each stage. Returns 0.
    """
    img = cv2.imread("img.jpg")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    generate_histogram(gray)
    cv2.imwrite("before.jpg", gray)
    equalized = cv2.equalizeHist(gray)
    generate_histogram(equalized)
    cv2.imwrite("after.jpg", equalized)
    return 0
Example #5
Source File: camera_calibration.py From Advanced_Lane_Lines with MIT License | 7 votes |
def test(): """ read the pickle file on disk and implement undistor on image show the oringal/undistort image """ print("Reading the pickle file...") pickle_file = open("./camera_cal.p", "rb") dist_pickle = pickle.load(pickle_file) mtx = dist_pickle["mtx"] dist = dist_pickle["dist"] pickle_file.close() print("Reading the sample image...") img = cv2.imread('corners_founded/corners_found13.jpg') img_size = (img.shape[1],img.shape[0]) dst = cv2.undistort(img, mtx, dist, None, mtx) # dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY) # Visualize undistortion print("Visulize the result...") f, (ax1,ax2) = plt.subplots(1,2, figsize=(20,10)) ax1.imshow(img), ax1.set_title('Original Image', fontsize=15) ax2.imshow(dst), ax2.set_title('Undistored Image', fontsize=15) plt.show()
Example #6
Source File: picam.py From PiCamNN with MIT License | 7 votes |
def movement(mat_1, mat_2):
    """Return True when the two BGR frames differ enough to count as motion.

    NOTE(review): ``blur1``, ``blur2`` and ``erodeval`` are module-level
    globals; the three-value findContours unpacking matches OpenCV 3.x
    only (2.4 and 4.x return two values) — confirm the installed version.
    """
    # Binarize both frames identically so XOR highlights real changes only.
    mat_1_gray = cv2.cvtColor(mat_1.copy(), cv2.COLOR_BGR2GRAY)
    mat_1_gray = cv2.blur(mat_1_gray, (blur1, blur1))
    _, mat_1_gray = cv2.threshold(mat_1_gray, 100, 255, 0)
    mat_2_gray = cv2.cvtColor(mat_2.copy(), cv2.COLOR_BGR2GRAY)
    mat_2_gray = cv2.blur(mat_2_gray, (blur1, blur1))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 100, 255, 0)
    # XOR keeps exactly the pixels that flipped between the two frames.
    mat_2_gray = cv2.bitwise_xor(mat_1_gray, mat_2_gray)
    mat_2_gray = cv2.blur(mat_2_gray, (blur2, blur2))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 70, 255, 0)
    # Erode away specks, then dilate to consolidate real motion blobs.
    mat_2_gray = cv2.erode(mat_2_gray, np.ones((erodeval, erodeval)))
    mat_2_gray = cv2.dilate(mat_2_gray, np.ones((4, 4)))
    _, contours, __ = cv2.findContours(mat_2_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0: return True  # If there were any movements
    return False  # if not

# Pedestrian Recognition Thread
Example #7
Source File: face_detect.py From face-search with MIT License | 7 votes |
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """Detect a face in the image at *img_path*; return the padded color
    crop of the first detection, or None when no face is found.

    *cc_path* points at the Haar cascade XML used for detection.
    """
    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None
    if len(faces) == 0:
        # logging.exception is only valid inside an except block; use
        # error here since there is no active exception.
        logging.error(img_path + ': No face found')
    else:
        x, y, w, h = faces[0]
        _h, _w = compute_size(h, w)
        # Clamp the padded window to the image: a negative start index
        # would wrap around (numpy slicing) and return a bogus crop.
        y0 = max(0, y - _h)
        x0 = max(0, x - _w)
        roi_color = img[y0:y + h + _h, x0:x + w + _w]
    return roi_color
Example #8
Source File: detect.py From pedestrian-haar-based-detector with GNU General Public License v2.0 | 7 votes |
def main():
    """Detect pedestrians in test3.jpg with a Haar cascade, draw boxes,
    save the annotated image as saida.jpg and display it. Returns 0."""
    # IMG PATHS
    imagePath = "test3.jpg"
    cascPath = "cascades/haarcascade_pedestrian.xml"
    pplCascade = cv2.CascadeClassifier(cascPath)
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = normalize_grayimage(gray)
    pedestrians = pplCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=10,
        minSize=(32, 96),
        # cv2.cv was removed in OpenCV 3; CASCADE_SCALE_IMAGE is the
        # modern name for the old CV_HAAR_SCALE_IMAGE flag.
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # print statement -> function: keeps this runnable on Python 3,
    # consistent with the other examples in this file.
    print("Found {0} ppl!".format(len(pedestrians)))
    # Draw a rectangle around each detected pedestrian.
    for (x, y, w, h) in pedestrians:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite("saida.jpg", image)
    cv2.imshow("Ppl found", image)
    cv2.waitKey(0)
    return 0
Example #9
Source File: test_detection.py From object-detection with MIT License | 6 votes |
def test_motion():
    """Motion-detector smoke test: seed the background model with the
    box image, predict on the base image, and expect one detection."""
    frame = cv2.imread("./imgs/image.jpeg")
    print(frame.shape)
    detector = Detector_Motion()
    background = cv2.imread("./imgs/image_box.jpg")
    print(background.shape)
    assert frame.shape == background.shape
    # Pre-process the background exactly as the detector would.
    background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
    background = cv2.GaussianBlur(background, (21, 21), 0)
    detector.avg = background.astype(float)
    output = detector.prediction(frame)
    df = detector.filter_prediction(output, frame)
    frame = detector.draw_boxes(frame, df)
    print(df)
    assert df.shape[0] == 1
    cv2.imwrite("./imgs/outputcv.jpg", frame)
Example #10
Source File: face.py From face-search with MIT License | 6 votes |
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """Detect a face in the image at *img_path*; return the padded color
    crop of the first detection, or None when no face is found.

    *cc_path* points at the Haar cascade XML used for detection.
    """
    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None
    if len(faces) == 0:
        # logging.exception is only valid inside an except block; use
        # error here since there is no active exception.
        logging.error(img_path + ': No face found')
    else:
        x, y, w, h = faces[0]
        _h, _w = compute_size(h, w)
        # Clamp the padded window to the image: a negative start index
        # would wrap around (numpy slicing) and return a bogus crop.
        y0 = max(0, y - _h)
        x0 = max(0, x - _w)
        roi_color = img[y0:y + h + _h, x0:x + w + _w]
    return roi_color
Example #11
Source File: imgproc.py From dataflow with Apache License 2.0 | 6 votes |
def _augment(self, img, r): old_dtype = img.dtype if img.ndim == 3: if self.rgb is not None: m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY grey = cv2.cvtColor(img.astype('float32'), m) mean = np.mean(grey) else: mean = np.mean(img, axis=(0, 1), keepdims=True) else: mean = np.mean(img) img = img * r + mean * (1 - r) if self.clip or old_dtype == np.uint8: img = np.clip(img, 0, 255) return img.astype(old_dtype)
Example #12
Source File: emotionclassification.py From ConvolutionalEmotion with MIT License | 6 votes |
def getPeakFeatures():
    """Extract DeCAF features for the peak (last) frame of every sequence.

    Walks the CK+-style directory tree (participant/sequence/frames),
    classifies the final frame of each sequence, and saves the collected
    features and labels as featuresPeak5.npy / labelsPeak5.npy.
    """
    net = DecafNet()
    features = numpy.zeros((number_sequences, feature_length))
    labels = numpy.zeros((number_sequences, 1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir, image_dir)):
        for sequence in os.listdir(os.path.join(data_dir, image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir, image_dir, participant, sequence)))
                # The final frame of a sequence carries the peak expression.
                image_file = image_files[-1]
                # print statement -> function for Python 3 compatibility.
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir, image_dir, participant, sequence, image_file))
                imarray = cv2.cvtColor(imarray, cv2.COLOR_BGR2GRAY)
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level)  # .flatten()
                # Context manager guarantees the label file is closed
                # even if parsing fails.
                with open(os.path.join(data_dir, label_dir, participant, sequence, image_file[:-4] + "_emotion.txt")) as label_file:
                    # NOTE(review): eval() on file contents is unsafe on
                    # untrusted data — float(label_file.read()) would be safer.
                    labels[counter] = eval(label_file.read())
                counter += 1
    numpy.save("featuresPeak5", features)
    numpy.save("labelsPeak5", labels)
Example #13
Source File: lk_track.py From OpenCV-Python-Tutorial with MIT License | 6 votes |
def run(self):
    """Main Lucas-Kanade tracking loop.

    Reads frames from ``self.cam``, tracks existing feature points with
    pyramidal LK optical flow (validated by forward-backward error), and
    every ``self.detect_interval`` frames seeds new features away from
    current tracks. ESC (key 27) exits.
    """
    while True:
        ret, frame = self.cam.read()
        # NOTE(review): ret is not checked — a failed read would crash cvtColor.
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        vis = frame.copy()
        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, frame_gray
            # Most recent point of each track, shaped for calcOpticalFlowPyrLK.
            p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            # Forward then backward flow: a reliable track returns close
            # to its origin (forward-backward consistency check).
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                # Bound each track's history length.
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)
                cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
            self.tracks = new_tracks
            cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
            draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
        if self.frame_idx % self.detect_interval == 0:
            # Mask out neighborhoods of existing points so new features
            # are only detected away from current tracks.
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (x, y), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])
        self.frame_idx += 1
        self.prev_gray = frame_gray
        cv2.imshow('lk_track', vis)
        ch = cv2.waitKey(1)
        if ch == 27:
            break
Example #14
Source File: demo.py From VTuber_Unity with MIT License | 6 votes |
def get_face(detector, image, cpu=False):
    """Return a face bounding box ``[x1, y1, x2, y2]`` or None.

    With ``cpu=True`` a dlib-style detector runs on the grayscale image;
    otherwise a GPU detector runs at half resolution and the box is
    scaled back to full size.
    """
    if cpu:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        try:
            box = detector(image)[0]
        except IndexError:
            # Narrowed from a bare except: the expected failure is an
            # empty detection list; other errors should not be hidden.
            return None
        return [box.left(), box.top(), box.right(), box.bottom()]
    # Half-resolution detection for speed.
    image = cv2.resize(image, None, fx=0.5, fy=0.5)
    boxes = detector.detect_from_image(image)
    # Guard the empty list: the old code indexed [0] unconditionally and
    # crashed with IndexError when nothing was detected.
    if len(boxes) == 0 or boxes[0] is None:
        return None
    # Detection ran at half scale; multiply coordinates back up.
    return (2 * boxes[0][:4]).astype(int)
Example #15
Source File: my.py From 3D-HourGlass-Network with MIT License | 6 votes |
def test_heatmaps(heatmaps,img,i): heatmaps=heatmaps.numpy() #heatmaps=np.squeeze(heatmaps) heatmaps=heatmaps[:,:64,:] heatmaps=heatmaps.transpose(1,2,0) print('heatmap inside shape is',heatmaps.shape) ## print('----------------here') ## print(heatmaps.shape) img=img.numpy() #img=np.squeeze(img) img=img.transpose(1,2,0) img=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # print('heatmaps',heatmaps.shape) heatmaps = cv2.resize(heatmaps,(0,0), fx=4,fy=4) # print('heatmapsafter',heatmaps.shape) for j in range(0, 16): heatmap = heatmaps[:,:,j] heatmap = heatmap.reshape((256,256,1)) heatmapimg = np.array(heatmap * 255, dtype = np.uint8) heatmap = cv2.applyColorMap(heatmapimg, cv2.COLORMAP_JET) heatmap = heatmap/255 plt.imshow(img) plt.imshow(heatmap, alpha=0.5) plt.show() #plt.savefig('hmtestpadh36'+str(i)+js[j]+'.png')
Example #16
Source File: covermatcher.py From PracticalPythonAndOpenCV_CaseStudies with GNU General Public License v3.0 | 6 votes |
def search(self, query_keypoints, query_descriptors):
    """Score every stored cover image against the query features.

    Returns a list of (score, path) pairs sorted best-first, keeping
    only covers with a positive match score; an empty dict when there
    are no covers at all.
    """
    results = {}
    for path in self.cover_paths:
        # Load each candidate cover, convert to grayscale, and describe it.
        cover = cv2.imread(path)
        gray = cv2.cvtColor(cover, cv2.COLOR_BGR2GRAY)
        keypoints, descriptors = self.descriptor.describe(gray)
        # Score = number of matched inlier keypoints.
        results[path] = self.match(
            query_keypoints, query_descriptors, keypoints, descriptors)
    if not results:
        return results
    return sorted([(v, k) for (k, v) in results.items() if v > 0], reverse=True)
Example #17
Source File: coherence.py From OpenCV-Python-Tutorial with MIT License | 6 votes |
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    """Edge-preserving coherence smoothing.

    Repeatedly erodes/dilates along the local structure-tensor direction
    and blends the result into the image; returns the filtered uint8 image.

    sigma: Sobel aperture; str_sigma: structure-tensor block size;
    blend: mix factor per iteration; iter_n: number of iterations.
    """
    h, w = img.shape[:2]
    for i in range(iter_n):  # xrange -> range: Python 3 compatibility
        print(i)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        # Dominant eigenvector = local coherence direction.
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]
        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        # Second derivative of intensity along the coherence direction.
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0
        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        # Where gvv < 0 use the dilated image, elsewhere the eroded one.
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img
Example #18
Source File: my.py From 3D-HourGlass-Network with MIT License | 6 votes |
def test_heatmaps(heatmaps,img,i): heatmaps=heatmaps.numpy() #heatmaps=np.squeeze(heatmaps) heatmaps=heatmaps[:,:64,:] heatmaps=heatmaps.transpose(1,2,0) print('heatmap inside shape is',heatmaps.shape) ## print('----------------here') ## print(heatmaps.shape) img=img.numpy() #img=np.squeeze(img) img=img.transpose(1,2,0) img=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # print('heatmaps',heatmaps.shape) heatmaps = cv2.resize(heatmaps,(0,0), fx=4,fy=4) # print('heatmapsafter',heatmaps.shape) for j in range(0, 16): heatmap = heatmaps[:,:,j] heatmap = heatmap.reshape((256,256,1)) heatmapimg = np.array(heatmap * 255, dtype = np.uint8) heatmap = cv2.applyColorMap(heatmapimg, cv2.COLORMAP_JET) heatmap = heatmap/255 plt.imshow(img) plt.imshow(heatmap, alpha=0.5) plt.show() #plt.savefig('hmtestpadh36'+str(i)+js[j]+'.png')
Example #19
Source File: stitch_patches_page.py From ScanSSD with MIT License | 6 votes |
def find_blank_rows_h(image):
    """Return the indices of rows judged blank by ``is_blank``.

    Builds a cumulative count of dark pixels per row and lets is_blank()
    decide, per row index, whether the row is blank.
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Binarize: dark pixels (<= 127) become 1, light pixels 0.
    im_bw = np.zeros(gray_image.shape)
    im_bw[gray_image > 127] = 0
    im_bw[gray_image <= 127] = 1
    row_sum = np.sum(im_bw, axis=1)
    # Vectorized prefix sum replaces the original Python-level loop.
    cum_sum = np.cumsum(row_sum)
    blank_rows = [i for i in range(len(cum_sum)) if is_blank(cum_sum, i)]
    return blank_rows

# check n last rows
Example #20
Source File: localVerifyCode.py From 12306 with MIT License | 5 votes |
def get_text(img, offset=0):
    """Crop the text strip from a captcha image and normalize it.

    Delegates the crop to pretreatment.get_text, converts to grayscale,
    scales to [0, 1], and reshapes to (1, h, w, 1) for the model.
    """
    strip = pretreatment.get_text(img, offset)
    strip = cv2.cvtColor(strip, cv2.COLOR_BGR2GRAY)
    strip = strip / 255.0
    rows, cols = strip.shape
    strip.shape = (1, rows, cols, 1)
    return strip
Example #21
Source File: dlib_detector.py From VTuber_Unity with MIT License | 5 votes |
def detect_from_image(self, tensor_or_path):
    """Run the dlib face detector on an image, tensor, or path.

    Returns a list of [left, top, right, bottom] boxes.
    """
    image = self.tensor_or_path_to_ndarray(tensor_or_path, rgb=False)
    hits = self.face_detector(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    if 'cuda' not in self.device:
        # CPU (HOG) detector yields plain rectangles.
        return [[d.left(), d.top(), d.right(), d.bottom()] for d in hits]
    # CUDA (MMOD) detector wraps the rectangle in a .rect attribute.
    return [[d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()]
            for d in hits]
Example #22
Source File: run_gui_backup2.py From openag_brain_box with GNU General Public License v3.0 | 5 votes |
def computeCanny(surface):
    """Return a pygame surface containing the Canny edge map of *surface*."""
    pixels = pygame.surfarray.pixels3d(surface)
    gray = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    # Blur first so Canny responds to real edges, not pixel noise.
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, 30, 150)
    return pygame.surfarray.make_surface(edges)
Example #23
Source File: gui.py From openag_brain_box with GNU General Public License v3.0 | 5 votes |
def computeCanny(self, surface):
    """Return a pygame surface containing the Canny edge map of *surface*."""
    pixels = pygame.surfarray.pixels3d(surface)
    gray = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    # Blur first so Canny responds to real edges, not pixel noise.
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, 30, 150)
    return pygame.surfarray.make_surface(edges)
Example #24
Source File: stitch_patches_page.py From ScanSSD with MIT License | 5 votes |
def convert_to_binary(image):
    """Binarize a BGR image: dark pixels (gray <= 127) map to 1.0,
    light pixels to 0.0; returns a float64 array."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Boolean mask cast to float reproduces the original 0/1 map
    # (np.zeros defaults to float64, matching astype(float)).
    return (gray <= 127).astype(float)
Example #25
Source File: process_videos_automation.py From simba with GNU Lesser General Public License v3.0 | 5 votes |
def clahe_queue(files):
    """Apply CLAHE contrast enhancement to a single video file.

    Writes the enhanced (grayscale) video next to the input as
    ``CLAHE_<stem>.avi`` and returns that file name.
    """
    filesFound = [files]
    os.chdir(os.path.dirname(files))
    print('Applying CLAHE, this might take awhile...')
    # Filter parameters never change — build it once, not once per frame.
    claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
    for i in filesFound:
        currentVideo = os.path.basename(i)
        saveName = str('CLAHE_') + str(currentVideo[:-4]) + str('.avi')
        cap = cv2.VideoCapture(currentVideo)
        imageWidth = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
        imageHeight = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
        fps = cap.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # isColor=0: frames written are single-channel grayscale.
        out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
        while True:
            ret, image = cap.read()
            if ret:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                out.write(claheFilter.apply(im))
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print(str('Completed video ') + str(saveName))
                break
        # Release per video so capture/writer handles are not leaked.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    return saveName
Example #26
Source File: process_videos_automation.py From simba with GNU Lesser General Public License v3.0 | 5 votes |
def clahe_batch(directory):
    """Apply CLAHE contrast enhancement to every file in *directory*.

    Each enhanced (grayscale) video is written as ``CLAHE_<stem>.avi``;
    returns the save name of the last video processed.
    """
    filesFound = []
    ########### FIND FILES ###########
    for i in os.listdir(directory):
        filesFound.append(i)
    os.chdir(directory)
    print('Applying CLAHE, this might take awhile...')
    # Filter parameters never change — build it once, not once per frame.
    claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
    for i in filesFound:
        currentVideo = i
        saveName = str('CLAHE_') + str(currentVideo[:-4]) + str('.avi')
        cap = cv2.VideoCapture(currentVideo)
        imageWidth = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
        imageHeight = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
        fps = cap.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # isColor=0: frames written are single-channel grayscale.
        out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
        while True:
            ret, image = cap.read()
            if ret:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                out.write(claheFilter.apply(im))
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print(str('Completed video ') + str(saveName))
                break
        # Release per video so capture/writer handles are not leaked
        # while the remaining files are still being processed.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    return saveName
Example #27
Source File: stitch_patches_page.py From ScanSSD with MIT License | 5 votes |
def find_blank_rows(image, line_spacing=1):
    """Find horizontal blank bands in a BGR page image.

    A row is blank when every pixel is pure white. Consecutive blank
    rows are grouped with connected-component labeling, bands taller
    than *line_spacing* are kept, and each is returned as an
    [x_min, y_min, x_max, y_max] box.
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Boolean per-row mask: True where the whole row is white.
    blank_rows = np.all(gray_image == 255, axis=1)
    im_bw = np.zeros(gray_image.shape)
    im_bw[blank_rows] = 255
    #gray_image[~blank_rows] = 0
    #cv2.imwrite("/home/psm2208/code/eval/test.png", im_bw)
    # Group runs of blank rows into connected components.
    labeled, ncomponents = ndimage.label(im_bw)
    rows = []
    # (row, col) coordinate grid, transposed to index by component mask.
    indices = np.indices(im_bw.shape).T[:, :, [1, 0]]
    line_bbs = ndimage.find_objects(labeled)
    sizes = np.array([[bb.stop - bb.start for bb in line_bb] for line_bb in line_bbs])
    sizes = sizes[:, 0]  # band heights (extent along the row axis)
    # Keep only bands taller than the requested line spacing.
    mask = (sizes > line_spacing)
    idx = np.flatnonzero(mask)
    for i in idx:
        # ndimage labels are 1-based; idx is 0-based.
        labels = (labeled == (i + 1))
        pixels = indices[labels.T]
        box = [min(pixels[:, 0]), min(pixels[:, 1]), max(pixels[:, 0]), max(pixels[:, 1])]
        rows.append(box)
    return rows
Example #28
Source File: process_videos_automation.py From simba with GNU Lesser General Public License v3.0 | 5 votes |
def clahe_auto(directory):
    """Apply CLAHE contrast enhancement to every .mp4 file in *directory*.

    Each enhanced (grayscale) video is written as ``CLAHE_<stem>.avi``;
    returns the save name of the last video processed.
    """
    filesFound = []
    ########### FIND FILES ###########
    for i in os.listdir(directory):
        if ".mp4" in i:  # idiomatic membership test (was i.__contains__)
            filesFound.append(i)
    os.chdir(directory)
    print('Applying CLAHE, this might take awhile...')
    # Filter parameters never change — build it once, not once per frame.
    claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
    for i in filesFound:
        currentVideo = i
        saveName = str('CLAHE_') + str(currentVideo[:-4]) + str('.avi')
        cap = cv2.VideoCapture(currentVideo)
        imageWidth = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
        imageHeight = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
        fps = cap.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # isColor=0: frames written are single-channel grayscale.
        out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
        while True:
            ret, image = cap.read()
            if ret:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                out.write(claheFilter.apply(im))
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print(str('Completed video ') + str(saveName))
                break
        # Release per video so capture/writer handles are not leaked
        # while the remaining files are still being processed.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    return saveName
Example #29
Source File: dista.py From Traffic-Signs-and-Object-Detection with GNU General Public License v3.0 | 5 votes |
def find_marker(image):
    """Locate the marker (largest contour) in a BGR image and return its
    rotated bounding rectangle from cv2.minAreaRect."""
    # Grayscale, smooth, then edge-detect to isolate the marker outline.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # NOTE(review): two-value unpacking matches OpenCV 2.4 / 4.x;
    # OpenCV 3.x returns three values here — confirm installed version.
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Assume the marker is the largest contour by area.
    c = max(cnts, key = cv2.contourArea)
    # compute the rotated bounding box of the marker region and return it
    return cv2.minAreaRect(c)
Example #30
Source File: create_gt_math.py From ScanSSD with MIT License | 5 votes |
def convert_to_binary(image):
    """Binarize a BGR image: dark pixels (gray <= 127) map to 1.0,
    light pixels to 0.0; returns a float64 array."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Boolean mask cast to float reproduces the original 0/1 map
    # (np.zeros defaults to float64, matching astype(float)).
    return (gray <= 127).astype(float)