Python cv2.CAP_PROP_POS_MSEC Examples

The following are 24 code examples of cv2.CAP_PROP_POS_MSEC, drawn from open-source projects; the originating source file, project, and license are noted above each example. CAP_PROP_POS_MSEC is a cv2.VideoCapture property: VideoCapture.get(cv2.CAP_PROP_POS_MSEC) returns the current position in the video file in milliseconds, and VideoCapture.set(cv2.CAP_PROP_POS_MSEC, value) seeks to that position. You may also want to check out all available functions/classes of the module cv2.
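As a minimal sketch of the property in isolation (the file name video.mp4 is a placeholder), seeking to the 5-second mark and reading the position back might look like this:

import cv2

cap = cv2.VideoCapture('video.mp4')    # placeholder path
cap.set(cv2.CAP_PROP_POS_MSEC, 5000)   # seek to the 5-second mark
ok, frame = cap.read()                 # read the frame at (roughly) that position
print(cap.get(cv2.CAP_PROP_POS_MSEC))  # current position in milliseconds
cap.release()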
Example #1
Source File: frame_extractor.py    From keras-video-classifier with MIT License
def extract_features(video_input_file_path, feature_output_file_path):
    if os.path.exists(feature_output_file_path):
        return np.load(feature_output_file_path)
    count = 0
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    success = True  # the loop seeks with CAP_PROP_POS_MSEC before each read, so no priming read is needed
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)  # seek to the count-th second before reading
        success, image = vidcap.read()
        # print('Read a new frame: ', success)
        if success:
            img = cv2.resize(image, (40, 40), interpolation=cv2.INTER_AREA)
            features.append(img)  # append the resized frame (the original appended the full-size image, leaving img unused)
            count = count + 1
    unscaled_features = np.array(features)
    print(unscaled_features.shape)
    np.save(feature_output_file_path, unscaled_features)
    return unscaled_features 
Example #2
Source File: video_tools.py    From dnn-model-services with MIT License
def get_video_frames(video_path, frames_path, start_time_ms, stop_time_ms, pace):
    try:
        cap = cv2.VideoCapture(video_path)
        # Set start position
        cap.set(cv2.CAP_PROP_POS_MSEC, start_time_ms)

        fps = cap.get(cv2.CAP_PROP_FPS)

        # If stop_time_ms == 0, get the entire video
        if stop_time_ms == 0:
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            stop_time_ms = int(frame_count/fps)*1000

        # If pace == 0, default to one image every 10 frames' duration (10000/fps ms)
        if pace == 0:
            pace = int(1000 / (fps / 10))

        frames_list = []
        ok = True
        current_frame = 1
        while ok and cap.get(cv2.CAP_PROP_POS_MSEC) <= stop_time_ms:
            ok, frame = cap.read()
            if not ok:
                break
            frame_path = '{}/frame_{:03}.jpg'.format(frames_path, current_frame)
            log.debug('Storing: {}'.format(frame_path))
            cv2.imwrite(frame_path, frame)
            frames_list.append(frame_path)
            current_frame += 1
            cap.set(cv2.CAP_PROP_POS_MSEC, cap.get(cv2.CAP_PROP_POS_MSEC) + pace)

        cap.release()
        cv2.destroyAllWindows()
        return True, frames_list
    except Exception as e:
        log.error(e)
        return False, [] 
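For context, a hypothetical call to the helper above, sampling one frame per second from the first ten seconds of a clip (the paths are placeholders, and the frames directory is assumed to exist), might look like:

ok, frames = get_video_frames('clip.mp4', 'frames', start_time_ms=0, stop_time_ms=10000, pace=1000)
if ok:
    print('Saved {} frames'.format(len(frames)))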
Example #3
Source File: opencv_gstreamer.py    From IkaLog with Apache License 2.0
def _read_frame_func(self):
        ret, frame = self.video_capture.read()
        if not ret:
            raise EOFError()

        if self.frame_skip_rt:
            systime_msec = self.get_tick()
            video_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
            assert systime_msec >= 0

            skip = video_msec < systime_msec
            while skip:
                ret, frame_ = self.video_capture.read()

                if not ret:
                    break

                frame = frame_
                video_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
                skip = video_msec < systime_msec

        return frame 
Example #4
Source File: vgg16_feature_extractor.py    From keras-video-classifier with MIT License
def extract_vgg16_features(model, video_input_file_path, feature_output_file_path):
    if os.path.exists(feature_output_file_path):
        return np.load(feature_output_file_path)
    count = 0
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    success = True
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)  # seek to the count-th second before reading
        success, image = vidcap.read()
        # print('Read a new frame: ', success)
        if success:
            img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            x = img_to_array(img)  # avoid shadowing the built-in input()
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            feature = model.predict(x).ravel()
            features.append(feature)
            count = count + 1
    unscaled_features = np.array(features)
    np.save(feature_output_file_path, unscaled_features)
    return unscaled_features 
Example #5
Source File: opencv_file.py    From IkaLog with Apache License 2.0
def _read_frame_func(self):
        ret, frame = self.video_capture.read()
        if not ret:
            raise EOFError()

        if self.frame_skip_rt:
            systime_msec = self.get_tick()
            video_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
            assert systime_msec >= 0

            skip = video_msec < systime_msec
            while skip:
                ret, frame_ = self.video_capture.read()

                if not ret:
                    break

                frame = frame_
                video_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
                skip = video_msec < systime_msec

        return frame

    # override 
Example #6
Source File: vgg16_feature_extractor.py    From keras-video-classifier with MIT License
def extract_vgg16_features_live(model, video_input_file_path):
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    success = True
    count = 0
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)  # seek to the count-th second before reading
        success, image = vidcap.read()
        # print('Read a new frame: ', success)
        if success:
            img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            x = img_to_array(img)  # avoid shadowing the built-in input()
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            feature = model.predict(x).ravel()
            features.append(feature)
            count = count + 1
    unscaled_features = np.array(features)
    return unscaled_features 
Example #7
Source File: extract_images.py    From chinese-subtitle-ocr with MIT License
def main():
    for video in glob.glob(VIDEO_FOLDER + "/*"):
        if ".srt" in video:
            continue
        print(video)
        video_name = os.path.splitext(os.path.basename(video))[0]

        cap = cv2.VideoCapture(video)
        max_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.set(cv2.CAP_PROP_POS_FRAMES, max_frames // 4)

        srt_subtitle = parse_subtitle(os.path.join(VIDEO_FOLDER, video_name + ".srt"))
        if srt_subtitle:
            cap.set(cv2.CAP_PROP_POS_MSEC, srt_subtitle[0][0])

        pos_frame = 0
        i = 0
        pos_subtitle = 0
        while pos_frame < max_frames and i < MAX_IMAGES_PER_VIDEO:
            pos_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
            msec = cap.get(cv2.CAP_PROP_POS_MSEC)
            print("{}/{}".format(i + 1, MAX_IMAGES_PER_VIDEO), end="\r")
            ret, frame = cap.read()
            if not ret:
                break  # stop if the stream ends before max_frames is reached

            if pos_frame % SKIP_FRAMES > 0:
                continue

            if not srt_subtitle or (msec >= srt_subtitle[pos_subtitle][0] and msec <= srt_subtitle[pos_subtitle][1]):
                cv2.imwrite(os.path.join(IMAGE_FOLDER, "{}_{}.jpg").format(video_name, pos_frame), frame)
                i += 1
            elif msec > srt_subtitle[pos_subtitle][1]:
                pos_subtitle += 1
                if pos_subtitle >= len(srt_subtitle):
                    break
                cap.set(cv2.CAP_PROP_POS_MSEC, srt_subtitle[pos_subtitle][0]) 
Example #8
Source File: video.py    From DeepPoseKit with Apache License 2.0
def current_time(self):
        return self.get(cv2.CAP_PROP_POS_MSEC) 
Example #9
Source File: frame_extractor.py    From keras-video-classifier with MIT License
def extract_videos_for_conv2d(video_input_file_path, feature_output_file_path, max_frames):
    if feature_output_file_path is not None:
        if os.path.exists(feature_output_file_path):
            return np.load(feature_output_file_path)
    count = 0
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    success = True
    while success and count < max_frames:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)  # seek to the count-th second before reading
        success, image = vidcap.read()
        # print('Read a new frame: ', success)
        if success:
            image = cv2.resize(image, (240, 240), interpolation=cv2.INTER_AREA)
            channels = image.shape[2]
            for channel in range(channels):
                features.append(image[:, :, channel])
            count = count + 1
    unscaled_features = np.array(features)
    unscaled_features = np.transpose(unscaled_features, axes=(1, 2, 0))
    print(unscaled_features.shape)
    if feature_output_file_path is not None:
        np.save(feature_output_file_path, unscaled_features)
    return unscaled_features 
Example #10
Source File: frame_extractor.py    From keras-video-classifier with MIT License
def extract_images(video_input_file_path, image_output_dir_path):
    if os.path.exists(image_output_dir_path):
        return
    os.makedirs(image_output_dir_path)  # cv2.imwrite does not create missing directories
    count = 0
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    success = True
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)  # seek to the count-th second before reading
        success, image = vidcap.read()
        # print('Read a new frame: ', success)
        if success:
            cv2.imwrite(os.path.join(image_output_dir_path, "frame%d.jpg" % count), image)  # save frame as a JPEG file
            count = count + 1 
Example #11
Source File: entity.py    From nideep with BSD 2-Clause "Simplified" License
def timestamps_sec(self):
        """ Get timestamp of each frame from video file """
        cap = cv2.VideoCapture(self.path_video)
        ret = True
        t = []
        while cap.isOpened() and ret:
            # Capture frame-by-frame
            t.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.)
            ret = cap.grab()
        # When everything done, release the capture
        cap.release()
        return t 
Example #12
Source File: dataset.py    From pyslam with GNU General Public License v3.0
def getImage(self, frame_id):
        # on the first call, seek ahead if the requested frame id is > 0
        if self.is_init is False and frame_id > 0:
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
        self.is_init = True
        ret, image = self.cap.read()
        #self._timestamp = time.time()  # rough timestamp if nothing else is available 
        self._timestamp = float(self.cap.get(cv2.CAP_PROP_POS_MSEC)) / 1000.0  # convert CAP_PROP_POS_MSEC (milliseconds) to seconds
        self._next_timestamp = self._timestamp + self.Ts 
        if ret is False:
            print('ERROR while reading from file: ', self.filename)
        return image 
Example #13
Source File: video.py    From learning-blind-motion-deblurring with Apache License 2.0
def jump(self, frame=None, ms=None):
        assert (frame is None) != (ms is None), "Use either frame or ms, not both!"
        if frame is not None:
            if frame >= self.frames:
                raise ReadFrameException('Cannot jump to frame (frame does not exist)')
            self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame)
        if ms is not None:
            self.vid.set(cv2.CAP_PROP_POS_MSEC, ms)
        # print("jumped to frame %i" % self.vid.get(cv2.CAP_PROP_POS_FRAMES))
Example #14
Source File: video_utilities.py    From CvStudio with MIT License
def extract_frame_cv2(cls, video_file):
        try:
            vidcap = cv2.VideoCapture(video_file)
            vidcap.set(cv2.CAP_PROP_POS_MSEC, 2000)  # cue to the 2-second position
            success, image = vidcap.read()
            if success:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                return image
        except Exception:
            return None
Example #15
Source File: toolbox.py    From stagesepx with MIT License
def get_current_frame_time(video_cap: cv2.VideoCapture) -> float:
    # counterpart of get_current_frame_id; keep the two consistent
    return video_cap.get(cv2.CAP_PROP_POS_MSEC) / 1000 
Example #16
Source File: extract_tfrecords_main.py    From youtube8mchallenge with Apache License 2.0
def frame_iterator(filename, every_ms=1000, max_num_frames=300):
  """Uses OpenCV to iterate over all frames of filename at a given frequency.

  Args:
    filename: Path to video file (e.g. mp4)
    every_ms: The duration (in milliseconds) to skip between frames.
    max_num_frames: Maximum number of frames to process, taken from the
      beginning of the video.

  Yields:
    BGR frame (OpenCV's default channel order) with shape (image height, image width, channels)
  """
  video_capture = cv2.VideoCapture()
  if not video_capture.open(filename):
    print('Error: Cannot open video file ' + filename, file=sys.stderr)
    return
  last_ts = -99999  # The timestamp of last retrieved frame.
  num_retrieved = 0

  while num_retrieved < max_num_frames:
    # Skip frames
    while video_capture.get(cv2.CAP_PROP_POS_MSEC) < every_ms + last_ts:
      if not video_capture.read()[0]:
        return

    last_ts = video_capture.get(cv2.CAP_PROP_POS_MSEC)
    has_frames, frame = video_capture.read()
    if not has_frames:
      break
    yield frame
    num_retrieved += 1 
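A hypothetical way to consume this generator, saving one frame per second to illustratively named files, might be:

for idx, frame in enumerate(frame_iterator('input.mp4', every_ms=1000)):
    cv2.imwrite('frame_{:04d}.jpg'.format(idx), frame)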
Example #17
Source File: cameras.py    From ethoscope with GNU General Public License v3.0
def _time_stamp(self):
        if self._use_wall_clock:
            now = time.time()
            return now - self._start_time
        time_s = self.capture.get(cv2.CAP_PROP_POS_MSEC) / 1e3
        return time_s 
Example #18
Source File: ped_det_server.py    From deep_sort_pytorch with MIT License
def run(self):
        idx_frame = 0
        pbar = tqdm(total=self.total_frames + 1)
        while self.vdo.grab():
            if idx_frame % args.frame_interval == 0:
                _, ori_im = self.vdo.retrieve()
                timestamp = self.vdo.get(cv2.CAP_PROP_POS_MSEC)
                frame_id = int(self.vdo.get(cv2.CAP_PROP_POS_FRAMES))
                self.logger.add_frame(frame_id=frame_id, timestamp=timestamp)
                self.detection(frame=ori_im, frame_id=frame_id)
                self.save_frame(ori_im)
                idx_frame += 1
            pbar.update()
        self.logger.json_output(self.json_output) 
Example #19
Source File: CThermal.py    From Thermal_Image_Analysis with MIT License
def split_visual(self,visual_video, fps, fps_ratio, output_folder='visual_frames'):
        '''
        Splits the video into frames based on the actual fps and the time between frames of the thermal sequence.
        There is a sync issue when the thermal fps and visual fps don't have an integer LCM (or the LCM is very large); motion interpolation may be needed to fix this.
        '''

        output_folder = Path(output_folder)
        output_folder.mkdir(exist_ok=True)
        vid = cv.VideoCapture(visual_video)
        skip_frames = round(fps_ratio)
        total_frames = vid.get(cv.CAP_PROP_FRAME_COUNT)
        current_frame = 0
        thermal_fps = fps * (1/fps_ratio)
        thermal_time = 1/thermal_fps
        logger.info(f'Time between frames for Thermal SEQ: {thermal_time}')
        # Uncomment below lines if you need total time of visual video
        # vid.set(cv.CAP_PROP_POS_AVI_RATIO,1)
        # total_time = vid.get(cv.CAP_PROP_POS_MSEC)
        last_save_time = -1 * thermal_time  # so that the 0th frame is saved
        idx = 0
        while current_frame < total_frames:
            current_frame = vid.get(cv.CAP_PROP_POS_FRAMES)
            try:
                current_time = (1 / fps) * current_frame
            except ZeroDivisionError:
                current_time = 0
            ret, frame = vid.read()
            if not ret:
                break  # avoid looping forever if the stream ends early
            if (current_time - last_save_time) * 1000 >= ((thermal_time * 1000) - 5):
                # logger.info(f'Current Time: {current_time}  Last save time: {last_save_time}')
                cv.imwrite(str(output_folder / f"{idx}.jpg"), frame)
                idx += 1
                last_save_time = current_time
        return True 
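To make the timing above concrete, with hypothetical values fps = 30 and fps_ratio = 4, thermal_fps works out to 7.5 and a visual frame is saved roughly every 133 ms:

fps, fps_ratio = 30, 4               # hypothetical values
thermal_fps = fps * (1 / fps_ratio)  # 7.5 frames per second
thermal_time = 1 / thermal_fps       # ~0.1333 s between saved frames
print(thermal_time * 1000)           # ~133.3 ms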
Example #20
Source File: extract_tfrecords_main.py    From youtube-8m with Apache License 2.0
def frame_iterator(filename, every_ms=1000, max_num_frames=300):
  """Uses OpenCV to iterate over all frames of filename at a given frequency.

  Args:
    filename: Path to video file (e.g. mp4)
    every_ms: The duration (in milliseconds) to skip between frames.
    max_num_frames: Maximum number of frames to process, taken from the
      beginning of the video.

  Yields:
    BGR frame (OpenCV's default channel order) with shape (image height, image width, channels)
  """
  video_capture = cv2.VideoCapture()
  if not video_capture.open(filename):
    print('Error: Cannot open video file ' + filename, file=sys.stderr)
    return
  last_ts = -99999  # The timestamp of last retrieved frame.
  num_retrieved = 0

  while num_retrieved < max_num_frames:
    # Skip frames
    while video_capture.get(cv2.CAP_PROP_POS_MSEC) < every_ms + last_ts:
      if not video_capture.read()[0]:
        return

    last_ts = video_capture.get(cv2.CAP_PROP_POS_MSEC)
    has_frames, frame = video_capture.read()
    if not has_frames:
      break
    yield frame
    num_retrieved += 1 
Example #21
Source File: opencv_file.py    From IkaLog with Apache License 2.0
def set_pos_msec(self, pos_msec):
        """Moves the video position to |pos_msec| in msec."""
        if self.video_capture:
            self.video_capture.set(cv2.CAP_PROP_POS_MSEC, pos_msec)

    # override 
Example #22
Source File: opencv_file.py    From IkaLog with Apache License 2.0
def _get_current_timestamp_func(self):
        if self.video_capture is None:
            return self.get_tick()

        video_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
        return video_msec or self.get_tick()


    # override 
Example #23
Source File: opencv_gstreamer.py    From IkaLog with Apache License 2.0
def _get_current_timestamp_func(self):
        video_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)

        if video_msec is None:
            return self.get_tick()

        return video_msec

    # override 
Example #24
Source File: uimage.py    From Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks with MIT License
def get_frame():
    """
    Get a frame from a video file.

    :return: (ndarray, float) (Loaded frame, time in seconds).
    """
    global _CAP, _FPS

    to_return_frame = None

    if _CAP is None:
        print("Error on getting frame. cv2.VideoCapture is not initialized.")
    else:
        try:
            if _CAP.isOpened():
                # Skip frames
                for i in range(int(_MAX_FPS / _FPS)):
                    _CAP.grab()

                is_valid_frame, to_return_frame = _CAP.retrieve()

                if not is_valid_frame:
                    to_return_frame = None
        except Exception as e:
            print("Error on getting a frame. Please, double-check if the video file is not corrupted.")
            print("Supported file format: MPEG-4 (*.mp4).")
            print("Check whether working versions of ffmpeg or gstreamer is installed.")
            raise e

    if _CAP is None:
        return to_return_frame, 0.0
    return to_return_frame, (_CAP.get(cv2.CAP_PROP_POS_MSEC) / 1000)