Efficient camera streaming using Python

1. Description

        Let's talk about using a webcam in Python. I have a simple task: reading frames from a camera and running a neural network on each frame. With one particular webcam I had problems setting the target fps (as I understand now, because the camera can deliver 30 fps with MJPEG but not with raw output), so I decided to dig into FFmpeg to see if it would help.
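        Before digging in, it helps to check what the driver actually accepted. Here is a minimal sketch (my addition, not part of the original pipeline) that asks OpenCV to report the negotiated codec and fps, since many cameras silently ignore requests they cannot satisfy:

import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FPS, 30)
# Read back what the driver actually accepted.
fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
codec = "".join(chr((fourcc >> (8 * i)) & 0xFF) for i in range(4))
print(f"codec={codec}, fps={cap.get(cv2.CAP_PROP_FPS)}")
cap.release()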

2. Two options: OpenCV and FFmpeg

        I ended up getting both OpenCV and FFmpeg working, but I discovered something very interesting: FFmpeg outperformed OpenCV in my main use case. Using FFmpeg, I was able to read frames 15x faster, and the entire pipeline was accelerated by 32%. I couldn't believe the results and rechecked everything multiple times, but they were consistent.

        Note: when I just read frames one after another, the performance is exactly the same, but when I run something that takes time after reading each frame, FFmpeg is faster. I'll explain exactly what I mean below.

2.1 OpenCV code implementation

        Now, let's take a look at the code. First — a class to read webcam frames using OpenCV:

import cv2
from typing import Tuple


class VideoStreamCV:
    def __init__(self, src: int, fps: int, resolution: Tuple[int, int]):
        self.src = src
        self.fps = fps
        self.resolution = resolution
        self.cap = self._open_camera()
        self.wait_for_cam()

    def _open_camera(self):
        cap = cv2.VideoCapture(self.src)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.resolution[0])
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.resolution[1])
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        cap.set(cv2.CAP_PROP_FOURCC, fourcc)  # request MJPEG so the camera can deliver its full fps
        cap.set(cv2.CAP_PROP_FPS, self.fps)
        return cap

    def read(self):
        ret, frame = self.cap.read()
        if not ret:
            return None
        return frame

    def release(self):
        self.cap.release()

    def wait_for_cam(self):
        # Warm-up: read and discard 30 frames, then check that the
        # last one was a valid frame.
        for _ in range(30):
            frame = self.read()
        if frame is not None:
            return True
        return False
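        Here is a minimal usage sketch for this class (same parameters as the benchmark further below):

cam = VideoStreamCV(src=0, fps=30, resolution=(1920, 1080))
frame = cam.read()  # BGR numpy array, (1080, 1920, 3) if the camera accepted the settings, or None
cam.release()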

2.2 Using FFmpeg

        I use the wait_for_cam function because cameras usually need time to "warm up". The FFmpeg class uses the same warmup:

import platform
import subprocess
from typing import Tuple

import numpy as np


class VideoStreamFFmpeg:
    def __init__(self, src: int, fps: int, resolution: Tuple[int, int]):
        self.src = src
        self.fps = fps
        self.resolution = resolution
        self.pipe = self._open_ffmpeg()
        self.frame_shape = (self.resolution[1], self.resolution[0], 3)
        self.frame_size = np.prod(self.frame_shape)
        self.wait_for_cam()

    def _open_ffmpeg(self):
        os_name = platform.system()
        if os_name == "Darwin":  # macOS
            input_format = "avfoundation"
            video_device = f"{self.src}:none"
        elif os_name == "Linux":
            input_format = "v4l2"
            video_device = f"{self.src}"
        elif os_name == "Windows":
            input_format = "dshow"
            video_device = f"video={self.src}"
        else:
            raise ValueError("Unsupported OS")

        command = [
            'ffmpeg',
            '-f', input_format,
            '-r', str(self.fps),
            '-video_size', f'{self.resolution[0]}x{self.resolution[1]}',
            '-i', video_device,
            '-an',  # no audio
            '-vcodec', 'rawvideo',  # decode the camera stream to raw frames
            '-pix_fmt', 'bgr24',  # channel order that OpenCV also uses
            '-vsync', '2',
            '-f', 'image2pipe', '-'  # pipe the raw frames to stdout
        ]

        if os_name == "Linux":
            # Input options must precede "-i": ask V4L2 for MJPEG, which is
            # what lets the camera deliver its full fps at high resolutions.
            command.insert(3, "-input_format")
            command.insert(4, "mjpeg")

        return subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, bufsize=10**8
        )

    def read(self):
        raw_image = self.pipe.stdout.read(self.frame_size)
        if len(raw_image) != self.frame_size:
            return None
        image = np.frombuffer(raw_image, dtype=np.uint8).reshape(self.frame_shape)
        return image

    def release(self):
        self.pipe.terminate()

    def wait_for_cam(self):
        # Warm-up: read and discard 30 frames, then check that the
        # last one was a valid frame.
        for _ in range(30):
            frame = self.read()
        if frame is not None:
            return True
        return False
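        For clarity, with src=0, fps=30, and a 1920x1080 resolution, the list above assembles into the following command on macOS (on Linux, -input_format mjpeg is inserted right after -f v4l2):

ffmpeg -f avfoundation -r 30 -video_size 1920x1080 -i 0:none -an -vcodec rawvideo -pix_fmt bgr24 -vsync 2 -f image2pipe -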

        To time the run function, I used a decorator:

import time


def timeit(func):
    def wrapper(*args, **kwargs):
        t0 = time.perf_counter()
        result = func(*args, **kwargs)
        t1 = time.perf_counter()
        print(f"Main function time: {round(t1-t0, 4)}s")
        return result

    return wrapper

        As a heavy synthetic task, I used this simple function instead of a neural network (it could just as well have been time.sleep). This is a very important part, because without any task the reading speed of OpenCV and FFmpeg is the same:

def computation_task():
    for _ in range(5000000):
        9999 * 9999

        Now the run function with the loop where I read a frame, time the read, and optionally run computation_task. Note that the timeit decorator reports the total loop time, while the returned value is the mean of the frame read times alone:

@timeit
def run(cam: VideoStreamCV | VideoStreamFFmpeg, run_task: bool):
    timer = []
    for _ in range(100):
        t0 = time.perf_counter()
        cam.read()
        timer.append(time.perf_counter() - t0)  # only the time spent waiting for the frame

        if run_task:
            computation_task()

    cam.release()
    return round(np.mean(timer), 4)

        Finally, the main function, where I set a few parameters, initialize 2 video streams using OpenCV and FFmpeg, and run each of them with and without computation_task:

def main():
    fps = 30
    resolution = (1920, 1080)

    for run_task in [False, True]:
        ff_cam = VideoStreamFFmpeg(src=0, fps=fps, resolution=resolution)
        cv_cam = VideoStreamCV(src=0, fps=fps, resolution=resolution)

        print(f"FFMPEG, task {run_task}:")
        print(f"Mean frame read time: {run(cam=ff_cam, run_task=run_task)}s\n")
        print(f"CV2, task {run_task}:")
        print(f"Mean frame read time: {run(cam=cv_cam, run_task=run_task)}s\n")

        This is what I get:

FFMPEG, task False:
Main function time: 3.2334s
Mean frame read time: 0.0323s

CV2, task False:
Main function time: 3.3934s
Mean frame read time: 0.0332s

FFMPEG, task True:
Main function time: 4.461s
Mean frame read time: 0.0014s

CV2, task True:
Main function time: 6.6833s
Mean frame read time: 0.023s

        So without the computation task I get essentially the same reading time: 0.0323s vs 0.0332s. But with the computation task it is 0.0014s vs 0.023s, so FFmpeg is much faster. My best explanation is that the FFmpeg subprocess keeps grabbing and decoding frames while Python is busy with the task, so the next frame is already waiting in the pipe buffer by the time read() is called. The beauty is that I got real speedups in my neural network application, not just in synthetic tests, so I decided to share the results.
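        A quick way to sanity-check that explanation (my own sketch, not part of the original benchmark) is to replace the computation with time.sleep and time the very next read. With the FFmpeg stream it should return almost instantly, because the frame was already sitting in the pipe:

cam = VideoStreamFFmpeg(src=0, fps=30, resolution=(1920, 1080))
cam.read()
time.sleep(0.2)  # stand-in for the heavy task
t0 = time.perf_counter()
cam.read()  # should return near-instantly from the buffered pipe
print(f"Read after sleep: {time.perf_counter() - t0:.4f}s")
cam.release()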

        The following graph showed the time required for one iteration: reading the frame, processing it with the yolov8s model (on CPU), and saving the frame with the detected objects. [The graph itself is not included here.]
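        For context, one such iteration could look roughly like this. This is a sketch assuming the ultralytics package; the article itself does not show this code:

from ultralytics import YOLO

model = YOLO("yolov8s.pt")  # assumed weights file
cam = VideoStreamFFmpeg(src=0, fps=30, resolution=(1920, 1080))

frame = cam.read()
if frame is not None:
    results = model(frame)  # run detection on the BGR frame
    annotated = results[0].plot()  # draw the detected objects
    cv2.imwrite("frame.jpg", annotated)
cam.release()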

3. The complete script

        Here is the complete script with all the tests:

import platform
import subprocess
import time
from typing import Tuple
import cv2
import numpy as np


class VideoStreamFFmpeg:
    def __init__(self, src: int, fps: int, resolution: Tuple[int, int]):
        self.src = src
        self.fps = fps
        self.resolution = resolution
        self.pipe = self._open_ffmpeg()
        self.frame_shape = (self.resolution[1], self.resolution[0], 3)
        self.frame_size = np.prod(self.frame_shape)
        self.wait_for_cam()

    def _open_ffmpeg(self):
        os_name = platform.system()
        if os_name == "Darwin":  # macOS
            input_format = "avfoundation"
            video_device = f"{self.src}:none"
        elif os_name == "Linux":
            input_format = "v4l2"
            video_device = f"{self.src}"
        elif os_name == "Windows":
            input_format = "dshow"
            video_device = f"video={self.src}"
        else:
            raise ValueError("Unsupported OS")

        command = [
            'ffmpeg',
            '-f', input_format,
            '-r', str(self.fps),
            '-video_size', f'{self.resolution[0]}x{self.resolution[1]}',
            '-i', video_device,
            '-an',  # no audio
            '-vcodec', 'rawvideo',  # decode the camera stream to raw frames
            '-pix_fmt', 'bgr24',  # channel order that OpenCV also uses
            '-vsync', '2',
            '-f', 'image2pipe', '-'  # pipe the raw frames to stdout
        ]

        if os_name == "Linux":
            # Input options must precede "-i": ask V4L2 for MJPEG, which is
            # what lets the camera deliver its full fps at high resolutions.
            command.insert(3, "-input_format")
            command.insert(4, "mjpeg")

        return subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, bufsize=10**8
        )

    def read(self):
        raw_image = self.pipe.stdout.read(self.frame_size)
        if len(raw_image) != self.frame_size:
            return None
        image = np.frombuffer(raw_image, dtype=np.uint8).reshape(self.frame_shape)
        return image

    def release(self):
        self.pipe.terminate()

    def wait_for_cam(self):
        # Warm-up: read and discard 30 frames, then check that the
        # last one was a valid frame.
        for _ in range(30):
            frame = self.read()
        if frame is not None:
            return True
        return False


class VideoStreamCV:
    def __init__(self, src: int, fps: int, resolution: Tuple[int, int]):
        self.src = src
        self.fps = fps
        self.resolution = resolution
        self.cap = self._open_camera()
        self.wait_for_cam()

    def _open_camera(self):
        cap = cv2.VideoCapture(self.src)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.resolution[0])
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.resolution[1])
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        cap.set(cv2.CAP_PROP_FOURCC, fourcc)  # request MJPEG so the camera can deliver its full fps
        cap.set(cv2.CAP_PROP_FPS, self.fps)
        return cap

    def read(self):
        ret, frame = self.cap.read()
        if not ret:
            return None
        return frame

    def release(self):
        self.cap.release()

    def wait_for_cam(self):
        # Warm-up: read and discard 30 frames, then check that the
        # last one was a valid frame.
        for _ in range(30):
            frame = self.read()
        if frame is not None:
            return True
        return False


def timeit(func):
    def wrapper(*args, **kwargs):
        t0 = time.perf_counter()
        result = func(*args, **kwargs)
        t1 = time.perf_counter()
        print(f"Main function time: {round(t1-t0, 4)}s")
        return result

    return wrapper


def computation_task():
    for _ in range(5000000):
        9999 * 9999


@timeit
def run(cam: VideoStreamCV | VideoStreamFFmpeg, run_task: bool):
    timer = []
    for _ in range(100):
        t0 = time.perf_counter()
        cam.read()
        timer.append(time.perf_counter() - t0)  # only the time spent waiting for the frame

        if run_task:
            computation_task()

    cam.release()
    return round(np.mean(timer), 4)


def main():
    fps = 30
    resolution = (1920, 1080)

    for run_task in [False, True]:
        ff_cam = VideoStreamFFmpeg(src=0, fps=fps, resolution=resolution)
        cv_cam = VideoStreamCV(src=0, fps=fps, resolution=resolution)

        print(f"FFMPEG, task {run_task}:")
        print(f"Mean frame read time: {run(cam=ff_cam, run_task=run_task)}s\n")
        print(f"CV2, task {run_task}:")
        print(f"Mean frame read time: {run(cam=cv_cam, run_task=run_task)}s\n")


if __name__ == "__main__":
    main()

NOTE: This script was tested on an Apple M1 Pro chip. Hope this is helpful! Argo Sakyan

 
