USB camera video surveillance project study notes

The system call of a camera monitoring application is as follows:

/* open
 * VIDIOC_QUERYCAP determines whether it is a video capture device, which interface supports (streaming / read, write)
 * VIDIOC_ENUM_FMT query which format is supported
 * VIDIOC_S_FMT sets which format the camera uses
 * VIDIOC_REQBUFS application buffer
 for streaming:
 * VIDIOC_QUERYBUF determines each buffer information and mmap
 * VIDIOC_QBUF put in the queue
 * VIDIOC_STREAMON start the device
 * poll waiting for data
 * VIDIOC_DQBUF get out of the queue
 * processing ....
 * VIDIOC_QBUF put in the queue
 * ....
 * VIDIOC_STREAMOFF stop the device
 *
 */

 

This article is the author's notes from a camera monitoring project made earlier. The main purpose is to record the learning process, making later review convenient. We combine the application and the driver of a uvc camera to analyze the process by which the data produced by the uvc camera travels from the hardware up to the application program.

 

One: VIDIOC_QUERYCAP

The VIDIOC_QUERYCAP ioctl mainly fills in the v4l2_capability structure variable tV4l2Cap passed in by the APP (passed in by pointer — most parameters in this article are passed by pointer; for brevity we simply say "the incoming variable"). The APP then checks the returned values to determine whether the device is a video capture device, whether it is a streaming device, and so on.

APP program:

struct v4l2_capability tV4l2Cap;
memset(&tV4l2Cap, 0, sizeof(struct v4l2_capability));
iError = ioctl(iFd, VIDIOC_QUERYCAP, &tV4l2Cap);
if (iError) {
    DBG_PRINTF("Error opening device %s: unable to query device.\n", strDevName);
    goto err_exit;
}

if (!(tV4l2Cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
{
    DBG_PRINTF("%s is not a video capture device\n", strDevName);
    goto err_exit;
}

if (tV4l2Cap.capabilities & V4L2_CAP_STREAMING) {
	DBG_PRINTF("%s supports streaming i/o\n", strDevName);
}
    
if (tV4l2Cap.capabilities & V4L2_CAP_READWRITE) {
	DBG_PRINTF("%s supports read i/o\n", strDevName);
}

driver:

case VIDIOC_QUERYCAP:        
	{
		struct v4l2_capability *cap = arg;    //获取APP传入的指针

		memset(cap, 0, sizeof *cap);
		strlcpy(cap->driver, "uvcvideo", sizeof cap->driver);
		strlcpy(cap->card, vdev->name, sizeof cap->card);
		usb_make_path(stream->dev->udev,
			      cap->bus_info, sizeof(cap->bus_info));
		cap->version = DRIVER_VERSION_NUMBER;        
		if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)    //如果插入的设备是摄像头
			cap->capabilities = V4L2_CAP_VIDEO_CAPTURE      //设置传入的结构体变量的信息 
					  | V4L2_CAP_STREAMING;
		else
			cap->capabilities = V4L2_CAP_VIDEO_OUTPUT
					  | V4L2_CAP_STREAMING;
		break;
	}

Q: On what basis does the underlying driver set the variables we pass in?

Answer: Device descriptor.

Q: Where does the device descriptor come from?

A: When our uvc camera is plugged into the development board, the USB bus driver creates a usb_device structure and links it into the USB bus driver's device list. This usb_device structure contains our hardware information — including the descriptors.

 

Two: VIDIOC_ENUM_FMT

The VIDIOC_ENUM_FMT ioctl mainly fills in the v4l2_fmtdesc structure variable tFmtDesc passed in by the APP. The APP then checks whether it supports this pixelformat; if so, the value is saved to ptVideoDevice->iPixelFormat (ptVideoDevice is a pointer to the APP's tVideoDevice structure).

APP program:

        struct v4l2_fmtdesc tFmtDesc;
    memset(&tFmtDesc, 0, sizeof(tFmtDesc));
	tFmtDesc.index = 0;
	tFmtDesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    //枚举摄像头硬件支持的各种格式,若我们的应用程序支持这种格式则跳出循环
	while ((iError = ioctl(iFd, VIDIOC_ENUM_FMT, &tFmtDesc)) == 0) {
        if (isSupportThisFormat(tFmtDesc.pixelformat))//这个函数在下面定义了
        {
            ptVideoDevice->iPixelFormat = tFmtDesc.pixelformat;
            break;
        }
		tFmtDesc.index++;//index++再传入驱动即可查看硬件所支持的下一种格式
	}

    if (!ptVideoDevice->iPixelFormat)//无法支持
    {
    	DBG_PRINTF("can not support the format of this device\n");
        goto err_exit;        
    }

driver:

	case VIDIOC_ENUM_FMT:
	{
		struct v4l2_fmtdesc *fmt = arg;    //获取APP传入的指针
		struct uvc_format *format;
		enum v4l2_buf_type type = fmt->type;
		__u32 index = fmt->index;

        //检查APP传入的数据
		if (fmt->type != stream->type ||
		    fmt->index >= stream->nformats)    //类型不对或者超过硬件支持的种数了
			return -EINVAL;

		memset(fmt, 0, sizeof(*fmt));
		fmt->index = index;
		fmt->type = type;

        //根据硬件的信息设置APP传入的变量
		format = &stream->format[fmt->index];
		fmt->flags = 0;
		if (format->flags & UVC_FMT_FLAG_COMPRESSED)
			fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
		strlcpy(fmt->description, format->name,
			sizeof fmt->description);
		fmt->description[sizeof fmt->description - 1] = 0;
		fmt->pixelformat = format->fcc;        //设置pixelformat,如YUYV、RGB、MJPEG等等...
		break;
	}

 

Three: VIDIOC_S_FMT

We first obtain the LCD resolution and tVideoDevice->iPixelFormat (such as YUYV, MJPEG, etc.) in the APP, and then pass them into the driver through the v4l2_format structure variable tV4l2Fmt. The values passed in are not actually applied to the hardware at this point — they are only saved temporarily, and are really sent to the hardware at streamon time.

Q: Why do you need to get the resolution of the LCD in the APP first, and then pass it to the driver?

A: Our project is to obtain the data collected by the camera and display it on the LCD. In order to enhance the display effect, we set the resolution of the image collected by the camera to the same as our LCD through the incoming parameters (if the camera does not support this resolution , Then the driver will find a resolution close to the value we set).

    int iLcdWidth;
    int iLcdHeigt;
    int iLcdBpp;
    struct v4l2_format  tV4l2Fmt;

    /* set format in */
    GetDispResolution(&iLcdWidth, &iLcdHeigt, &iLcdBpp);//获取LCD分辨率
    memset(&tV4l2Fmt, 0, sizeof(struct v4l2_format));
    tV4l2Fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    tV4l2Fmt.fmt.pix.pixelformat = ptVideoDevice->iPixelFormat;
    tV4l2Fmt.fmt.pix.width       = iLcdWidth;
    tV4l2Fmt.fmt.pix.height      = iLcdHeigt;
    tV4l2Fmt.fmt.pix.field       = V4L2_FIELD_ANY;

    /* 如果驱动程序发现无法支持某些参数(比如分辨率),
     * 它会调整这些参数, 并且返回给应用程序
     */
    iError = ioctl(iFd, VIDIOC_S_FMT, &tV4l2Fmt); 
    if (iError) 
    {
    	DBG_PRINTF("Unable to set format\n");
        goto err_exit;        
    }
	/* 读出调整后的参数 */
    ptVideoDevice->iWidth  = tV4l2Fmt.fmt.pix.width;
    ptVideoDevice->iHeight = tV4l2Fmt.fmt.pix.height;

driver:

	/* Find the closest image size. The distance between image sizes is
	 * the size in pixels of the non-overlapping regions between the
	 * requested size and the frame-specified size.
	 */
        //找到和我们传入的值最接近的分辨率(fmt是我们在APP里传入的那个变量)
	rw = fmt->fmt.pix.width;
	rh = fmt->fmt.pix.height;
	maxd = (unsigned int)-1;

	for (i = 0; i < format->nframes; ++i) {
		__u16 w = format->frame[i].wWidth;
		__u16 h = format->frame[i].wHeight;

		d = min(w, rw) * min(h, rh);
		d = w*h + rw*rh - 2*d;
		if (d < maxd) {
			maxd = d;
			frame = &format->frame[i];
		}

		if (maxd == 0)
			break;
	}            //此处代码没贴全(因为太长了)
        ...
        ...

        
	fmt->fmt.pix.width = frame->wWidth;
	fmt->fmt.pix.height = frame->wHeight;
	fmt->fmt.pix.field = V4L2_FIELD_NONE;
	fmt->fmt.pix.bytesperline = format->bpp * frame->wWidth / 8;
	fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
	fmt->fmt.pix.colorspace = format->colorspace;
	fmt->fmt.pix.priv = 0;

 

Four: VIDIOC_REQBUFS

For VIDIOC_REQBUFS we first set some values in the APP, such as the number of buffers to request, and pass them into the driver through the v4l2_requestbuffers structure variable tV4l2ReqBuffs. The driver allocates buffers according to the requested count and the dwMaxVideoFrameSize (sizeimage) from step three, then records information about each buffer in the queue (such as each buffer's index and offset), and finally returns the number of buffers actually allocated to the APP, which we save into ptVideoDevice->iVideoBufCnt (ptVideoDevice->iVideoBufCnt = tV4l2ReqBuffs.count;)

APP program:

    struct v4l2_requestbuffers tV4l2ReqBuffs;
    /* request buffers */
    memset(&tV4l2ReqBuffs, 0, sizeof(struct v4l2_requestbuffers));
    tV4l2ReqBuffs.count = NB_BUFFER;
    tV4l2ReqBuffs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    tV4l2ReqBuffs.memory = V4L2_MEMORY_MMAP;

    iError = ioctl(iFd, VIDIOC_REQBUFS, &tV4l2ReqBuffs);
    if (iError) 
    {
    	DBG_PRINTF("Unable to allocate buffers.\n");
        goto err_exit;        
    }
    /* 申请buffer不一定能成功,真正申请到的buffer个数记录在tV4l2ReqBuffs.count */
    ptVideoDevice->iVideoBufCnt = tV4l2ReqBuffs.count;

driver

struct v4l2_requestbuffers *rb = arg;
ret = uvc_alloc_buffers(&stream->queue, rb->count,stream->ctrl.dwMaxVideoFrameSize);

Part of the code of uvc_alloc_buffers is as follows: 

/* Decrement the number of buffers until allocation succeeds. */
//如果分配不成功,则减少要分配的缓冲区个数,然后再尝试一下
	for (; nbuffers > 0; --nbuffers) {
		mem = vmalloc_32(nbuffers * bufsize);
		if (mem != NULL)
			break;
	}
//一个缓冲区都分配不成功,返回错误信息
	if (mem == NULL) {
		ret = -ENOMEM;
		goto done;
	}

//设置队列queue里各个缓冲区的信息,例如偏移量,号数index等等...
	for (i = 0; i < nbuffers; ++i) {
		memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
		queue->buffer[i].buf.index = i;
		queue->buffer[i].buf.m.offset = i * bufsize;
		queue->buffer[i].buf.length = buflength;
		queue->buffer[i].buf.type = queue->type;
		queue->buffer[i].buf.field = V4L2_FIELD_NONE;
		queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
		queue->buffer[i].buf.flags = 0;
		init_waitqueue_head(&queue->buffer[i].wait);
	}

	queue->mem = mem;        //真正存放图片数据的地址在这里,队列里面的buffer数组只是记录各个缓冲区的偏移量之类的信息
	queue->count = nbuffers;
	queue->buf_size = bufsize;
	ret = nbuffers;

done:
	mutex_unlock(&queue->mutex);
	return ret;

 

五、for(i = 0; i < ptVideoDevice->iVideoBufCnt; i++){

VIDIOC_QUERYBUF 和 mmap

}

For VIDIOC_QUERYBUF and mmap, first set some values in the APP into the v4l2_buffer structure variable tV4l2Buf — for example tV4l2Buf.index, indicating which buffer's information we want to query (what information? the offset value and size (sizeimage), etc.). Then tV4l2Buf is passed to the driver, which, according to tV4l2Buf.index, copies the information of the corresponding buffer in the queue into tV4l2Buf and returns it to the APP. The APP saves tV4l2Buf.length into ptVideoDevice->iVideoBufMaxLen.

Then mmap is called, passing tV4l2Buf.length and tV4l2Buf.m.offset to the driver. After some comparison, the driver finds the buffer to be mmap-ed (one of the buffers allocated in step four) and maps it; the returned first address of the buffer is assigned to ptVideoDevice->pucVideBuf[i]. After the loop finishes, the entries of the ptVideoDevice->pucVideBuf array point to the first address of each buffer.

APP program:

        	memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
        	tV4l2Buf.index = i;
        	tV4l2Buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        	tV4l2Buf.memory = V4L2_MEMORY_MMAP;
        	iError = ioctl(iFd, VIDIOC_QUERYBUF, &tV4l2Buf);
        	if (iError) 
            {
        	    DBG_PRINTF("Unable to query buffer.\n");
        	    goto err_exit;
        	}

            ptVideoDevice->iVideoBufMaxLen = tV4l2Buf.length;
        	ptVideoDevice->pucVideBuf[i] = mmap(0 /* start anywhere */ ,tV4l2Buf.length, PROT_READ, MAP_SHARED, iFd,tV4l2Buf.m.offset);
        	if (ptVideoDevice->pucVideBuf[i] == MAP_FAILED) 
            {
        	    DBG_PRINTF("Unable to map buffer\n");
        	    goto err_exit;
        	}

driver:

/* Handle VIDIOC_QUERYBUF: copy the state of the buffer selected by
 * v4l2_buf->index back into the APP-supplied v4l2_buffer.
 * Returns 0 on success, -EINVAL if the index is out of range. */
int uvc_query_buffer(struct uvc_video_queue *queue,
		struct v4l2_buffer *v4l2_buf)
{
	int ret = 0;

	mutex_lock(&queue->mutex);
	if (v4l2_buf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

        /* use the v4l2_buf->index passed in by the APP to decide which
         * buffer's information to query */
	__uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);/* the function is defined below */

done:
	mutex_unlock(&queue->mutex);
	return ret;
}
/* Copy the kernel buffer's v4l2_buffer into the APP-supplied structure and
 * translate the driver-internal buffer state into V4L2 flag bits. */
static void __uvc_query_buffer(struct uvc_buffer *buf,
		struct v4l2_buffer *v4l2_buf)
{
        /* copy the kernel buffer's information into the address passed in by the APP */
	memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);

	if (buf->vma_use_count)
		v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;

        /* set the flags according to the buffer's state */
	switch (buf->state) {
	case UVC_BUF_STATE_ERROR:
	case UVC_BUF_STATE_DONE:
		v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case UVC_BUF_STATE_QUEUED:
	case UVC_BUF_STATE_ACTIVE:
	case UVC_BUF_STATE_READY:
		v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case UVC_BUF_STATE_IDLE:
	default:
		break;
	}
}

/*
 * Memory-map a video buffer.
 *
 * This function implements video buffers memory mapping and is intended to be
 * used by the device mmap handler.
 */
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	struct uvc_buffer *uninitialized_var(buffer);
	struct page *page;
	unsigned long addr, start, size;
	unsigned int i;
	int ret = 0;

	start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;

	mutex_lock(&queue->mutex);

        
    /* When the application calls mmap() it passes in an offset parameter;
     * use that offset to locate the buffer being requested.
     */
	for (i = 0; i < queue->count; ++i) {
		buffer = &queue->buffer[i];
		if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)/* compare each kernel buffer's offset with the incoming offset; a match means this is the buffer to mmap */
			break;
	}

	if (i == queue->count || PAGE_ALIGN(size) != queue->buf_size) {
		ret = -EINVAL;
		goto done;
	}

	/*
	 * VM_IO marks the area as being an mmaped region for I/O to a
	 * device. It also prevents the region from being core dumped.
	 */
	vma->vm_flags |= VM_IO;

    /* find the page structures backing the buffer from its kernel virtual address */
	addr = (unsigned long)queue->mem + buffer->buf.m.offset;
#ifdef CONFIG_MMU
	while (size > 0) {
		page = vmalloc_to_page((void *)addr);

        /* insert the page into the APP's virtual address range */
		if ((ret = vm_insert_page(vma, start, page)) < 0)
			goto done;

		start += PAGE_SIZE;
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#endif

	vma->vm_ops = &uvc_vm_ops;
	vma->vm_private_data = buffer;
	uvc_vm_open(vma);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

 

六、for(i = 0; i < ptVideoDevice->iVideoBufCnt; i++){

VIDIOC_QBUF

}

First clear the variable tV4l2Buf from step five, set tV4l2Buf.index, and pass it into the driver. For each index the driver finds the corresponding buffer (looking at the code, it actually only holds the buffer's bookkeeping information — the place where the data is really stored is queue->mem), and hangs the buffer's stream and queue list nodes onto the queue's mainqueue and irqqueue respectively, forming two queues.

In fact, it is a doubly linked list. The above picture is just for easy understanding. 

APP program:

        	memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
        	tV4l2Buf.index = i;
        	tV4l2Buf.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        	tV4l2Buf.memory = V4L2_MEMORY_MMAP;
        	iError = ioctl(iFd, VIDIOC_QBUF, &tV4l2Buf);
        	if (iError)
            {
        	    DBG_PRINTF("Unable to queue buffer.\n");
        	    goto err_exit;
        	}

driver: 


/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue,
	struct v4l2_buffer *v4l2_buf)
{
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

        /* validate the type/memory parameters passed in by the APP */
	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
        /* validate the buffer index passed in by the APP */
	if (v4l2_buf->index >= queue->count) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
		ret = -EINVAL;
		goto done;
	}

        /* locate the corresponding buffer; only an IDLE buffer may be queued */
	buf = &queue->buffer[v4l2_buf->index];
	if (buf->state != UVC_BUF_STATE_IDLE) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
			"(%u).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    v4l2_buf->bytesused > buf->buf.length) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	if (queue->flags & UVC_QUEUE_DISCONNECTED) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		ret = -ENODEV;
		goto done;
	}
	buf->state = UVC_BUF_STATE_QUEUED;
	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->buf.bytesused = 0;
	else
		buf->buf.bytesused = v4l2_buf->bytesused;

        /* link the buffer's stream and queue nodes onto the queue's
         * mainqueue and irqqueue lists respectively */
	list_add_tail(&buf->stream, &queue->mainqueue);
	list_add_tail(&buf->queue, &queue->irqqueue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

 

Seven: streamon

The work of this step is mainly to initialize the URBs and start the camera. Searching for urb->complete in the kernel code leads to the uvc_video_complete function in uvc_video.c. When the hardware has filled a URB with a frame of data, the uvc_video_complete function is called.

The uvc_video_complete function is as follows:

/* URB completion handler: called when the hardware has filled a URB with
 * video data. Hands the data to the decode routine and resubmits the URB. */
static void uvc_video_complete(struct urb *urb)
{
	struct uvc_streaming *stream = urb->context;
	struct uvc_video_queue *queue = &stream->queue;
	struct uvc_buffer *buf = NULL;
	unsigned long flags;
	int ret;

	switch (urb->status) {
	case 0:
		break;

	default:
		uvc_printk(KERN_WARNING, "Non-zero status (%d) in video "
			"completion handler.\n", urb->status);
		/* fall through */

	case -ENOENT:		/* usb_kill_urb() called. */
		if (stream->frozen)
			return;
		/* fall through */

	case -ECONNRESET:	/* usb_unlink_urb() called. */
	case -ESHUTDOWN:	/* The endpoint is being disabled. */
		uvc_queue_cancel(queue, urb->status == -ESHUTDOWN);
		return;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
        /* if the irqqueue list is not empty, point buf at its first node,
         * ready to copy the data from the URB into that node's buffer */
	if (!list_empty(&queue->irqqueue))
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,queue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

        /* "decode" here really just copies the data in the URB into the
         * buffer belonging to the first node of the irqqueue list */
	stream->decode(urb, stream, buf);

        /* resubmit the URB so the hardware can fill it again */
	if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
			ret);
	}
}

Searching for stream->decode in the kernel source, we find stream->decode = uvc_video_decode_isoc. Analyzing the source of the uvc_video_decode_isoc function, we see the part below, whose role is to copy the data in the URB (the `data` in the code below) into the buffer corresponding to the first node of the irqqueue. Because the calling process is rather involved, it is kept short here; for a detailed analysis see the source code.

	/* Copy the video data to the buffer. */
	maxlen = buf->buf.length - buf->buf.bytesused;
	mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
	nbytes = min((unsigned int)len, maxlen);
	memcpy(mem, data, nbytes);
	buf->buf.bytesused += nbytes;

Then delete this node of the irqqueue queue and wake up the APP process (go to sleep when waiting for data)

list_del(&buf->queue);
wake_up(&buf->wait);

 

Eight: VIDIOC_DQBUF

When the application is awakened, VIDIOC_DQBUF is called, and the APP transfers the structure variable tV4l2Buf of type v4l2_buffer to the driver. The driver copies the data of the first node of the mainqueue queue to tV4l2Buf (we mainly use tV4l2Buf.index) , And then delete this node from the mainqueue queue.

With this tV4l2Buf.index, the APP knows that ptVideoDevice-> pucVideBuf [i] has data, and assigns its address to ptVideoBuf-> tPixelDatas.aucPixelDatas (used to point to the cache with image data each time). At this point we will get a picture from the hardware to the APP.

APP program: 

    /* VIDIOC_DQBUF */
    memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
    tV4l2Buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    tV4l2Buf.memory = V4L2_MEMORY_MMAP;
    iRet = ioctl(ptVideoDevice->iFd, VIDIOC_DQBUF, &tV4l2Buf);
    if (iRet < 0) 
    {
    	DBG_PRINTF("Unable to dequeue buffer.\n");
    	return -1;
    }
    ptVideoDevice->iVideoBufCurIndex = tV4l2Buf.index;

    ptVideoBuf->iPixelFormat        = ptVideoDevice->iPixelFormat;
    ptVideoBuf->tPixelDatas.iWidth  = ptVideoDevice->iWidth;
    ptVideoBuf->tPixelDatas.iHeight = ptVideoDevice->iHeight;
    ptVideoBuf->tPixelDatas.iBpp    = (ptVideoDevice->iPixelFormat == V4L2_PIX_FMT_YUYV) ? 16 : \
                                        (ptVideoDevice->iPixelFormat == V4L2_PIX_FMT_MJPEG) ? 0 :  \
                                        (ptVideoDevice->iPixelFormat == V4L2_PIX_FMT_RGB565) ? 16 :  \
                                        0;
    ptVideoBuf->tPixelDatas.iLineBytes    = ptVideoDevice->iWidth * ptVideoBuf->tPixelDatas.iBpp / 8;
    ptVideoBuf->tPixelDatas.iTotalBytes   = tV4l2Buf.bytesused;
        //获取图片缓存的地址
    ptVideoBuf->tPixelDatas.aucPixelDatas = ptVideoDevice->pucVideBuf[tV4l2Buf.index];  

Some drivers:

        //取出mainqueue队列的第一个节点
	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
        //把这个节点从mainqueue队列中删除
	list_del(&buf->stream);
        //和第五步的VIDIOC_QUERYBUF一样,它的作用其实就是把buf(也就是mainqueue队列第一个节点)的数据拷贝到v4l2_buf(我们主要用到v4l2_buf.index)
	__uvc_query_buffer(buf, v4l2_buf);

 

Nine: VIDIOC_QBUF again, in order to be able to cycle through the pictures, we have to call VIDIOC_QBUF to insert the node that was deleted from the two queues.

Execute tV4l2Buf.index = ptVideoDevice->iVideoBufCurIndex (the tV4l2Buf.index recorded in step eight) to identify which buffer was removed from the queues last time, then re-insert it into the queues and wait for the next frame's arrival.

    struct v4l2_buffer tV4l2Buf;
    int iError;
    
	memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
	tV4l2Buf.index  = ptVideoDevice->iVideoBufCurIndex;
	tV4l2Buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	tV4l2Buf.memory = V4L2_MEMORY_MMAP;
	iError = ioctl(ptVideoDevice->iFd, VIDIOC_QBUF, &tV4l2Buf);
	if (iError) 
    {
	    DBG_PRINTF("Unable to queue buffer.\n");
	    return -1;
	}
    return 0;

/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue,
	struct v4l2_buffer *v4l2_buf)
{
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

        /* validate the type/memory parameters passed in by the APP */
	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
        /* validate the buffer index passed in by the APP */
	if (v4l2_buf->index >= queue->count) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
		ret = -EINVAL;
		goto done;
	}

        /* locate the corresponding buffer; only an IDLE buffer may be queued */
	buf = &queue->buffer[v4l2_buf->index];
	if (buf->state != UVC_BUF_STATE_IDLE) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
			"(%u).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    v4l2_buf->bytesused > buf->buf.length) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	if (queue->flags & UVC_QUEUE_DISCONNECTED) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		ret = -ENODEV;
		goto done;
	}
	buf->state = UVC_BUF_STATE_QUEUED;
	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->buf.bytesused = 0;
	else
		buf->buf.bytesused = v4l2_buf->bytesused;

        /* link the buffer's stream and queue nodes onto the queue's
         * mainqueue and irqqueue lists respectively */
	list_add_tail(&buf->stream, &queue->mainqueue);
	list_add_tail(&buf->queue, &queue->irqqueue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

 

 

 

 

 

Finally, attach a few more structures used above for easy viewing

1.T_VideoBuf

typedef struct VideoBuf {
    T_PixelDatas tPixelDatas;  /* pixel data of one captured frame */
    int iPixelFormat;          /* pixel format, e.g. V4L2_PIX_FMT_YUYV */
}T_VideoBuf, *PT_VideoBuf;



/* Pixel data of an image */
typedef struct PixelDatas {
	int iWidth;   /* width: number of pixels per row */
	int iHeight;  /* height: number of pixels per column */
	int iBpp;     /* bits used to represent one pixel */
	int iLineBytes;  /* number of bytes per line */
	int iTotalBytes; /* total number of bytes */
	unsigned char *aucPixelDatas;  /* where the pixel data is stored; always points to the buffer holding the newest frame */
}T_PixelDatas, *PT_PixelDatas;

2.

struct VideoDevice {
    int iFd;             /* file descriptor of the opened video device */
    int iPixelFormat;    /* pixel format chosen in VIDIOC_ENUM_FMT */
    int iWidth;          /* frame width after VIDIOC_S_FMT adjustment */
    int iHeight;         /* frame height after VIDIOC_S_FMT adjustment */

    int iVideoBufCnt;    /* number of buffers actually allocated by VIDIOC_REQBUFS */
    int iVideoBufMaxLen; /* length of each buffer, from VIDIOC_QUERYBUF */
    int iVideoBufCurIndex;    /* index of the most recently dequeued buffer */
    unsigned char *pucVideBuf[NB_BUFFER];    /* mapped via mmap; each entry points to one buffer's address (e.g. mem + 1 * offset, mem + 2 * offset) */

    /* operations */
    PT_VideoOpr ptOPr;
};

 

3.uvc_video_queue 

struct uvc_video_queue {
	enum v4l2_buf_type type;

	void *mem;            /* where the image data is really stored */
	unsigned int flags;

	unsigned int count;
	unsigned int buf_size;
	unsigned int buf_used;
	struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS];    /* n buffers recording each buffer's offset, index and other bookkeeping */
	struct mutex mutex;	/* protects buffers and mainqueue */
	spinlock_t irqlock;	/* protects irqqueue */

	struct list_head mainqueue;    /* queue used by the APP (list head) */
	struct list_head irqqueue;    /* queue used by the driver (list head) */
};

 4.uvc_buffer 

struct uvc_buffer {
	unsigned long vma_use_count;
	struct list_head stream;    /* node on the queue used by the APP (mainqueue) */

	/* Touched by interrupt handler. */
	struct v4l2_buffer buf;
	struct list_head queue;    /* node on the queue used by the driver (irqqueue) */
	wait_queue_head_t wait;
	enum uvc_buffer_state state;
	unsigned int error;
};

5.v4l2_buffer  

struct v4l2_buffer {
	__u32			index;    /* index number of the buffer */
	enum v4l2_buf_type      type;
	__u32			bytesused;
	__u32			flags;
	enum v4l2_field		field;
	struct timeval		timestamp;
	struct v4l2_timecode	timecode;
	__u32			sequence;

	/* memory location */
	enum v4l2_memory        memory;
	union {
		__u32           offset;    /* offset of each buffer; the real image data lives at mem + n * offset */
		unsigned long   userptr;
		struct v4l2_plane *planes;
	} m;
	__u32			length;
	__u32			input;
	__u32			reserved;
};

 

 

Published 42 original articles · Like 10 · Visitors 10,000+

Guess you like

Origin blog.csdn.net/qq_37659294/article/details/104326223