MJPG-streamer源码分析-输入部分

MJPG-streamer仅可以创建一个输入通道,对一个相机进行采集(如需多个,需另行修改)。在进行相机采集开始前,需要对其进行初始化,包括采集过程的全局变量定义和内存开辟。运行过程则是将相机采集得到的数据,根据从相机映射到内存的地址上,按照不同的格式要求(YUV或MJPEG)进行操作,包括格式转换和数据复制,最后图像数据被复制到指定的全局数据缓冲区。

====================================输入通道初始化过程=======================================

input_init()函数的初始化过程概括如下:

1、【定义和初始化变量】

包括width=640, height=480, fps=5, format=V4L2_PIX_FMT_MJPEG,char *argv[MAX_ARGUMENTS]={NULL}, *dev = "/dev/video0",

2、【初始化互斥锁】

pthread_mutex_init(&controls_mutex, NULL)

3、【解析参数】

将传入的单串参数param->parameter_string转换为字符串数组,存放在argv数组中,调用c = getopt_long_only(argc, argv, "", long_options, &option_index);通过option_index和switch将对应参数存放在目标对象上,包括dev,width,height,fps,format,gquality,dynctrls,led或打印help,具体可看源码。这个过程跟主函数中对参数的解析过程类似,只是在主函数中,主要是把输入参数分别分配到全局变量的输入参数和输出参数上,而在初始化函数中,参数解析也是类似的,只是把参数解析成对应对象,如width, height, fps, format等参数,

4、【设置参数input_uvc.c全局参数】

其中,在input_uvc.c文件中也定义了一个全局变量global的对象指针,并将其指向了主函数文件中的全局对象,这样,在input_uvc.c文件中的采集线程处理函数void *cam_thread( void *arg )可以直接通过该指针直接调用。
pglobal = param->global;
videoIn = malloc(sizeof(struct vdIn));
memset(videoIn, 0, sizeof(struct vdIn));

5、【打开视频设备并准备数据结构】

init_videoIn(videoIn, dev, width, height, fps, format, 1)

定义videoIn的结构体如下,其中videoIn用于存放输入的所有参数,包括图像大小、帧率、格式、缓冲区、相机参数等,

/* Per-device capture state: geometry, pixel format, V4L2 handles,
 * the mmap'ed driver buffers and the CPU-side working buffers. */
struct vdIn {
    int fd;                     /* file descriptor of the opened V4L2 device */
    char *videodevice;          /* device path, e.g. "/dev/video0" */
    char *status;               /* human-readable status string */
    char *pictName;             /* file name used when snapshotting */
    struct v4l2_capability cap; /* result of VIDIOC_QUERYCAP */
    struct v4l2_format fmt;     /* negotiated capture format (VIDIOC_S_FMT) */
    struct v4l2_buffer buf;     /* scratch buffer descriptor for QBUF/DQBUF */
    struct v4l2_requestbuffers rb; /* VIDIOC_REQBUFS request/result */
    void *mem[NB_BUFFER];       /* mmap'ed addresses of the driver buffers */
    unsigned char *tmpbuffer;   /* raw MJPEG frame copied out of mem[] (MJPEG path only) */
    unsigned char *framebuffer; /* decoded/raw frame buffer (YUYV path, or MJPEG scratch) */
    int isstreaming;            /* non-zero once VIDIOC_STREAMON succeeded */
    int grabmethod;             /* 1 = mmap streaming i/o, 0 = read() i/o */
    int width;
    int height;
    int fps;
    int formatIn;               /* pixel format requested from the camera (V4L2_PIX_FMT_*) */
    int formatOut;              /* pixel format delivered to consumers */
    int framesizeIn;            /* width * height * 2: worst-case raw frame size in bytes */
    int signalquit;             /* set to 0 to ask the capture loop to stop */
    int toggleAvi;
    int getPict;
    int rawFrameCapture;
    /* raw frame capture */
    unsigned int fileCounter;
    /* raw frame stream capture */
    unsigned int rfsFramesWritten;
    unsigned int rfsBytesWritten;
    /* raw stream capture */
    FILE *captureFile;
    unsigned int framesWritten;
    unsigned int bytesWritten;
    int framecount;
    int recordstart;
    int recordtime;
};

设备打开函数init_videoIn():主要是对vdIn定义的对象(即在input_init()函数传入的videoIn参数,该参数在初始化文件中是全局状态)进行赋值初始化,并调用init_v4l2()函数初始化设备,通过返回的相机参数,由于相机返回的图像格式可能是YUYV,也可能是MJPEG格式,两种格式在处理上有所不同,因此不同格式下数据需要复制到vd对象上不同的缓冲区,对应后续不同图像复制处理函数,因此需要根据格式不同,对对应的缓冲区的内存进行分配(也就是说,只有一种格式缓冲区会被分配内存)。

/*
 * Initialize the capture state in *vd, open the device via init_v4l2()
 * and allocate the format-specific working buffers.
 *
 * Only one of the two buffers is allocated, depending on the format:
 *   - MJPEG: tmpbuffer (raw JPEG copy) and framebuffer (scratch)
 *   - YUYV:  framebuffer only
 *
 * Returns 0 on success, -1 on failure (all resources released).
 */
int init_videoIn(struct vdIn *vd, char *device, int width, int height, int fps, int format, int grabmethod)
{
  if (vd == NULL || device == NULL)
    return -1;
  if (width == 0 || height == 0)
    return -1;
  if (grabmethod < 0 || grabmethod > 1)
    grabmethod = 1;		/* mmap by default */
  /* Make the error path safe: never close an fd we did not open and
   * never free/inspect garbage pointers. */
  vd->fd = -1;
  vd->tmpbuffer = NULL;
  vd->framebuffer = NULL;
  vd->videodevice = calloc(16, sizeof(char));
  vd->status = calloc(100, sizeof(char));
  vd->pictName = calloc(80, sizeof(char));
  /* Bug fix: the original never checked these allocations. */
  if (!vd->videodevice || !vd->status || !vd->pictName)
    goto error;
  /* snprintf always NUL-terminates; bug fix: the original passed 12,
   * silently truncating paths such as "/dev/video10" although the
   * buffer holds 16 bytes. */
  snprintf(vd->videodevice, 16, "%s", device);
  vd->toggleAvi = 0;
  vd->getPict = 0;
  vd->signalquit = 1;
  vd->width = width;
  vd->height = height;
  vd->fps = fps;
  vd->formatIn = format;
  vd->grabmethod = grabmethod;
  /* Open and configure the device (format, fps, mmap buffers). */
  if (init_v4l2(vd) < 0) {
    fprintf(stderr, " Init v4L2 failed !! exit fatal \n");
    goto error;
  }
  /* Worst-case frame size: 2 bytes per pixel (YUYV); MJPEG frames are
   * always smaller than this. */
  vd->framesizeIn = (vd->width * vd->height << 1);
  switch (vd->formatIn) {
  case V4L2_PIX_FMT_MJPEG:
    vd->tmpbuffer = calloc(1, (size_t) vd->framesizeIn);
    if (!vd->tmpbuffer)
      goto error;
    /* +8 lines of slack for JPEG decoders that overrun slightly */
    vd->framebuffer = calloc(1, (size_t) vd->width * (vd->height + 8) * 2);
    break;
  case V4L2_PIX_FMT_YUYV:
    vd->framebuffer = calloc(1, (size_t) vd->framesizeIn);
    break;
  default:
    fprintf(stderr, " should never arrive exit fatal !!\n");
    goto error;
  }
  if (!vd->framebuffer)
    goto error;
  return 0;
error:
  /* Release everything acquired so far; free(NULL) is a no-op. */
  free(vd->videodevice);
  free(vd->status);
  free(vd->pictName);
  free(vd->tmpbuffer);          /* bug fix: was leaked if framebuffer calloc failed */
  vd->videodevice = NULL;
  vd->status = NULL;
  vd->pictName = NULL;
  vd->tmpbuffer = NULL;
  if (vd->fd >= 0)              /* bug fix: was close(vd->fd) even when never opened */
    close(vd->fd);
  vd->fd = -1;
  return -1;
}

调用init_v4l2()函数,主要是对相机参数的初始化。尤其重要部分是,把相机采集后得到数据的缓冲区映射到内存上

static int init_v4l2(struct vdIn *vd)
{
  int i;
  int ret = 0;
  //打开设备,vd->videodevice在前面经过参数解析后得到
  if ((vd->fd = open(vd->videodevice, O_RDWR)) == -1) {
    perror("ERROR opening V4L interface");
    return -1;
  }

  memset(&vd->cap, 0, sizeof(struct v4l2_capability));
  //设备驱动程序中对设备的I/O通道进行管理的函数,成功返回0,失败返回-1;
  ret = ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap);
  if (ret < 0) {
    fprintf(stderr, "Error opening device %s: unable to query device.\n", vd->videodevice);
    goto fatal;
  }

  if ((vd->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
    fprintf(stderr, "Error opening device %s: video capture not supported.\n",
           vd->videodevice);
    goto fatal;;
  }

  if (vd->grabmethod) {
    if (!(vd->cap.capabilities & V4L2_CAP_STREAMING)) {
      fprintf(stderr, "%s does not support streaming i/o\n", vd->videodevice);
      goto fatal;
    }
  } else {
    if (!(vd->cap.capabilities & V4L2_CAP_READWRITE)) {
      fprintf(stderr, "%s does not support read i/o\n", vd->videodevice);
      goto fatal;
    }
  }

  //设备输入参数
  memset(&vd->fmt, 0, sizeof(struct v4l2_format));
  vd->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  vd->fmt.fmt.pix.width = vd->width;
  vd->fmt.fmt.pix.height = vd->height;
  vd->fmt.fmt.pix.pixelformat = vd->formatIn;
  vd->fmt.fmt.pix.field = V4L2_FIELD_ANY;
  ret = ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt);
  if (ret < 0) {
    perror("Unable to set format");
    goto fatal;
  }

  if ((vd->fmt.fmt.pix.width != vd->width) ||
      (vd->fmt.fmt.pix.height != vd->height)) {
    fprintf(stderr, " format asked unavailable get width %d height %d \n", vd->fmt.fmt.pix.width, vd->fmt.fmt.pix.height);
    vd->width = vd->fmt.fmt.pix.width;
    vd->height = vd->fmt.fmt.pix.height;
    /*
     * look the format is not part of the deal ???
     */
    // vd->formatIn = vd->fmt.fmt.pix.pixelformat;
  }

   //设置帧率
  struct v4l2_streamparm *setfps;
  setfps = (struct v4l2_streamparm *) calloc(1, sizeof(struct v4l2_streamparm));
  memset(setfps, 0, sizeof(struct v4l2_streamparm));
  setfps->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  setfps->parm.capture.timeperframe.numerator = 1;
  setfps->parm.capture.timeperframe.denominator = vd->fps;
  ret = ioctl(vd->fd, VIDIOC_S_PARM, setfps);


   //设置缓冲区参数
  memset(&vd->rb, 0, sizeof(struct v4l2_requestbuffers));
  vd->rb.count = NB_BUFFER;
  vd->rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  vd->rb.memory = V4L2_MEMORY_MMAP;

  ret = ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb);
  if (ret < 0) {
    perror("Unable to allocate buffers");
    goto fatal;
  }

  /*
   * 重要部分,MMAP缓冲区映射,把摄像头采集的数据存放地址映射到内存上,并将对应的内存地址记录在vd->mem[i]上,
   * 这样子在后续的复制采集数据时可以直接操作该内存地址
   */
  for (i = 0; i < NB_BUFFER; i++) {
    memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
    vd->buf.index = i;
    vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->buf.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(vd->fd, VIDIOC_QUERYBUF, &vd->buf);
    if (ret < 0) {
      perror("Unable to query buffer");
      goto fatal;
    }

    if (debug)
      fprintf(stderr, "length: %u offset: %u\n", vd->buf.length, vd->buf.m.offset);

    vd->mem[i] = mmap(0 /* start anywhere */ ,
                      vd->buf.length, PROT_READ, MAP_SHARED, vd->fd,
                      vd->buf.m.offset);
    if (vd->mem[i] == MAP_FAILED) {
      perror("Unable to map buffer");
      goto fatal;
    }
    if (debug)
      fprintf(stderr, "Buffer mapped at address %p.\n", vd->mem[i]);
  }

  /*
   * 对缓冲区的队列,将空闲的内存加入可捕获视频的队列中,相机会将视频数据存放在该内存上
   */
  for (i = 0; i < NB_BUFFER; ++i) {
    memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
    vd->buf.index = i;
    vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->buf.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
    if (ret < 0) {
      perror("Unable to queue buffer");
      goto fatal;;
    }
  }
  return 0;
fatal:
  return -1;

}

上述函数涉及通过ioctl命令与驱动程序进行交互,其中常见ioctl命令有以下几种,对应的功能在后面注释,关于参数更多的解释,可以参考文章: ioctl命令

VIDIOC_QUERYCAP     /* 获取设备支持的操作 */  
VIDIOC_G_FMT        /* 获取设置支持的视频格式 */  
VIDIOC_S_FMT        /* 设置捕获视频的格式 */  
VIDIOC_REQBUFS      /* 向驱动提出申请内存的请求 */  
VIDIOC_QUERYBUF     /* 向驱动查询申请到的内存 */  
VIDIOC_QBUF         /* 将空闲的内存加入可捕获视频的队列 */  
VIDIOC_DQBUF        /* 将已经捕获好视频的内存拉出已捕获视频的队列 */  
VIDIOC_STREAMON     /* 打开视频流 */  
VIDIOC_STREAMOFF    /* 关闭视频流 */  
VIDIOC_QUERYCTRL    /* 查询驱动是否支持该命令 */  
VIDIOC_G_CTRL       /* 获取当前命令值 */  
VIDIOC_S_CTRL       /* 设置新的命令值 */  
VIDIOC_G_TUNER      /* 获取调谐器信息 */  
VIDIOC_S_TUNER      /* 设置调谐器信息 */  
VIDIOC_G_FREQUENCY  /* 获取调谐器频率 */  
VIDIOC_S_FREQUENCY  /* 设置调谐器频率 */  
VIDIOC_DQEVENT   //出队列处理命令

经过了输入通道的初始化之后,完成了相机参数设置、内存映射、格式判定和对应格式缓冲区内存分配,并启动第一次相机的采集,但采集得到的图像数据并未复制出来,因此此时主函数中的全局变量global还未为其缓冲区分配内存。
==========================================输入通道执行过程===============================================

主函数通过调用int input_run(void)函数启动相机采集,源码如下。可以看到,在执行run时,程序先为全局变量的buf分配内存,并为相机创建一个线程,调用线程处理函数cam_thread()。接着使用pthread_detach(),在线程退出时自动释放线程分配的资源。

pthread_detach()的意义在于:创建一个线程默认的状态是joinable,如果一个线程结束运行但没有被join,则它的状态类似于进程中的Zombie Process,即还有一部分资源没有被回收(退出状态码)。所以创建线程者应该调用pthread_join来等待线程运行结束,并可得到线程的退出代 码,回收其资源(类似于wait,waitpid) 。但是调用pthread_join(pthread_id)后,如果该线程没有运行结束,调用者会被阻塞。利用pthread_detach(),这将该子线程的状态设置为detached,则该线程运行结束后会自动释放所有资源。

/*
 * Start the capture: allocate the shared frame buffer (worst-case size
 * is one raw frame, framesizeIn) and spawn the detached capture thread.
 * Returns 0 on success; exits the process on unrecoverable failure,
 * matching the surrounding code's error policy.
 */
int input_run(void) {
  pglobal->buf = malloc(videoIn->framesizeIn);
  if (pglobal->buf == NULL) {
    fprintf(stderr, "could not allocate memory\n");
    exit(EXIT_FAILURE);
  }

  /* Bug fix: the original ignored pthread_create's return value, so a
   * failed thread creation silently produced a stream with no frames. */
  if (pthread_create(&cam, 0, cam_thread, NULL) != 0) {
    fprintf(stderr, "could not start cam_thread\n");
    free(pglobal->buf);
    pglobal->buf = NULL;
    exit(EXIT_FAILURE);
  }
  /* Detach so the thread's resources are reclaimed automatically on exit. */
  pthread_detach(cam);

  return 0;
}

对应线程处理函数cam_thread(),主要工作是获取一帧图像,并将其复制到全局缓冲区中,即上述函数所分配的全局buf位置上。获取图像是通过调用uvcGrab(videoIn)函数,之后根据不同的格式,进行格式转换和复制到全局缓冲区的过程。

其中,当格式是YUYV时,执行pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn, gquality);,并将所复制的内容大小写进pglobal->size。这个过程需要注意的是,由于YUV格式转换时,需要更多的CPU时间处理,故而会影响性能,应尽量避免使用YUV格式;

当采集格式是MJPEG格式,直接复制到内存中,调用pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused);并将所复制的内容大小写进pglobal->size。

/*
 * Capture thread: loop until pglobal->stop is set, grabbing one frame per
 * iteration with uvcGrab() and publishing it into the global buffer
 * (pglobal->buf) under pglobal->db, then waking all waiting consumers
 * via pglobal->db_update.  YUYV frames are JPEG-compressed first; MJPEG
 * frames are copied as-is.  Returns NULL; cleanup runs via the handler.
 */
void *cam_thread( void *arg ) {
  /* set cleanup handler to cleanup allocated ressources */
  pthread_cleanup_push(cam_cleanup, NULL);

  /* pglobal->stop starts at 0; the main program's signal_handler() (e.g.
   * on Ctrl+C) sets it to 1, which is the only way this loop exits. */
  while( !pglobal->stop ) {

    /* grab a frame */
    if( uvcGrab(videoIn) < 0 ) {
      IPRINT("Error grabbing frames\n");
      exit(EXIT_FAILURE);
    }
  
    DBG("received frame of size: %d\n", videoIn->buf.bytesused);

    /*
     * Workaround for broken, corrupted frames:
     * Under low light conditions corrupted frames may get captured.
     * The good thing is such frames are quite small compared to the regular pictures.
     * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
     * corrupted frames are smaller.
     */
    if ( videoIn->buf.bytesused < minimum_size ) {
      DBG("dropping too small frame, assuming it as broken\n");
      continue;
    }

    /* copy JPG picture to global buffer */
    /* The mutex must be held while writing the shared buffer so no
     * consumer reads a half-written frame. */
   pthread_mutex_lock( &pglobal->db );

    /*
     * If capturing in YUV mode convert to JPEG now.
     * This compression requires many CPU cycles, so try to avoid YUV format.
     * Getting JPEGs straight from the webcam, is one of the major advantages of
     * Linux-UVC compatible devices.
     */
    if (videoIn->formatIn == V4L2_PIX_FMT_YUYV) {
      DBG("compressing frame\n");
      pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn, gquality);
    }
    else {
      DBG("copying frame\n");
      pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused);
    }

#if 0
    /* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
    if ( (prev_size - global->size)*(prev_size - global->size) > 4*1024*1024 ) {
        DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
    }
    prev_size = global->size;
#endif

   /* Wake every thread blocked on db_update; they re-compete for the
    * mutex once it is released below. */
    pthread_cond_broadcast(&pglobal->db_update);
    pthread_mutex_unlock( &pglobal->db );

    DBG("waiting for next frame\n");

    /* only use usleep if the fps is below 5, otherwise the overhead is too long */
    if ( videoIn->fps < 5 ) {
      usleep(1000*1000/videoIn->fps);
    }
  }

  DBG("leaving input thread, calling cleanup function now\n");
  pthread_cleanup_pop(1);

  return NULL;
}

在uvcGrab(videoIn)函数中,先执行ioctl命令DQBUF,通知驱动程序获取已存放有图像数据的缓存,该缓存经过初始化已经映射到内存中,故而后续的操作只需要操作对应内存即可(映射到内存上的起始地址已经保存在vd->mem[i]中)。根据不同的视频格式要求,把视频数据复制到vd变量上不同的位置。之后再执行ioctl命令QBUF,向驱动传递应用程序已经处理完缓存,把对应的缓存加入到空闲捕获队列。之后退出本函数。而在cam_thread()函数中,只要pglobal->stop不为1,则不断采集并将数据存放到指定位置。

其中,当采集格式是MJPEG时,数据被复制到vd->tmpbuffer;采集格式是YUYV时,数据被复制到vd->framebuffer

/*
 * Grab one frame: start streaming on first use, dequeue a filled driver
 * buffer (VIDIOC_DQBUF), copy the pixel data out of the mmap'ed region
 * (vd->mem[index]) into the format-specific buffer — tmpbuffer for
 * MJPEG, framebuffer for YUYV — then requeue the buffer (VIDIOC_QBUF).
 * Returns 0 on success, -1 on error (vd->signalquit is cleared).
 */
int uvcGrab(struct vdIn *vd)
{
#define HEADERFRAME1 0xaf
  int ret;

  if (!vd->isstreaming)
    if (video_enable(vd))
      goto err;

  memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
  vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  vd->buf.memory = V4L2_MEMORY_MMAP;

  ret = ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf);
  if (ret < 0) {
    perror("Unable to dequeue buffer");
    goto err;
  }

  switch (vd->formatIn) {
    case V4L2_PIX_FMT_MJPEG:
      if (vd->buf.bytesused <= HEADERFRAME1) {    /* Prevent crash
                                                  * on empty image */
        fprintf(stderr, "Ignoring empty buffer ...\n");
        /* Bug fix: the original returned here WITHOUT requeuing the
         * dequeued buffer, so repeated empty frames starved the driver
         * of capture buffers until streaming stalled. */
        ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
        if (ret < 0) {
          perror("Unable to requeue buffer");
          goto err;
        }
        return 0;
      }

      memcpy(vd->tmpbuffer, vd->mem[vd->buf.index], vd->buf.bytesused);

      if (debug)
        fprintf(stderr, "bytes in used %d \n", vd->buf.bytesused);
      break;

    case V4L2_PIX_FMT_YUYV:
      /* Clamp to framesizeIn so an oversized report cannot overflow
       * the framebuffer allocation. */
      if (vd->buf.bytesused > vd->framesizeIn)
        memcpy(vd->framebuffer, vd->mem[vd->buf.index], (size_t) vd->framesizeIn);
      else
        memcpy(vd->framebuffer, vd->mem[vd->buf.index], (size_t) vd->buf.bytesused);
      break;

    default:
      goto err;
  }

  /* Hand the buffer back to the driver's free-capture queue. */
  ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
  if (ret < 0) {
    perror("Unable to requeue buffer");
    goto err;
  }

  return 0;

err:
  vd->signalquit = 0;
  return -1;
}

小结:线程执行函数中的循环体执行过程:先执行图像采集,再锁定互斥锁,根据不同格式,从不同的位置复制图像数据到指定全局缓存中。当格式指定为YUYV时,进行格式转换后再执行复制。最后广播条件变量和解锁互斥锁,通知其他线程有新数据更新。

猜你喜欢

转载自blog.csdn.net/u011831771/article/details/77169670