C in practice: displaying camera video in real time on a text terminal

Because the YUV format is used, the frame rate is very low and the image smears. Reducing the image size as much as possible improves this considerably. The most troublesome problem was a black flash on the image, and at first I could not tell why; if it had been caused by the framebuffer itself there would have been no fix. I finally found the cause: a line of code that cleared the screen to black before each frame was displayed. After deleting that line, the image was immediately clean and perfect.

Compared with video recorded by the "Eggplant" camera software, the difference is hardly visible to the naked eye.

Also, each camera supports only a fixed set of capture sizes; the size cannot be set arbitrarily, otherwise the image cannot be displayed. You can use the v4l2 camera program written earlier to check the supported parameters.
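
For reference, here is a minimal sketch of such a check (not the author's earlier tool): it asks the driver, via VIDIOC_ENUM_FRAMESIZES, which discrete frame sizes it supports for the YUYV format. The device path /dev/video0 is an assumption.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);        // assumed device path
	if (fd < 0) {
		perror("failed to open device");
		return -1;
	}

	struct v4l2_frmsizeenum fsize;
	memset(&fsize, 0, sizeof(fsize));
	fsize.pixel_format = V4L2_PIX_FMT_YUYV;      // same format the display program uses
	while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0) {
		if (fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE)
			printf("%u x %u\n", fsize.discrete.width, fsize.discrete.height);
		fsize.index++;
	}
	close(fd);
	return 0;
}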

On this basis, simply writing the captured data to a file gives the program a video recording function. But the raw data is so large that it is not very useful, since you cannot record for long. I plan to learn the MJPG format before writing that.
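
A rough idea of what the recording part would look like, assuming each dequeued buffer is simply appended to a file inside the capture loop; the file name frames.yuv is made up, and at 640x480 YUYV each frame is 614,400 bytes, so the file grows very quickly.

#include <stdio.h>

/* sketch: append one raw YUYV frame to the recording file;
   it would be called right after VIDIOC_DQBUF in the main loop below,
   e.g. save_frame(mptr[readbuffer.index], readbuffer.length) */
static int save_frame(const unsigned char *buf, size_t len)
{
	FILE *fp = fopen("frames.yuv", "ab");   // "frames.yuv" is a made-up name
	if (fp == NULL)
		return -1;
	size_t written = fwrite(buf, 1, len, fp);
	fclose(fp);
	return written == len ? 0 : -1;
}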

This method also lets you write video playback and slow-playback software. Unfortunately, the frame rate of video captured in YUV format is not high, generally no more than 10 frames per second.
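
A minimal sketch of how slow playback of such a raw recording could work, assuming the frames.yuv file and frame size from the recording sketch above; the conversion and framebuffer drawing would be the same code as in the live program.

#include <stdio.h>
#include <unistd.h>

#define FRAME_BYTES (640 * 480 * 2)          // one 640x480 YUYV frame (assumption)

int main(void)
{
	static unsigned char frame[FRAME_BYTES];
	FILE *fp = fopen("frames.yuv", "rb");    // made-up file name from the recording sketch
	if (fp == NULL) {
		perror("frames.yuv");
		return -1;
	}
	while (fread(frame, 1, FRAME_BYTES, fp) == FRAME_BYTES) {
		// here: the same YUYV -> RGB conversion and framebuffer drawing as in the live program
		usleep(200 * 1000);                  // 200 ms per frame, i.e. 5 fps slow motion
	}
	fclose(fp);
	return 0;
}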

I am preparing to use multiple processes, pipes, shared memory and similar techniques to increase the frame rate. The principle is to reduce the time taken up by non-capture work, such as the YUV conversion, within each capture cycle; in short, put as little other functional code as possible inside the sampling loop.
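
A minimal sketch of that idea, assuming one 640x480 YUYV frame per write: the parent does nothing in its loop except hand raw frames to a pipe (standing in for the VIDIOC_DQBUF/VIDIOC_QBUF cycle), while a child process receives them and would do the conversion and drawing.

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

#define FRAME_BYTES (640 * 480 * 2)              // one YUYV frame (assumption)

int main(void)
{
	int pfd[2];
	if (pipe(pfd) < 0) {
		perror("pipe");
		return -1;
	}

	if (fork() == 0) {                           // child: conversion / display side
		close(pfd[1]);
		static unsigned char frame[FRAME_BYTES];
		size_t got = 0;
		ssize_t n;
		while ((n = read(pfd[0], frame + got, FRAME_BYTES - got)) > 0) {
			got += (size_t)n;
			if (got == FRAME_BYTES) {            // a complete frame has arrived
				// here: YUYV -> RGB conversion and framebuffer drawing
				got = 0;
			}
		}
		close(pfd[0]);
		return 0;
	}

	close(pfd[0]);                               // parent: capture side
	static unsigned char frame[FRAME_BYTES];
	for (int i = 0; i < 100; i++) {              // stands in for the VIDIOC_DQBUF loop
		// here: VIDIOC_DQBUF, copy mptr[index] into frame, then VIDIOC_QBUF immediately
		if (write(pfd[1], frame, FRAME_BYTES) < 0)
			break;
	}
	close(pfd[1]);
	wait(NULL);                                  // wait for the child to finish
	return 0;
}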

Nowadays, using the framebuffer to display video is not a universal method. The most polished approach is to display it in a GUI, but GUI display involves Qt programming, which is too complicated.

Also, by padding with black bytes during the display phase, the video image can be moved anywhere on the screen. I wonder whether the boot animation of Huawei's HarmonyOS (Hongmeng) also uses this kind of display method.
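
A sketch of that placement trick as a standalone helper, assuming a 32-bit framebuffer: fb, xres and yres would come from FBIOGET_VSCREENINFO exactly as in the program below, and x_off/y_off are made-up parameters for where the image should appear.

/* sketch: draw an img_w x img_h image at position (x_off, y_off) on the
   screen; every pixel not covered by the image is filled with black */
static void draw_at(unsigned int *fb, int xres, int yres,
                    const int *img, int img_w, int img_h,
                    int x_off, int y_off)
{
	for (int y = 0; y < yres; y++) {
		for (int x = 0; x < xres; x++) {
			int ix = x - x_off;              // coordinates inside the image
			int iy = y - y_off;
			if (ix >= 0 && ix < img_w && iy >= 0 && iy < img_h)
				fb[y * xres + x] = (unsigned int)img[iy * img_w + ix];
			else
				fb[y * xres + x] = 0;        // padding: black
		}
	}
}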



#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>  // v4l2 header
#include <string.h>
#include <sys/mman.h>
#include <linux/fb.h>


#define  pic_width   640      // possible capture sizes: 1280*720  640*360  960*540  320*240  424*240  848*480  640*480
#define  pic_height  480

#define WT  0    // if the image is skewed, fine-tune this value

static int color1=0;
static int color2=0;
static int sp[3000*2000];      // maximum camera image size
static struct fb_var_screeninfo var;

#define  HCQ  10              // number of v4l2 buffers


int main(void)
{
	
	int fd = open("/dev/video0", O_RDWR);
	if (fd < 0) {
		perror("failed to open device");
		return -1;
	}
	struct v4l2_format vfmt;
	memset(&vfmt, 0, sizeof(vfmt));              // clear the struct before use
	vfmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;     // capture stream
	vfmt.fmt.pix.width = pic_width;
	vfmt.fmt.pix.height = pic_height;
	
	vfmt.fmt.pix.pixelformat = v4l2_fourcc('Y','U','Y','V');
	
	int ret = ioctl(fd, VIDIOC_S_FMT, &vfmt);
	if (ret < 0) {
		perror("failed to set format");
	}
	
	struct v4l2_requestbuffers reqbuffer;
	memset(&reqbuffer, 0, sizeof(reqbuffer));
	reqbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	reqbuffer.count = HCQ;                       // ask for HCQ buffers
	reqbuffer.memory = V4L2_MEMORY_MMAP;
	ret = ioctl(fd, VIDIOC_REQBUFS, &reqbuffer);
	if (ret < 0) {
		perror("failed to request buffers");
	}
	// my understanding of the part below
	struct v4l2_buffer mapbuffer; // this only sets up the read/write rules for mapbuffer; no data is transferred yet, it describes how data from the camera buffers will be saved into the mmapped regions
	mapbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	unsigned char *mptr[HCQ];
	
	
	for (int t = 0; t < HCQ; t++) {
		
		mapbuffer.index = t;
		ret = ioctl(fd, VIDIOC_QUERYBUF, &mapbuffer);   // query the kernel buffer, in preparation for mmap
		if(ret < 0)
		{
			perror("failed to query kernel buffer");
		}
		
		mptr[t] = mmap(NULL, mapbuffer.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mapbuffer.m.offset); // map the camera buffer into user space
		//	size[t] = mapbuffer.length;
		
		ret  = ioctl(fd, VIDIOC_QBUF, &mapbuffer);    // queue the buffer so the camera can write data into it
		if (ret < 0) {
			perror("failed to queue buffer");
		}
	}
//-------------------------- my understanding: everything above is setup --------------------------
//==================================================================================================
	
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ret = ioctl(fd, VIDIOC_STREAMON, &type);     // start the stream; data transfer begins
	if (ret < 0) {
		perror("failed to start streaming");
	}
	
	struct v4l2_buffer readbuffer;
	readbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;  // read according to the rules set up in mapbuffer; the data is taken from the mmap regions
//---------------------------------------------------------------------------
	while(1){
		
		
		ret = ioctl(fd, VIDIOC_DQBUF, &readbuffer); // dequeue the current buffer; the index cycles, increasing by 1 on each VIDIOC_DQBUF
		if (ret < 0) {
			perror("failed to dequeue buffer");
		}
		int i=readbuffer.index;
		int len = readbuffer.length;
		
		
		char (*p)[4]=(char (*)[4])mptr[i];    //   p[][0]=Y   p[][1]=U  p[][2]=Y  p[][3]=V
		// R = Y + 1.4075 * (V - 128)
		// G = Y - 0.3455 * (U - 128) - 0.7169 * (V - 128)
		// B = Y + 1.779  * (U - 128)
		
		
		int t=0;
		int u=0;
		while(1){
			if(4*t>=len){
				break;
			}
			int Y1=(unsigned char)p[t][0];
			if((4*t+1)>=len){
				break;
			}
			int U=(unsigned char)p[t][1];
			if((4*t+2)>=len){
				break;
			}
			int Y2=(unsigned char)p[t][2];
			if((4*t+3)>=len){
				break;
			}
			int V=(unsigned char)p[t][3];
			//-------------------------------------------------
			int B0=Y1+1.779*(U-128);
			if(B0>255)  B0=255;
			if(B0<0)    B0=0;
			
			int G0=Y1-0.3455*(U-128)-0.7169*(V-128);
			if(G0>255)  G0=255;
			if(G0<0)    G0=0;
			
			int R0=Y1+1.4075*(V-128);
			if(R0>255)  R0=255;
			if(R0<0)    R0=0;
			
			int B1=Y2+1.779*(U-128);
			if(B1>255)  B1=255;
			if(B1<0)    B1=0;
			
			int G1=Y2-0.3455*(U-128)-0.7169*(V-128);
			if(G1>255)  G1=255;
			if(G1<0)    G1=0;
			
			int R1=Y2+1.4075*(V-128);
			if(R1>255)  R1=255;
			if(R1<0)    R1=0;
			
			color1=(R0<<16)|(G0<<8)|B0;
			color2=(R1<<16)|(G1<<8)|B1;
			
			sp[u]=color1;
			sp[u+1]=color2;
			
			t++;
			u=u+2;
			
		}
		
		
		
		//--------------------------------
		int fd_fb = open("/dev/fb0", O_RDWR);
		if (fd_fb < 0)
		{
			puts("/dev/fb0 error");
			return -1;
		}
		if (ioctl(fd_fb, FBIOGET_VSCREENINFO, &var))
		{
			puts("ioctl error");
			return -1;
		}
		
//	unsigned int  pixel_width = var.bits_per_pixel / 8;                  //deepin=32
		int screen_size = var.xres * var.yres * var.bits_per_pixel / 8;     // framebuffer size in bytes; here the screen resolution just matches the image width and height
		
		unsigned char *fb_base = mmap(NULL, screen_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd_fb, 0);
		if (fb_base == MAP_FAILED)    // mmap signals failure with MAP_FAILED rather than NULL
		{
			puts("mmap error");
			return -1;
		}
//	memset(fb_base,0, screen_size);     // paint the background black; this line must stay deleted, otherwise it causes a black band and screen flicker
		
		int zz=0;
		var.xres=var.xres+WT;            // if the image is skewed, fine-tune the value of WT
		for(int a=0;a<var.yres;a++){
			for(int b=0;b<var.xres;b++){
				
				
				unsigned int (*p)[var.xres]=(unsigned int (*)[var.xres])fb_base;  // must use the screen width here, not the image width, because the two sizes can differ
				if(b<pic_width){               // if the image is narrower than the framebuffer, the rest of the row is filled with 0 (black)
					p[a][b]=sp[zz];
					zz++;
				}else{
					p[a][b]=0;                 // fill with black
				}
				
			}
		}
		
		ret  = ioctl(fd, VIDIOC_QBUF, &readbuffer);    // put the buffer back into the capture queue
		if (ret < 0) {
			perror("failed to requeue buffer");
		}
		
		munmap(fb_base, screen_size);
		close(fd_fb);
		
	}
	puts("over");
	
	return 0;
}

Origin blog.csdn.net/m0_59802969/article/details/134601929