Streaming Media Analysis: The FFmpeg Implementation of the WebRTC Protocol

1. Publishing (push stream) handling:

AVOutputFormat ff_webrtc_muxer = {
    .name           = "webrtc",
    .long_name      = "webrtc muxer",
    .priv_data_size = sizeof(WEBRTCContext),
    .audio_codec    = AV_CODEC_ID_OPUS,
    .video_codec    = AV_CODEC_ID_H264,
    .init           = webrtc_init,
    .write_header   = webrtc_write_header,
    .write_packet   = webrtc_write_packet,
    .write_trailer  = webrtc_write_close,
    .deinit         = webrtc_deinit,
    .flags          = AVFMT_NOFILE | AVFMT_GLOBALHEADER,
    .priv_class     = &webrtc_muxer_class,
};
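
When built as a plugin, the muxer presumably needs a registration helper symmetric to the demuxer's register_webrtc_demuxer() shown in section 2. A sketch, assuming the same BUILD_AS_PLUGIN setup and a pre-5.0 FFmpeg that still exports av_register_output_format():

#ifdef BUILD_AS_PLUGIN
/* Hypothetical counterpart to register_webrtc_demuxer() in section 2;
 * av_register_output_format() was removed in FFmpeg 5.0, so this only
 * applies to older trees. */
void register_webrtc_muxer()
{
    av_log(NULL, AV_LOG_INFO, "register_webrtc_muxer\n");
    av_register_output_format(&ff_webrtc_muxer);
}
#endif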

The webrtc_init function calls webrtc_open:

static int webrtc_init(struct AVFormatContext *s)
{
	WEBRTCContext *h = s->priv_data;
	int ret;

	av_log(s, AV_LOG_INFO, "webrtc_init %s\n", s->filename);
	h->avctx = s;
	/* stream indexes are resolved lazily in webrtc_write_packet */
	h->video_stream_index = -1;
	h->audio_stream_index = -1;
	h->time_base_den = 30;  /* default until the real time base is known */

	ret = webrtc_open(s, s->filename);
	if (ret)
		av_log(s, AV_LOG_ERROR, "webrtc_init: webrtc_open failed, %s\n",
		       av_err2str(ret));
	return ret;
}

The webrtc_open function calls yang_create_metaConnection to create the WebRTC service interface; connectSfuServer then connects to the SRS server. The URI must start with webrtc:// or the open fails.

static int webrtc_open(AVFormatContext *h, const char *uri)
{
	WEBRTCContext *s = h->priv_data;
	av_log(h, AV_LOG_INFO, "webrtc_open %s\n", uri);
	s->video_stream_index = -1;
	s->audio_stream_index = -1;

	s->video_codec = AV_CODEC_ID_H264;
	s->audio_codec = AV_CODEC_ID_OPUS;

	if (!av_strstart(uri, "webrtc://", NULL)) {
		return AVERROR(EINVAL);
	}

	s->handle = (YangMetaConnection*) calloc(1, sizeof(YangMetaConnection));
	if (!s->handle)
		return AVERROR(ENOMEM);
	yang_create_metaConnection(s->handle);

	/* wire up the callbacks through which libmetartc reports media
	 * configuration and delivers received audio/video frames */
	YangStreamConfig stream;
	memset(&stream, 0, sizeof(YangStreamConfig));
	stream.rtcCallback.context = s;
	stream.rtcCallback.setMediaConfig = g_ff_rtc_setPlayMediaConfig;
	stream.rtcCallback.sendRequest = g_ff_rtc_sendRequest;
	stream.recvCallback.context = s;
	stream.recvCallback.receiveAudio = g_ff_rtc_receiveAudio;
	stream.recvCallback.receiveVideo = g_ff_rtc_receiveVideo;

	if (s->handle->init) s->handle->init(s->handle->session, &stream, s);
	if (s->handle->initParam) s->handle->initParam(s->handle->session, uri, Yang_Stream_Publish);
	/* Yang_Server_Srs / Yang_Server_Zlm / Yang_Server_P2p */
	int32_t mediaServer = Yang_Server_Srs;
	if (s->handle->connectSfuServer(s->handle->session, mediaServer) != 0) {
		/* note: the failure is only logged here, not propagated */
		av_log(h, AV_LOG_ERROR, "connect failed! uri= %s\n", uri);
	}

	av_log(h, AV_LOG_INFO, "webrtc_open exit\n");
	return 0;
}
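
Because the muxer sets AVFMT_NOFILE, no AVIOContext is opened; avformat_write_header() invokes webrtc_init, which performs the whole connection setup above. A minimal publish sketch against the libavformat API (the URL and stream parameters are placeholders; most error handling is trimmed):

#include <libavformat/avformat.h>

/* Sketch only: hypothetical URL, minimal error handling. */
static int publish_webrtc(void)
{
    AVFormatContext *oc = NULL;
    int ret = avformat_alloc_output_context2(&oc, NULL, "webrtc",
                                             "webrtc://192.168.1.100/live/livestream");
    if (ret < 0)
        return ret;

    AVStream *vst = avformat_new_stream(oc, NULL);   /* H.264 video */
    vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codecpar->codec_id   = AV_CODEC_ID_H264;
    vst->codecpar->width      = 1280;                /* placeholders */
    vst->codecpar->height     = 720;

    AVStream *ast = avformat_new_stream(oc, NULL);   /* Opus audio */
    ast->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
    ast->codecpar->codec_id    = AV_CODEC_ID_OPUS;
    ast->codecpar->sample_rate = 48000;
    ast->codecpar->channels    = 2;                  /* pre-5.1 channel field */

    /* AVFMT_NOFILE: no avio_open(); this calls webrtc_init/webrtc_open */
    ret = avformat_write_header(oc, NULL);
    if (ret >= 0) {
        /* ... av_interleaved_write_frame(oc, pkt) per encoded packet ... */
        av_write_trailer(oc);
    }
    avformat_free_context(oc);
    return ret;
}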

Sending data: the webrtc_write_packet function calls publishAudio and publishVideo to send the audio and video streams.

static int webrtc_write_packet(AVFormatContext *h, AVPacket *pkt)
{
	if (pkt == NULL) return 0;
	WEBRTCContext *s = h->priv_data;
	YangMetaConnection *metaconn = s->handle;
	int ret = 0;

	/* lazily resolve the H.264/Opus stream indexes on the first packet */
	if (s->video_stream_index == -1 || s->audio_stream_index == -1) {
		for (int i = 0; i < h->nb_streams; i++) {
			AVStream *st = h->streams[i];

			if (st->codecpar->codec_id == AV_CODEC_ID_H264) {
				s->video_stream_index = st->index;
				s->time_base_den = st->time_base.den;

				/* pass SPS/PPS (extradata) to libmetartc before any frames */
				if (st->codecpar->extradata) {
					metaconn->setExtradata(metaconn->session, Yang_VED_264,
					                       (uint8_t*)st->codecpar->extradata,
					                       st->codecpar->extradata_size);
				}
			}
			if (st->codecpar->codec_id == AV_CODEC_ID_OPUS)
				s->audio_stream_index = st->index;
		}
	}

	if (pkt->stream_index == s->video_stream_index) {
		//if(get_videodata_start(pkt->data)) return ret;
		s->video_frame.nb = pkt->size;
		s->video_frame.payload = pkt->data;
		/* convert pts to microseconds; assumes time_base.num == 1 */
		s->video_frame.pts = pkt->pts * 1000000 / s->time_base_den;

		ret = metaconn->publishVideo(metaconn->session, &s->video_frame);
	} else if (pkt->stream_index == s->audio_stream_index) {
		s->audio_frame.nb = pkt->size;
		s->audio_frame.payload = pkt->data;
		s->audio_frame.pts = pkt->pts;  /* audio pts passed through as-is */
		ret = metaconn->publishAudio(metaconn->session, &s->audio_frame);
	}

	return ret;
}
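
One caveat in webrtc_write_packet: the conversion pkt->pts*1000000/s->time_base_den only yields microseconds when the stream time base numerator is 1 (true for typical 1/90000 or 1/1000 bases, but not guaranteed). A more general alternative, an editor's sketch rather than part of the original patch, rescales with av_rescale_q():

#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Sketch: convert a pts from an arbitrary stream time base to microseconds. */
static int64_t pts_to_us(int64_t pts, AVRational time_base)
{
    return av_rescale_q(pts, time_base, (AVRational){1, 1000000});
}

Inside webrtc_write_packet this would read s->video_frame.pts = pts_to_us(pkt->pts, h->streams[pkt->stream_index]->time_base);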

2. Playback (pull stream) handling:

AVInputFormat ff_webrtc_demuxer = {
    .name           = "webrtc",
    .long_name      = "webrtc demuxer",
    .priv_data_size = sizeof(WEBRTCContext),
    .read_probe     = webrtc_probe,
    .read_header    = webrtc_read_header,
    .read_packet    = webrtc_read_packet,
    .read_close     = webrtc_read_close,
    .extensions     = "webrtc",
    .priv_class     = &webrtc_class,
    .flags          = AVFMT_NOFILE,
};

#ifdef BUILD_AS_PLUGIN
void register_webrtc_demuxer()
{
    av_log(NULL, AV_LOG_INFO, "register_webrtc_demuxer\n");
    av_register_input_format(&ff_webrtc_demuxer);
}
#endif
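
Because the demuxer is AVFMT_NOFILE as well, playback code selects it by name rather than by probing a file. A minimal sketch of opening the stream (hypothetical URL; assumes the demuxer has been registered as above, and uses the pre-5.0 non-const API to match av_register_input_format):

#include <libavformat/avformat.h>

/* Sketch: open the webrtc demuxer by name; the URL is a placeholder. */
static int open_webrtc_input(AVFormatContext **ic)
{
    AVInputFormat *fmt = av_find_input_format("webrtc");
    if (!fmt)
        return AVERROR_DEMUXER_NOT_FOUND;
    return avformat_open_input(ic, "webrtc://192.168.1.100/live/livestream",
                               fmt, NULL);
}

After avformat_open_input() returns, webrtc_read_header has already connected and waited for the first packets, so avformat_find_stream_info() and the usual av_read_frame() loop follow as with any other input.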

Connecting to the server: webrtc_read_header calls webrtc_open to connect to the SRS server.

static int webrtc_read_header(AVFormatContext *s)
{
    WEBRTCContext *h = s->priv_data;
    int ret;

    av_log(s, AV_LOG_INFO, "webrtc_read_header, filename %s\n", s->filename);

    s->flags |= AVFMT_FLAG_GENPTS;
    /* streams are created on the fly as packets arrive (see create_stream) */
    s->ctx_flags |= AVFMTCTX_NOHEADER;
    s->fps_probe_size = 0;
    s->max_analyze_duration = FFMAX(s->max_analyze_duration, 5*AV_TIME_BASE);
    s->probesize = FFMAX(s->probesize, 512*1024);
    h->avctx = s;
    h->video_stream_index_in = 0;
    h->audio_stream_index_in = 1;
    h->video_stream_index_out = -1;
    h->audio_stream_index_out = -1;
    ret = webrtc_open(s, s->filename);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "webrtc_read_header: webrtc_open failed, %s\n", av_err2str(ret));
        return ret;
    }

    /* give up if no data arrives within 5 seconds */
    ret = packet_queue_wait_start(&h->queue, h, INT64_C(1000) * 5000);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "webrtc_read_header wait failed, %s\n", av_err2str(ret));
        webrtc_close(s);
        return ret;
    }

    av_log(s, AV_LOG_INFO, "webrtc_read_header exit\n");
    return 0;
}

Receiving audio/video data: when the libmetartc library receives data, it hands the audio and video back through the g_ff_rtc_receiveAudio and g_ff_rtc_receiveVideo callbacks, which store the packets in the queue via packet_queue_put (a sketch of such a queue follows the callback code below).

static void g_ff_rtc_receiveAudio(void *user, YangFrame *audioFrame)
{
	if (user == NULL) return;
	WEBRTCContext *s = (WEBRTCContext*)user;
	AVPacket *pkt = &s->audio_pkt;

	if (av_new_packet(pkt, audioFrame->nb) < 0) {
		s->error_code = AVERROR(ENOMEM);
		return;
	}
	memcpy(pkt->data, audioFrame->payload, audioFrame->nb);
	pkt->stream_index = s->audio_stream_index_in;
	pkt->dts = audioFrame->pts;
	pkt->pts = audioFrame->pts;
	packet_queue_put(&s->queue, pkt, s);
}

static void g_ff_rtc_receiveVideo(void *user, YangFrame *videoFrame)
{
	if (user == NULL) return;

	WEBRTCContext *s = (WEBRTCContext*)user;
	AVFormatContext *h = s->avctx;
	if (videoFrame->frametype == YANG_Frametype_Spspps) {
		/* cache the SPS/PPS once; frames are dropped until it exists */
		if (s->extradata_size > 0) return;
		uint8_t headers[128];
		memset(headers, 0, 128);
		int32_t headerLen = 0;
		if (s->handle->parseHeader)
			s->handle->parseHeader(s->video_codec, videoFrame->payload,
			                       headers, &headerLen);
		if (headerLen > 0) {
			s->extradata = av_malloc(headerLen);
			if (!s->extradata) {
				s->error_code = AVERROR(ENOMEM);
				return;
			}
			memcpy(s->extradata, headers, headerLen);
			s->extradata_size = headerLen;

			/* just a fake (empty) video packet so webrtc_read_packet
			 * creates the stream */
			if (h->ctx_flags & AVFMTCTX_NOHEADER) {
				AVPacket *pkt = &s->video_pkt;
				av_new_packet(pkt, 0);
				pkt->stream_index = s->video_stream_index_in;
				packet_queue_put(&s->queue, pkt, s);
			}
		}
		return;
	}
	if (s->extradata_size == 0) return;

	AVPacket *pkt = &s->video_pkt;
	if (av_new_packet(pkt, videoFrame->nb) < 0) {
		s->error_code = AVERROR(ENOMEM);
		return;
	}
	memcpy(pkt->data, videoFrame->payload, videoFrame->nb);
	/* overwrite the first 4 bytes with s->video_header,
	 * presumably an Annex-B start code */
	memcpy(pkt->data, s->video_header, 4);

	pkt->stream_index = s->video_stream_index_in;
	pkt->dts = videoFrame->pts;
	pkt->pts = videoFrame->pts;
	packet_queue_put(&s->queue, pkt, s);
}
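
The packet_queue_* helpers (packet_queue_put, packet_queue_get, packet_queue_wait_start) are not shown in the article. A minimal sketch of such a producer/consumer queue, assuming pthreads, a simple linked list, and simplified two-argument signatures (the real helpers also carry the WEBRTCContext, and their implementation may differ):

#include <pthread.h>
#include <libavformat/avformat.h>

/* Hypothetical queue state; field names are illustrative only. */
typedef struct PacketNode {
    AVPacket pkt;
    struct PacketNode *next;
} PacketNode;

typedef struct PacketQueue {
    PacketNode *first, *last;
    int nb_packets;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} PacketQueue;

static int packet_queue_put_sketch(PacketQueue *q, AVPacket *pkt)
{
    PacketNode *node = av_mallocz(sizeof(*node));
    if (!node)
        return AVERROR(ENOMEM);
    av_packet_move_ref(&node->pkt, pkt);   /* queue takes ownership */

    pthread_mutex_lock(&q->mutex);
    if (q->last)
        q->last->next = node;
    else
        q->first = node;
    q->last = node;
    q->nb_packets++;
    pthread_cond_signal(&q->cond);         /* wake a blocked reader */
    pthread_mutex_unlock(&q->mutex);
    return 0;
}

static int packet_queue_get_sketch(PacketQueue *q, AVPacket *pkt)
{
    pthread_mutex_lock(&q->mutex);
    while (!q->first)
        pthread_cond_wait(&q->cond, &q->mutex);   /* block until data */
    PacketNode *node = q->first;
    q->first = node->next;
    if (!q->first)
        q->last = NULL;
    q->nb_packets--;
    pthread_mutex_unlock(&q->mutex);

    av_packet_move_ref(pkt, &node->pkt);
    av_free(node);
    return 1;   /* mirror the >0 success convention seen in webrtc_read_packet */
}

packet_queue_wait_start would follow the same pattern with pthread_cond_timedwait(), returning an error such as AVERROR(ETIMEDOUT) when nothing arrives before the deadline.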

The webrtc_read_packet function pulls packets out of the queue cache through packet_queue_get.

static int webrtc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i;
    WEBRTCContext *h = s->priv_data;
    AVStream *st;

    do {
        ret = packet_queue_get(&h->queue, h, pkt);

        if (ret < 0)
            break;

        /* now find stream */
        for (i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
            if (pkt->stream_index == h->video_stream_index_in
                && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
                break;
            } else if (pkt->stream_index == h->audio_stream_index_in
                       && st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                break;
            }
        }
        if (i == s->nb_streams) {
            /* no AVStream yet for this index: create it on the fly
             * (index 0 is video, 1 is audio, as set in webrtc_read_header) */
            static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO};
            st = create_stream(s, stream_types[pkt->stream_index]);
            if (!st) {
                av_packet_unref(pkt);
                ret = AVERROR(ENOMEM);
                break;
            }
        }

        if (pkt->size <= 0) {
            // drop fake packet
            av_packet_unref(pkt);
            continue;
        }

        if (pkt->stream_index == h->video_stream_index_in) {
            pkt->stream_index = h->video_stream_index_out;

        } else if (pkt->stream_index == h->audio_stream_index_in) {
            pkt->stream_index = h->audio_stream_index_out;

        } else {
            ret = 0;   /* unknown stream index: mark the packet to be dropped */
        }

        if (!ret) {
            av_log(s, AV_LOG_INFO, "drop pkt with index %d and continue\n",
                   pkt->stream_index);
            av_packet_unref(pkt);
        }
    } while (!ret);

    ret = ret > 0 ? 0 : ret;
    if (ret)
        av_log(s, AV_LOG_WARNING, "webrtc_read_packet, %s\n", av_err2str(ret));
    return ret;
}
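
create_stream() itself is not included in the article. A plausible reconstruction, assuming it allocates the AVStream, fills the codec parameters from the fields cached in WEBRTCContext, and records the output index (illustrative only; the actual implementation may differ):

/* Hypothetical reconstruction of create_stream(); field names follow the
 * WEBRTCContext usage visible in the functions above. */
static AVStream *create_stream(AVFormatContext *s, enum AVMediaType type)
{
    WEBRTCContext *h = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return NULL;

    st->codecpar->codec_type = type;
    if (type == AVMEDIA_TYPE_VIDEO) {
        st->codecpar->codec_id = h->video_codec;   /* AV_CODEC_ID_H264 */
        if (h->extradata_size > 0) {
            /* hand the cached SPS/PPS over as stream extradata */
            st->codecpar->extradata =
                av_mallocz(h->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codecpar->extradata)
                return NULL;
            memcpy(st->codecpar->extradata, h->extradata, h->extradata_size);
            st->codecpar->extradata_size = h->extradata_size;
        }
        h->video_stream_index_out = st->index;
    } else if (type == AVMEDIA_TYPE_AUDIO) {
        st->codecpar->codec_id = h->audio_codec;   /* AV_CODEC_ID_OPUS */
        h->audio_stream_index_out = st->index;
    }
    /* the real implementation may also set the stream time base here */
    return st;
}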

In the next chapter we will analyze how the metartc library implements the WebRTC protocol.

Reposted from blog.csdn.net/u012794472/article/details/126828124