IJKPlayer播放器(https://github.com/Bilibili/ijkplayer)是一款全格式播放器,支持(rtsp/rtmp/hls)等媒体协议,支持字幕,快进快退等功能。但就播放器的功能而言,
这些是不够的,比如没有截屏,不支持手势等功能。下面就给视频播放器添加截屏功能,来熟悉IJKPlayer播放器的源码结构。
IJKPlayer播放器底层依赖于ffmpeg,基于ffplay进行改进。所以要添加截屏功能,就从ffplay入手。ffplay的核心代码被移植到ff_ffplay.c和ff_ffplay.h里面,媒体流打开时依然是从stream_open()函数开始,而在IJKPlayer里面,被封装到ffp_prepare_async()里面,所以还是从stream_open里面开始着手。stream_open里面开启的是视频流读取线程
read_thread,在视频流读取线程里面,调用ffmpeg的api,打开网络流,读取音视频流,放进缓冲队列,进一步解码播放。先看stream_open的代码。
添加截图功能,需要在video_refresh_thread里面添加,因为video_refresh_thread里面的视频帧是经过解码解复用的,直接得到的就是解码后的YUV图像,也有可能是经过旋转的,所以不用再做太多的判断和转换。添加全局静态变量,用来保存实时需要截图编码的视频画面。
给上述的视频帧填充YUV数据:
现在获取到的AVFrame* g_screenshot_frame是解码后的YUV,可以直接保存成BMP图片,也可以对此进行编码形成jpg或者png图片,以下是将图片编码成jpg图片格式。
现在接通的功能已经做好,但是Java层还不能直接调用。这里给JNI添加接口,Java就可以直接调用来实现截屏。给ijkplayer_jni.c里面的添加方法:
然后在g_method里面进行注册:
在ijkplayer.c里面添加方法,并且在ijkplayer.h里面进行声明,ijkplayer_jni.c才能调用。
上面有一个宏定义,FFP_REQ_SCREENSHOT需要在ff_ffmsg.h里面添加,如下:
然后,就是要进一步封装ffplay里面的截图功能了,在ff_ffplay.c里面添加截图方法并在ff_ffplay.h里面声明:
到这里,底层JNI的截图已经完成,上层的Java就可以直接调用了。在ijkplayer-java jar项目里面找到IjkMediaPlayer.java,添加如下native方法:
这样,在其他模块,就可以调用截图功能了!!
相关链接:http://blog.csdn.net/veilling/article/details/52711159
这些是不够的,比如没有截屏,不支持手势等功能。下面就给视频播放器添加截屏功能,来熟悉IJKPlayer播放器的源码结构。
IJKPlayer播放器底层依赖于ffmpeg,基于ffplay进行改进。所以要添加截屏功能,就从ffplay入手。ffplay的核心代码被移植到ff_ffplay.c和ff_ffplay.h里面,媒体流打开时依然是从stream_open()函数开始,而在IJKPlayer里面,被封装到ffp_prepare_async()里面,所以还是从stream_open里面开始着手。stream_open里面开启的是视频流读取线程
read_thread,在视频流读取线程里面,调用ffmpeg的api,打开网络流,读取音视频流,放进缓冲队列,进一步解码播放。先看stream_open的代码。
/*
 * stream_open - allocate and initialize the per-playback VideoState.
 *
 * Creates the frame/packet queues, clocks, and the two worker threads
 * (video refresh and stream read). Returns the new VideoState on success,
 * or NULL on failure (all partially-created resources are released).
 * Precondition: ffp->is must be NULL (asserted below).
 */
static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat)
{
assert(!ffp->is);
VideoState *is;
is = av_mallocz(sizeof(VideoState));
if (!is)
return NULL;
is->filename = av_strdup(filename);
if (!is->filename)
goto fail;
is->iformat = iformat;
is->ytop = 0;
is->xleft = 0;
/* start video display */
if (frame_queue_init(&is->pictq, &is->videoq, ffp->pictq_size, 1) < 0)
goto fail;
#ifdef FFP_MERGE
if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
goto fail;
#endif
if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
goto fail;
/* NOTE: the #else branch collapses the subtitle test to a constant 0 so the
 * ||-chain stays syntactically valid when FFP_MERGE is not defined. */
if (packet_queue_init(&is->videoq) < 0 ||
packet_queue_init(&is->audioq) < 0 ||
#ifdef FFP_MERGE
packet_queue_init(&is->subtitleq) < 0)
#else
0)
#endif
goto fail;
/* condition variable the read thread blocks on when queues are full */
if (!(is->continue_read_thread = SDL_CreateCond())) {
av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
goto fail;
}
init_clock(&is->vidclk, &is->videoq.serial);
init_clock(&is->audclk, &is->audioq.serial);
init_clock(&is->extclk, &is->extclk.serial);
is->audio_clock_serial = -1;
is->audio_volume = SDL_MIX_MAXVOLUME;
is->muted = 0;
is->av_sync_type = ffp->av_sync_type;
is->play_mutex = SDL_CreateMutex();
ffp->is = is;
/* honor the "prepare without auto-start" option */
is->pause_req = !ffp->start_on_prepared;
/* create the video refresh (display) thread */
is->video_refresh_tid = SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");
if (!is->video_refresh_tid) {
av_freep(&ffp->is);
return NULL;
}
/* create the media stream read thread (read_thread pulls packets into the queues) */
is->read_tid = SDL_CreateThreadEx(&is->_read_tid, read_thread, ffp, "ff_read");
if (!is->read_tid) {
av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
/* NOTE: earlier gotos legally jump into this block; the refresh thread is
 * joined (if started) before stream_close tears everything down. */
fail:
is->abort_request = true;
if (is->video_refresh_tid)
SDL_WaitThread(is->video_refresh_tid, NULL);
stream_close(ffp);
return NULL;
}
return is;
}
添加截图功能,需要在video_refresh_thread里面添加,因为video_refresh_thread里面的视频帧是经过解码解复用的,直接得到的就是解码后的YUV图像,也有可能是经过旋转的,所以不用再做太多的判断和转换。添加全局静态变量,用来保存实时需要截图编码的视频画面。
/* Serializes access to g_screenshot_frame between the video decode thread
 * (fill_Screen_Frame) and screenshot requests (ffp_screen_shot_l).
 * BUGFIX: the original declared the mutex without initializing it; a
 * pthread mutex must be initialized with PTHREAD_MUTEX_INITIALIZER (or
 * pthread_mutex_init) before pthread_mutex_lock may be called on it. */
static pthread_mutex_t screenshot_lock = PTHREAD_MUTEX_INITIALIZER;
/* Latest decoded YUV frame cached for screenshot encoding.
 * Access (read, write, free) only while holding screenshot_lock. */
static AVFrame *g_screenshot_frame = NULL;
给上述的视频帧填充YUV数据:
/*
 * Cache the latest decoded video frame into g_screenshot_frame so a later
 * screenshot request can encode it. Called from the video decode thread for
 * every frame, before it is queued for display.
 *
 * ffp       - player instance (used only to check abort state)
 * src_frame - decoded YUV frame to copy; must have a valid format/size
 *
 * The cached frame's pixel buffer is allocated with av_malloc() and attached
 * via av_image_fill_arrays(), so the frame does NOT own it; it must be freed
 * explicitly with av_freep(&frame->data[0]) before av_frame_free().
 */
static void fill_Screen_Frame(FFPlayer *ffp, AVFrame *src_frame)
{
    VideoState *is = ffp->is;
    if (!is || is->abort_request || src_frame->format < 0)
        return;

    int fmt = src_frame->format;
    pthread_mutex_lock(&screenshot_lock);

    /* Drop the cached frame if the stream geometry or pixel format changed.
     * BUGFIX: the original leaked the av_malloc()'d pixel buffer here, and
     * never checked for a format change at equal dimensions. */
    if (g_screenshot_frame &&
        (g_screenshot_frame->width  != src_frame->width  ||
         g_screenshot_frame->height != src_frame->height ||
         g_screenshot_frame->format != fmt)) {
        av_freep(&g_screenshot_frame->data[0]);
        av_frame_free(&g_screenshot_frame);
    }

    if (g_screenshot_frame == NULL) {
        g_screenshot_frame = av_frame_alloc();
        if (g_screenshot_frame == NULL) {
            pthread_mutex_unlock(&screenshot_lock);
            return; /* BUGFIX: original had "return 0;" in a void function */
        }
        unsigned char *buffer = (unsigned char *)av_malloc(
            av_image_get_buffer_size(fmt, src_frame->width, src_frame->height, 1));
        if (buffer == NULL) { /* BUGFIX: allocation was unchecked */
            av_frame_free(&g_screenshot_frame);
            pthread_mutex_unlock(&screenshot_lock);
            return;
        }
        av_image_fill_arrays(g_screenshot_frame->data, g_screenshot_frame->linesize,
                             buffer, fmt, src_frame->width, src_frame->height, 1);
        g_screenshot_frame->width  = src_frame->width;
        g_screenshot_frame->height = src_frame->height;
        g_screenshot_frame->format = fmt;
    }

    /* Unconditional copy: after the reallocation above the cached frame is
     * always compatible with src_frame (the original skipped one frame after
     * a resolution change). */
    av_image_copy(g_screenshot_frame->data, g_screenshot_frame->linesize,
                  (const uint8_t **)(src_frame->data), src_frame->linesize,
                  fmt, src_frame->width, src_frame->height);

    pthread_mutex_unlock(&screenshot_lock);
}
已解码的视频并不是立即播放,而是放到另一个队列里面,从而和音频等来进行同步。所以对截屏视频帧同步可以放在视频帧入队之前(以下代码是精简的):
/*
 * Video decode loop: pulls decoded frames via get_video_frame(), computes
 * their pts/duration, caches each frame for the screenshot feature, and
 * hands it to the picture queue for A/V-synchronized display.
 * Runs until get_video_frame() reports an error or abort.
 * Returns 0 (thread exit code).
 */
static int ffplay_video_thread(void *arg)
{
FFPlayer *ffp = arg;
VideoState *is = ffp->is;
AVFrame *frame = av_frame_alloc();
double pts;
double duration;
int ret;
AVRational tb = is->video_st->time_base;
AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
ffp_notify_msg2(ffp, FFP_MSG_VIDEO_ROTATION_CHANGED, is->stream_rotate);
if (!frame) {
return AVERROR(ENOMEM);
}
for (;;) {
ret = get_video_frame(ffp, frame);
if (ret < 0)
goto the_end;
if (!ret)
continue;
/* duration = 1/fps (in seconds); 0 when the frame rate is unknown */
duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
fill_Screen_Frame(ffp,frame);/* cache this decoded frame for screenshots */
ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
av_frame_unref(frame);
if (ret < 0)
goto the_end;
}
the_end:
av_frame_free(&frame);
return 0;
}
现在获取到的AVFrame* g_screenshot_frame是解码后的YUV,可以直接保存成BMP图片,也可以对此进行编码形成jpg或者png图片,以下是将图片编码成jpg图片格式。
/*
 * Encode one YUV frame as a single-frame MJPEG file (i.e. a JPEG image).
 *
 * out_file - destination path for the .jpg file
 * bmp      - decoded frame to encode; width/height/data must be valid
 *
 * Returns 0 on success, -1 on any failure.
 *
 * Uses the legacy FFmpeg APIs (AVStream->codec, avcodec_encode_video2,
 * av_free_packet) that this ijkplayer tree is built against.
 * BUGFIX: the original leaked pFormatCtx, the open AVIO handle and the open
 * codec on every error path; this version funnels all exits through one
 * goto-based cleanup block.
 */
int do_screenshot(const char* out_file, AVFrame* bmp)
{
    AVFormatContext *pFormatCtx = NULL;
    AVOutputFormat *fmt = NULL;
    AVStream *video_st = NULL;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVPacket pkt;
    int got_picture = 0;
    int codec_opened = 0;
    int ret = -1;

    LOGV("ff_ffplay do_screenshot");
    if (!bmp)
        return -1;

    pFormatCtx = avformat_alloc_context();
    if (!pFormatCtx) /* BUGFIX: allocation was unchecked */
        return -1;

    fmt = av_guess_format("mjpeg", NULL, NULL);
    if (!fmt) {
        LOGV("do_screenshot guess fmt is null");
        goto fail;
    }
    pFormatCtx->oformat = fmt;

    if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
        LOGV("do_screenshot Couldn't open output file.");
        goto fail;
    }

    video_st = avformat_new_stream(pFormatCtx, 0);
    if (video_st == NULL)
        goto fail;

    pCodecCtx = video_st->codec; /* legacy API; the per-stream codec context */
    if (!pCodecCtx) {
        LOGV("do_screenshot pCodecCtx is null");
        goto fail;
    }
    pCodecCtx->codec_id   = fmt->video_codec;
    pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    pCodecCtx->pix_fmt    = AV_PIX_FMT_YUVJ420P; /* full-range YUV420, as MJPEG expects */
    pCodecCtx->width      = bmp->width;
    pCodecCtx->height     = bmp->height;
    pCodecCtx->time_base.num = 1;   /* nominal 25 fps; irrelevant for a single image */
    pCodecCtx->time_base.den = 25;

    /* Output some information */
    av_dump_format(pFormatCtx, 0, out_file, 1);
    LOGV("do_screenshot codec_id:%d h:%d w:%d", pCodecCtx->codec_id, bmp->height, bmp->width);

    pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
    if (!pCodec) {
        LOGV("do_screenshot avcodec_find_encoder error");
        goto fail;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        LOGV("do_screenshot avcodec_open2 error");
        goto fail;
    }
    codec_opened = 1;

    if (avformat_write_header(pFormatCtx, NULL) < 0) /* BUGFIX: was unchecked */
        goto fail;

    /* Generous packet size: 3 bytes per pixel comfortably holds one JPEG. */
    if (av_new_packet(&pkt, pCodecCtx->width * pCodecCtx->height * 3) < 0)
        goto fail;

    /* Encode */
    ret = avcodec_encode_video2(pCodecCtx, &pkt, bmp, &got_picture);
    if (ret < 0) {
        LOGV("Encode Error.\n");
        av_free_packet(&pkt);
        ret = -1;
        goto fail;
    }
    if (got_picture == 1) {
        pkt.stream_index = video_st->index;
        av_write_frame(pFormatCtx, &pkt);
    }
    av_free_packet(&pkt);
    av_write_trailer(pFormatCtx);
    ret = 0;

fail:
    if (codec_opened)
        avcodec_close(pCodecCtx);
    if (pFormatCtx) {
        if (pFormatCtx->pb)
            avio_close(pFormatCtx->pb);
        avformat_free_context(pFormatCtx);
    }
    return ret;
}
现在接通的功能已经做好,但是Java层还不能直接调用。这里给JNI添加接口,Java就可以直接调用来实现截屏。给ijkplayer_jni.c里面的添加方法:
/*
 * JNI entry point for IjkMediaPlayer.screenShot(String path).
 * Converts the Java path to UTF-8 and forwards it to ijkmp_screen_shot().
 * Throws IllegalStateException when the native player is gone and
 * OutOfMemoryError when the path string cannot be obtained.
 */
static void
IjkMediaPlayer_screenShot(JNIEnv *env, jobject thiz, jstring path)
{
MPTRACE("%s\n", __func__);
const char *c_path = NULL;
IjkMediaPlayer *mp = jni_get_media_player(env, thiz);
JNI_CHECK_GOTO(mp, env, "java/lang/IllegalStateException", "mpjni: screenshot: null mp", LABEL_RETURN);
c_path = (*env)->GetStringUTFChars(env, path, NULL );
JNI_CHECK_GOTO(c_path, env, "java/lang/OutOfMemoryError", "mpjni: screenShot: path.string oom", LABEL_RETURN);
ALOGV("screenShot: screen path %s", c_path);
ijkmp_screen_shot(mp, c_path);
LABEL_RETURN:
/* release JNI string and drop the ref taken by jni_get_media_player */
if (c_path)
(*env)->ReleaseStringUTFChars(env, path, c_path);
/* NOTE(review): reached with mp == NULL on the first check; presumably
 * ijkmp_dec_ref_p tolerates a NULL player — verify against its definition. */
ijkmp_dec_ref_p(&mp);
}
然后在g_method里面进行注册:
/* JNI method-table entry: binds Java screenShot(String) to IjkMediaPlayer_screenShot. */
{ "screenShot", "(Ljava/lang/String;)V", (void *) IjkMediaPlayer_screenShot },
在ijkplayer.c里面添加方法,并且在ijkplayer.h里面进行声明,ijkplayer_jni.c才能调用。
/*
 * State gate for screenshot requests: each MPST_RET_IF_EQ returns an error
 * when mp_state matches, so screenshots are rejected in every state except
 * the active playing/paused ones. Returns 0 when the request is allowed.
 * Caller must hold the player lock (the _l suffix convention).
 * NOTE(review): "ikjmp" is a typo for "ijkmp", kept because the caller
 * below uses this exact spelling.
 */
static int ikjmp_chkst_screenshot_l(int mp_state)
{
MPST_RET_IF_EQ(mp_state, MP_STATE_IDLE);
MPST_RET_IF_EQ(mp_state, MP_STATE_INITIALIZED);
MPST_RET_IF_EQ(mp_state, MP_STATE_ASYNC_PREPARING);
MPST_RET_IF_EQ(mp_state, MP_STATE_COMPLETED);
MPST_RET_IF_EQ(mp_state, MP_STATE_STOPPED);
MPST_RET_IF_EQ(mp_state, MP_STATE_ERROR);
MPST_RET_IF_EQ(mp_state, MP_STATE_END);
return 0;
}
/*
 * Locked core of the screenshot request: validates the player state, records
 * the request, and posts FFP_REQ_SCREENSHOT to the message loop (removing
 * any stale pending request first so only the latest path wins).
 * Caller must hold mp->mutex. Returns 0 on success, an error code otherwise.
 */
int ijkmp_screen_shot_l(IjkMediaPlayer *mp, const char* path)
{
assert(mp);
MP_RET_IF_FAILED(ikjmp_chkst_screenshot_l(mp->mp_state));
mp->screenshot_req= 1;
/* NOTE(review): the public wrapper already stored a strdup'd copy here and
 * passes it back in as `path`, so this assignment is a self-assignment in
 * the current call path — harmless, kept for direct callers. */
mp->screen_path= path;
ffp_remove_msg(mp->ffplayer, FFP_REQ_SCREENSHOT);
ffp_notify_msg2(mp->ffplayer, FFP_REQ_SCREENSHOT, 0);
return 0;
}
/*
 * Public screenshot entry point (called from JNI). Duplicates the caller's
 * path (the JNI string is released as soon as the JNI call returns) and
 * posts the request through ijkmp_screen_shot_l() under the player lock.
 * Returns 0 on success, negative on failure.
 *
 * BUGFIX: the original freed/replaced mp->screen_path BEFORE taking
 * mp->mutex, racing with locked readers of that field, and passed an
 * unchecked strdup() result onward (and logged it with %s, which is UB
 * when NULL). Both the path swap and the request now happen under the lock.
 */
int ijkmp_screen_shot(IjkMediaPlayer *mp, const char* path)
{
    assert(mp);
    MPTRACE("ijkmp_screen_shot(%s)\n", path);

    int retval;
    pthread_mutex_lock(&mp->mutex);
    freep((void**)&mp->screen_path);
    mp->screen_path = strdup(path);
    if (!mp->screen_path) {
        retval = -1; /* OOM: do not post a request with a NULL path */
    } else {
        retval = ijkmp_screen_shot_l(mp, mp->screen_path);
    }
    pthread_mutex_unlock(&mp->mutex);

    /* Log the caller's copy, not mp->screen_path, which may be freed
     * concurrently once the lock is dropped. */
    MPTRACE("ijkmp_screen_shot(%s)=%d\n", path, retval);
    return retval;
}
上面有一个宏定义,FFP_REQ_SCREENSHOT需要在ff_ffmsg.h里面添加,如下:
/* Request messages posted from the API layer to the ffplay message loop. */
#define FFP_REQ_START 20001
#define FFP_REQ_PAUSE 20002
#define FFP_REQ_SEEK 20003
#define FFP_REQ_SCREENSHOT 20004 /* encode the cached frame to JPEG (ffp_screen_shot_l) */
然后,就是要进一步封装ffplay里面的截图功能了,在ff_ffplay.c里面添加截图方法并在ff_ffplay.h里面声明:
/*
 * Service a screenshot request: snapshot the cached frame into a private
 * copy under screenshot_lock, then encode the copy to a JPEG at `path`
 * outside the lock (so encoding never stalls the decode thread).
 * Returns do_screenshot()'s result, or -1 when no frame is available or
 * allocation fails.
 *
 * BUGFIX: the original read g_screenshot_frame's format/width/height and
 * allocated the copy buffer BEFORE taking screenshot_lock, racing with
 * fill_Screen_Frame() which may free/reallocate the frame at any moment;
 * it also never set tmp_frame->format and leaked the copy buffer
 * (av_frame_free() does not free data attached via av_image_fill_arrays).
 */
int ffp_screen_shot_l(FFPlayer *ffp, const char * path)
{
    int ret = -1;
    AVFrame *tmp_frame = NULL;
    unsigned char *buffer = NULL;

    pthread_mutex_lock(&screenshot_lock);
    if (g_screenshot_frame == NULL) {
        LOGV("ffp_screen_shot_l g_screenshot_frame is null");
    } else if (g_screenshot_frame->format < 0) {
        LOGV("ffp_screen_shot_l format error");
    } else {
        int fmt = g_screenshot_frame->format;
        int w   = g_screenshot_frame->width;
        int h   = g_screenshot_frame->height;

        LOGV("ffp_screen_shot_l format begin");
        tmp_frame = av_frame_alloc();
        if (tmp_frame)
            buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(fmt, w, h, 1));
        if (buffer) {
            av_image_fill_arrays(tmp_frame->data, tmp_frame->linesize, buffer, fmt, w, h, 1);
            tmp_frame->width  = w;
            tmp_frame->height = h;
            tmp_frame->format = fmt; /* BUGFIX: original left format unset */
            av_image_copy(tmp_frame->data, tmp_frame->linesize,
                          (const uint8_t **)(g_screenshot_frame->data),
                          g_screenshot_frame->linesize,
                          fmt, w, h);
        }
    }
    pthread_mutex_unlock(&screenshot_lock);

    if (buffer) {
        /* Encode the private copy without blocking the decode thread. */
        ret = do_screenshot(path, tmp_frame);
        LOGV("ffp_screen_shot_l format end");
    }
    /* The pixel buffer is not owned by the frame; release it explicitly. */
    if (tmp_frame)
        av_freep(&tmp_frame->data[0]);
    av_frame_free(&tmp_frame);
    return ret;
}
到这里,底层JNI的截图已经完成,上层的Java就可以直接调用了。在ijkplayer-java jar项目里面找到IjkMediaPlayer.java,添加如下native方法:
// Native method bound to IjkMediaPlayer_screenShot via the g_methods table
// in ijkplayer_jni.c; `path` is the destination file for the JPEG screenshot.
public native void screenShot(String path);
这样,在其他模块,就可以调用截图功能了!!
相关链接:http://blog.csdn.net/veilling/article/details/52711159