FFmpeg学习之安卓音频文件解封装解码到PCM文件

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/zb52588/article/details/82760686

FFmpeg学习之安卓音频文件解封装解码到PCM文件

一、c代码实现将音频文件解码转化成s16le的PCM(music_ffmpeg.c)

//--------------------------安卓的log
#include <jni.h>
#include <android/log.h>
#define LOG_TAG "zbv"
#define  LOGD(...)  __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
#define  LOGE(...)  __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
//--------------------------安卓的log

//--------------------------ffmpeg
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
//--------------------------ffmpeg

// File-scope state for music_ffmpeg.c.
// UTF-8 path strings obtained from the JVM via GetStringUTFChars; they are
// owned by the VM until released. NOTE(review): the function below never calls
// ReleaseStringUTFChars, so these leak on every invocation — confirm and fix.
static const char* sourch_path;
static const char* destination_path;

// Demuxer and decoder contexts (zero-initialized as statics; closed/freed at
// the end of each native call).
static AVFormatContext* fmt_ctx;
static AVCodecContext* audio_codec_ctx;

// Target output format: interleaved signed 16-bit samples (s16le PCM).
static enum AVSampleFormat dst_sample_fmt=AV_SAMPLE_FMT_S16;

//JNI entry point: demux + decode an audio file and resample every frame to
//interleaved signed 16-bit little-endian PCM written to the destination file.
//Play the result with: ffplay -f s16le -ac 2 -ar 44100 -i out.pcm
//
//Fixes over the original:
// * frame/packet/swr_ctx start as NULL so the cleanup at `end:` never frees
//   an uninitialized pointer (previously undefined behavior on early failure)
// * every failure now jumps to `end:` instead of returning, so the contexts,
//   the packet and the JNI strings are always released
// * the malloc'ed AVPacket is freed; deprecated av_free_packet() replaced by
//   av_packet_unref()
void Java_com_example_simpleTestFFmpeg_MusicDisplayActivity_nativeDecodeMusic(JNIEnv* env,jclass clazz,jstring sourcePath,jstring destPath){
	
	int ret,stream_index,got_frame;
	AVStream *stream;
	AVCodec *codec;
	
	FILE* desFile=NULL;
	
	struct SwrContext* swr_ctx=NULL;
	
	int dst_nb_sample,max_dst_nb_sample;
	int channels=0;
	
	uint8_t **dst_data=NULL;
	
	AVFrame* frame=NULL;
	AVPacket* packet=NULL;
	
	//UTF-8 copies owned by the JVM; released at `end:`
	sourch_path=(*env)->GetStringUTFChars(env,sourcePath,NULL);
	destination_path=(*env)->GetStringUTFChars(env,destPath,NULL);
	
	LOGE("原始文件path=%s,目标文件path=%s",sourch_path,destination_path);
	
	av_register_all();
	
	//only required for network inputs, harmless otherwise
	avformat_network_init();
	
	//3rd argument (AVInputFormat*): NULL = auto-detect; opens the input and reads the header
	if(avformat_open_input(&fmt_ctx,sourch_path,NULL,NULL)<0){
		LOGD("无法打开源文件");
		goto end;
	}
	
	//read a few packets to fill in the stream information
	if(avformat_find_stream_info(fmt_ctx,NULL)<0){
		LOGD("无法读取源文件流信息");
		goto end;
	}
	
	//audio-only file: select the best audio stream (-1 = auto pick)
	ret=av_find_best_stream(fmt_ctx,AVMEDIA_TYPE_AUDIO,-1,-1,NULL,0);
	
	if(ret<0){
		//av_get_media_type_string() maps the enum to a readable name
		LOGD("没有找到%s类型的输入流",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
		goto end;
	}else{
		stream_index=ret;
		//streams is an array: AVStream **streams
		stream=fmt_ctx->streams[stream_index];
		
		//look up a registered decoder for the stream's codec id
		codec=avcodec_find_decoder(stream->codecpar->codec_id);
		if(!codec){
			LOGD("失败去找到%s类型的解码器",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
			goto end;
		}
		
		//allocate the codec context with defaults; freed at `end:`
		audio_codec_ctx=avcodec_alloc_context3(codec);
		if(!audio_codec_ctx){
			LOGD("给AVCodecContext内存分配失败");
			goto end;
		}
		
		//copy the demuxed stream parameters into the codec context
		ret=avcodec_parameters_to_context(audio_codec_ctx,stream->codecpar);
		if(ret<0){
			LOGD("给AVCodecContext拷贝参数失败");
			goto end;
		}
		
		//open the decoder
		ret=avcodec_open2(audio_codec_ctx,codec,NULL);
		if(ret<0){
			LOGD("打开%s类型的解码器失败",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
			goto end;
		}
	}
	
	//open the destination PCM file
	desFile=fopen(destination_path,"wb");
	if(!desFile){
		LOGD("打开destination文件失败");
		goto end;
	}
	
	//swresample: decoded format -> interleaved s16
	swr_ctx=swr_alloc();
	if(!swr_ctx){
		LOGD("分配resample context失败");
		goto end;
	}
	
	av_opt_set_int(swr_ctx,"in_channel_layout",audio_codec_ctx->channel_layout,0);
	av_opt_set_int(swr_ctx,"in_sample_rate",audio_codec_ctx->sample_rate,0);
	av_opt_set_sample_fmt(swr_ctx,"in_sample_fmt",audio_codec_ctx->sample_fmt,0);
	
	channels=av_get_channel_layout_nb_channels(audio_codec_ctx->channel_layout);
	//av_get_sample_fmt_name() returns a const string
	const char* fmt_name=av_get_sample_fmt_name(audio_codec_ctx->sample_fmt);
	
	LOGD("channels=%d,sampleRate=%d,sampleFmt=%s",channels,audio_codec_ctx->sample_rate,fmt_name);
	
	//same layout and rate as the input; only the sample format changes
	av_opt_set_int(swr_ctx,"out_channel_layout",audio_codec_ctx->channel_layout,0);
	av_opt_set_int(swr_ctx,"out_sample_rate",audio_codec_ctx->sample_rate,0);
	av_opt_set_sample_fmt(swr_ctx,"out_sample_fmt",dst_sample_fmt,0);
	
	//initialize the resampler once all options are set
	ret=swr_init(swr_ctx);
	if(ret<0){
		LOGD("初始化resample context失败");
		goto end;
	}
	
	LOGD("swr init finished");
	
	//frame_size = samples per channel per frame (identity rescale here since
	//the input and output sample rates are equal)
	max_dst_nb_sample=dst_nb_sample=av_rescale_rnd(audio_codec_ctx->frame_size,audio_codec_ctx->sample_rate,audio_codec_ctx->sample_rate,AV_ROUND_UP);
	
	//allocates both the plane-pointer array and the sample buffers
	ret=av_samples_alloc_array_and_samples(&dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,0);
	if(ret<0){
		LOGD("分配dst_data失败");
		goto end;
	}
	
	LOGD("to do frame");
	
	frame=av_frame_alloc();
	if(!frame){
		LOGD("无法分配frame");
		goto end;
	}
	
	packet=(AVPacket *)malloc(sizeof(AVPacket));
	if(!packet){
		goto end;
	}
	av_init_packet(packet);
	packet->data=NULL;
	packet->size=0;
	
	LOGD("start av_read_frame");
	
	//pull one packet at a time from the demuxer
	while((ret=av_read_frame(fmt_ctx,packet))>=0){
		if(packet->stream_index==stream_index){
			do{
				
				int decoded = packet->size;
				got_frame=0;
				
				//decode audio; got_frame==0 means no frame was produced
			    ret=avcodec_decode_audio4(audio_codec_ctx,frame,&got_frame,packet);
			    if(ret<0){
				    LOGD("音频解码出错了");
				    goto end;
			    }
				
				decoded = FFMIN(ret, packet->size);
				
				if(got_frame){
					//account for samples buffered inside the resampler
					dst_nb_sample=av_rescale_rnd(swr_get_delay(swr_ctx,frame->sample_rate)+frame->nb_samples,frame->sample_rate,frame->sample_rate,AV_ROUND_UP);
					if(dst_nb_sample>max_dst_nb_sample){
						//grow the destination buffer
						av_freep(&dst_data[0]);
						ret=av_samples_alloc(dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,1);
						if(ret<0){
							LOGD("重新分配dst_data失败");
							break;
						}
						max_dst_nb_sample=dst_nb_sample;
					}
					
					//convert; returns the number of samples per channel produced
					ret=swr_convert(swr_ctx,dst_data,dst_nb_sample,(uint8_t**)frame->data,frame->nb_samples);
					if(ret<0){
						LOGD("swr_convert转换错误");
						goto end;
					}
					
					//bytes needed to hold `ret` samples in the output format
					int dst_buffer_size=av_samples_get_buffer_size(NULL,channels,ret,dst_sample_fmt,1);
					if(dst_buffer_size<0){
						LOGD("获取样本buffer大小失败");
						goto end;
					}
					
					LOGD("WRITE TO FILE %d",dst_buffer_size);
					
					//interleaved s16: all data lives in plane 0
					fwrite(dst_data[0],1,dst_buffer_size,desFile);
					
				}
				
			    packet->data+=decoded;
			    packet->size-=decoded;
			}while(packet->size>0);
		}
		
		//av_free_packet() is deprecated; unref drops the buffer reference
		av_packet_unref(packet);
	}
	
	LOGD("flush cached frames");
	
	//drain the decoder by feeding an empty packet until no frame comes back
	packet->data=NULL;
	packet->size=0;
	do{
		ret=avcodec_decode_audio4(audio_codec_ctx,frame,&got_frame,packet);
		if(ret<0){
			LOGD("音频解码出错了");
			goto end;
		}
		
		if(got_frame)
		{
			dst_nb_sample=av_rescale_rnd(swr_get_delay(swr_ctx,frame->sample_rate)+frame->nb_samples,frame->sample_rate,frame->sample_rate,AV_ROUND_UP);
			if(dst_nb_sample>max_dst_nb_sample){
				av_freep(&dst_data[0]);
				ret=av_samples_alloc(dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,1);
				if(ret<0){
					LOGD("重新分配dst_data失败");
					break;
				}
				max_dst_nb_sample=dst_nb_sample;
			}
			
			ret=swr_convert(swr_ctx,dst_data,dst_nb_sample,(uint8_t**)frame->data,frame->nb_samples);
			if(ret<0){
				LOGD("swr_convert转换错误");
				goto end;
			}
			
			int dst_buffer_size=av_samples_get_buffer_size(NULL,channels,ret,dst_sample_fmt,1);
			if(dst_buffer_size<0){
				LOGD("获取样本buffer大小失败");
				goto end;
			}
			
			//ffplay -f s16le -ac 2 -ar 44100 -i <file>.pcm  (le = little endian)
			fwrite(dst_data[0],1,dst_buffer_size,desFile);
		}
		
	}while(got_frame);
	
	LOGE("解封装解码全部完成!!!");
	
	end:
	    avcodec_free_context(&audio_codec_ctx);
		avformat_close_input(&fmt_ctx);
		if(desFile){
	        fclose(desFile);
		}
	
		if(dst_data){
			av_freep(&dst_data[0]);
		}
		av_freep(&dst_data);
		
		av_frame_free(&frame);
		swr_free(&swr_ctx);
		
		if(packet){
			av_packet_unref(packet);
			free(packet);
		}
		
		//release the UTF-8 strings obtained from the JVM (previously leaked)
		(*env)->ReleaseStringUTFChars(env,sourcePath,sourch_path);
		(*env)->ReleaseStringUTFChars(env,destPath,destination_path);
}

二、编译jni (借鉴雷霄骅大神的系列)

# Android.mk for FFmpeg
#
# Lei Xiaohua (雷霄骅)
# [email protected]
# http://blog.csdn.net/leixiaohua1020
# 

LOCAL_PATH := $(call my-dir)

# FFmpeg library
# Each of the following stanzas registers one prebuilt FFmpeg .so from
# jni/libs so the NDK build can link against it.
include $(CLEAR_VARS)
LOCAL_MODULE := avcodec
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libavcodec-58.so
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := avdevice
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libavdevice-58.so
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := avfilter
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libavfilter-7.so
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := avformat
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libavformat-58.so
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := avutil
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libavutil-56.so
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := postproc
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libpostproc-55.so
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := swresample
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libswresample-3.so
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := swscale
LOCAL_SRC_FILES := $(LOCAL_PATH)/libs/libswscale-5.so
include $(PREBUILT_SHARED_LIBRARY)

# Program
# NOTE(review): this builds module "audiotrack_audio" from audiotrack_audio.c
# (section four). The Java code in section three loads "music_ffmpeg" — a
# second, analogous stanza compiling music_ffmpeg.c is needed for it.
include $(CLEAR_VARS)
LOCAL_MODULE := audiotrack_audio
LOCAL_SRC_FILES :=audiotrack_audio.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/include
LOCAL_LDLIBS := -llog -lz
LOCAL_SHARED_LIBRARIES := avcodec avdevice avfilter avformat avutil postproc swresample swscale
include $(BUILD_SHARED_LIBRARY)

三、java代码

package com.example.simpleTestFFmpeg;

import android.os.Environment;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.View;

public class MusicDisplayActivity extends AppCompatActivity {

    // Native libraries must be loaded in dependency order (avutil first)
    // before any native method on this class is resolved.
    static {
        System.loadLibrary("avutil-56");
        System.loadLibrary("swresample-3");
        System.loadLibrary("avcodec-58");
        System.loadLibrary("avformat-58");
        System.loadLibrary("swscale-5");
        System.loadLibrary("postproc-55");
        System.loadLibrary("avfilter-7");
        System.loadLibrary("avdevice-58");
        System.loadLibrary("music_ffmpeg");
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_music_display);
    }

    /**
     * Button handler: decodes lky_bhs_mp3.mp3 from the public Music directory
     * into test.pcm (interleaved s16le PCM) via the native FFmpeg routine.
     */
    public void decodeMusicToPCM(View view) {
        final String musicDir = Environment
                .getExternalStoragePublicDirectory(Environment.DIRECTORY_MUSIC)
                .getAbsolutePath();

        Log.d("zbv", "path=" + musicDir);

        nativeDecodeMusic(musicDir + "/lky_bhs_mp3.mp3", musicDir + "/test.pcm");
    }

    /** Implemented in music_ffmpeg.c: demux + decode + resample to PCM. */
    private native void nativeDecodeMusic(String sourceFilePath, String destMusicPath);
}

四、拓展c代码实现AudioTrack播放pcm音频audiotrack_audio.c(ps:c代码调用java)

//--------------------------安卓的log
#include <jni.h>
#include <android/log.h>
#define LOG_TAG "zbv"
#define  LOGD(...)  __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
#define  LOGE(...)  __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
//--------------------------安卓的log

//因为函数的申明包含重复没关系,重点是同一个文件不要有重复包含就可以了

//memcpy
#include <string.h>

//--------------------------ffmpeg
//格式封装
#include <libavformat/avformat.h>
//编解码
#include <libavcodec/avcodec.h>
//重采样
#include <libswresample/swresample.h>
//设置采样参数
#include <libavutil/opt.h>
//--------------------------ffmpeg


#ifdef __cplusplus
extern "C" {
#endif

static const enum AVSampleFormat dst_sample_fmt=AV_SAMPLE_FMT_S16;


/*
* Android jni 的基础必备知识,也是本程序所必须知道的get point
* One: jni.h在java所在安装目录的jre\include\jni.h
* Two: 基本数据类型的对应如下:
       void <-> void; boolean <-> jboolean; char <-> jchar; float <-> jfloat; double <-> jdouble; 
	   byte <-> jbyte; short <-> jshort; int <->jint; long <-> jlong;
	   引用对象类型如下;
	   String <-> jstring; Object <-> jobject; byte[] <-> jbyteArray; object[] <-> jobjectArray
* Three:一般我们使用jni通常用于Java调用C语言代码,而这次我们需要C代码中调用Java代码:
         数据类型和签名:
		 boolean => Z; char => C; float => F; double => D; byte => B; short => S; int => I; long => J; void => V (注意:long的签名是J,不是L)
         object => Lxxx(例如String => Ljava/lang/String; 结尾需要分号)	Array => [xxx(例如int[] => [I;object[] => [Ljava/lang/Object;)	
		 
* Four:<p>属性:访问静态和非静态属性</p>、<p>方法:访问静态和非静态方法</p>、<p>构造方法:访问构造方法</p>		

* ps:这里第二个参数写作jclass或者jobject都可以
*/
/*
 * JNI entry point: demux + decode an audio file, resample each frame to
 * interleaved s16 PCM, and feed every converted buffer to a Java AudioTrack
 * (obtained by calling back into the Activity's createAudioTrack method).
 *
 * Fixes over the original:
 *  - audio_codec_ctx/swr_ctx/frame/packet start as NULL, so the cleanup at
 *    `end:` never frees an uninitialized pointer (previously UB on early exit)
 *  - early failures jump to `end:` instead of returning, so the contexts and
 *    the JNI string are always released
 *  - JNI method lookups are checked before use
 *  - the malloc'ed AVPacket is freed; deprecated av_free_packet() replaced by
 *    av_packet_unref()
 *
 * The second parameter is the Java instance (the method is non-static), so
 * GetObjectClass/CallObjectMethod on it are valid.
 */
JNIEXPORT void Java_com_example_simpleTestFFmpeg_AudioTractMusicActivity_decodeMusicPlayByAudioTrack(JNIEnv* env,jclass clazz,jstring sourcePath){
	
	AVFormatContext* fmt_ctx=NULL;//must be NULL or avformat_open_input misbehaves
	int ret,dst_nb_sample,max_dst_nb_sample,stream_index,got_frame;
	AVStream* stream;
	AVCodec* codec;
	AVCodecContext* audio_codec_ctx=NULL;
	struct SwrContext* swr_ctx=NULL;
	AVFrame* frame=NULL;
	AVPacket* packet=NULL;
	int channels=0;
	
	uint8_t **dst_data=NULL;
	
	//Step 1: fetch the Java string (C uses (*env)->, C++ uses env->; see jni.h)
	const char* original_path=(*env)->GetStringUTFChars(env,sourcePath,NULL);
	
	LOGE("原始文件path=%s",original_path);
	
	//Step 2: FFmpeg setup
	//av_register_all() is deprecated in FFmpeg 4.x and no longer needed
	//avformat_network_init() only matters for network inputs
	
	//3rd argument (AVInputFormat*): NULL = auto-detect; opens input, reads header
	if(avformat_open_input(&fmt_ctx,original_path,NULL,NULL)<0){
		LOGD("无法打开源文件");
		goto end;
	}
	
	//read a few packets to fill in the stream information
	if(avformat_find_stream_info(fmt_ctx,NULL)<0){
		LOGD("无法读取源文件流信息");
		goto end;
	}
	
	//audio-only file: select the best audio stream (-1 = auto pick)
	ret=av_find_best_stream(fmt_ctx,AVMEDIA_TYPE_AUDIO,-1,-1,NULL,0);
	
	if(ret<0){
		LOGD("没有找到%s类型的输入流",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
		goto end;
	}else{
		stream_index=ret;
		stream=fmt_ctx->streams[stream_index];
		
		//look up the decoder matching the stream's codec id
		codec=avcodec_find_decoder(stream->codecpar->codec_id);
		
		if(!codec){
			LOGD("失败去找到%s类型的解码器",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
			goto end;
		}
			
		//allocate the codec context; freed at `end:`
		audio_codec_ctx=avcodec_alloc_context3(codec);
		if(!audio_codec_ctx){
			LOGD("给AVCodecContext内存分配失败");
			goto end;
		}
		
		//copy the demuxed stream parameters into the codec context
		ret=avcodec_parameters_to_context(audio_codec_ctx,stream->codecpar);
		if(ret<0){
			LOGD("给AVCodecContext拷贝参数失败");
			goto end;
		}
		
		//open the decoder
		ret=avcodec_open2(audio_codec_ctx,codec,NULL);
		if(ret<0){
			LOGD("打开%s类型的解码器失败",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
			goto end;
		}
	}
	
	
	//swresample: decoded format -> interleaved s16 for AudioTrack
	swr_ctx=swr_alloc();
	if(!swr_ctx){
		LOGD("分配resample context失败");
		goto end;
	}
	
	av_opt_set_int(swr_ctx,"in_channel_layout",audio_codec_ctx->channel_layout,0);
	av_opt_set_int(swr_ctx,"in_sample_rate",audio_codec_ctx->sample_rate,0);
	av_opt_set_sample_fmt(swr_ctx,"in_sample_fmt",audio_codec_ctx->sample_fmt,0);
	
	channels=av_get_channel_layout_nb_channels(audio_codec_ctx->channel_layout);
	//av_get_sample_fmt_name() returns a const string
	const char* fmt_name=av_get_sample_fmt_name(audio_codec_ctx->sample_fmt);
	
	LOGD("channels=%d,sampleRate=%d,sampleFmt=%s",channels,audio_codec_ctx->sample_rate,fmt_name);
	
	//same layout/rate as the input; only the sample format changes
	av_opt_set_int(swr_ctx,"out_channel_layout",audio_codec_ctx->channel_layout,0);
	av_opt_set_int(swr_ctx,"out_sample_rate",audio_codec_ctx->sample_rate,0);
	av_opt_set_sample_fmt(swr_ctx,"out_sample_fmt",dst_sample_fmt,0);
	
	//initialize the resampler once all options are set
	ret=swr_init(swr_ctx);
	if(ret<0){
		LOGD("初始化resample context失败");
		goto end;
	}
	
	//frame_size = samples per channel per frame (identity rescale: same rates)
	max_dst_nb_sample=dst_nb_sample=av_rescale_rnd(audio_codec_ctx->frame_size,audio_codec_ctx->sample_rate,audio_codec_ctx->sample_rate,AV_ROUND_UP);
	
	//allocates both the plane-pointer array and the sample buffers
	ret=av_samples_alloc_array_and_samples(&dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,0);
	if(ret<0){
		LOGD("分配dst_data失败");
		goto end;
	}
	
	frame=av_frame_alloc();
	if(!frame){
		LOGD("无法分配frame");
		goto end;
	}
	
	packet=(AVPacket *)malloc(sizeof(AVPacket));
	if(!packet){
		goto end;
	}
	av_init_packet(packet);
	packet->data=NULL;
	packet->size=0;
	
	//--------------------obtain the Java AudioTrack via callback
	//class of the instance this native method was invoked on
	jclass my_java_class=(*env)->GetObjectClass(env,clazz);
	jmethodID audiotrack_id=(*env)->GetMethodID(env,my_java_class,"createAudioTrack","(II)Landroid/media/AudioTrack;");
	if(!audiotrack_id){
		LOGD("GetMethodID createAudioTrack failed");
		goto end;
	}
	jobject audio_track=(*env)->CallObjectMethod(env,clazz,audiotrack_id,audio_codec_ctx->sample_rate,channels);
	if(!audio_track){
		LOGD("createAudioTrack returned null");
		goto end;
	}
	//start playback (MODE_STREAM: we can write while playing)
	jclass audiotrack_class=(*env)->GetObjectClass(env,audio_track);
	jmethodID at_play=(*env)->GetMethodID(env,audiotrack_class,"play","()V");
	(*env)->CallVoidMethod(env,audio_track,at_play);
	//cache the write(byte[],int,int) method id for the decode loop
	jmethodID at_write=(*env)->GetMethodID(env,audiotrack_class,"write","([BII)I");
	//--------------------obtain the Java AudioTrack via callback
	
	LOGD("start av_read_frame");
	
	//pull one packet at a time from the demuxer
	while((ret=av_read_frame(fmt_ctx,packet))>=0){
		if(packet->stream_index==stream_index){
			do{
				
				int decoded = packet->size;
				got_frame=0;
				
				//decode audio; got_frame==0 means no frame was produced
			    ret=avcodec_decode_audio4(audio_codec_ctx,frame,&got_frame,packet);
			    if(ret<0){
				    LOGD("音频解码出错了");
				    goto end;
			    }
				
				decoded = FFMIN(ret, packet->size);
				
				if(got_frame){
					//account for samples buffered inside the resampler
					dst_nb_sample=av_rescale_rnd(swr_get_delay(swr_ctx,frame->sample_rate)+frame->nb_samples,frame->sample_rate,frame->sample_rate,AV_ROUND_UP);
					if(dst_nb_sample>max_dst_nb_sample){
						//grow the destination buffer
						av_freep(&dst_data[0]);
						ret=av_samples_alloc(dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,1);
						if(ret<0){
							LOGD("重新分配dst_data失败");
							break;
						}
						max_dst_nb_sample=dst_nb_sample;
					}
					
					//convert; returns the number of samples per channel produced
					ret=swr_convert(swr_ctx,dst_data,dst_nb_sample,(uint8_t**)frame->data,frame->nb_samples);
					if(ret<0){
						LOGD("swr_convert转换错误");
						goto end;
					}
					
					//bytes needed to hold `ret` samples in the output format
					int dst_buffer_size=av_samples_get_buffer_size(NULL,channels,ret,dst_sample_fmt,1);
					if(dst_buffer_size<0){
						LOGD("获取样本buffer大小失败");
						goto end;
					}
					
					LOGD("WRITE TO AUDIOTRACK %d",dst_buffer_size);
					
					//copy the PCM into a Java byte[] and hand it to AudioTrack
					jbyteArray audio_sample_array=(*env)->NewByteArray(env,dst_buffer_size);
					jbyte* sample_byte=(*env)->GetByteArrayElements(env,audio_sample_array,NULL);
					memcpy(sample_byte,dst_data[0],dst_buffer_size);
					//mode 0: copy back and free the native buffer
					(*env)->ReleaseByteArrayElements(env,audio_sample_array,sample_byte,0);
					(*env)->CallIntMethod(env,audio_track,at_write,audio_sample_array,0,dst_buffer_size);
					//drop the local ref so the loop does not exhaust the table
                    (*env)->DeleteLocalRef(env, audio_sample_array);
					
				}
				
			    packet->data+=decoded;
			    packet->size-=decoded;
			}while(packet->size>0);
		}
		
		//av_free_packet() is deprecated; unref drops the buffer reference
		av_packet_unref(packet);
	}
	
	//drain the decoder by feeding an empty packet until no frame comes back
	packet->data=NULL;
	packet->size=0;
	do{
		ret=avcodec_decode_audio4(audio_codec_ctx,frame,&got_frame,packet);
		if(ret<0){
			LOGD("音频解码出错了");
			goto end;
		}
		
		if(got_frame)
		{
			dst_nb_sample=av_rescale_rnd(swr_get_delay(swr_ctx,frame->sample_rate)+frame->nb_samples,frame->sample_rate,frame->sample_rate,AV_ROUND_UP);
			if(dst_nb_sample>max_dst_nb_sample){
				av_freep(&dst_data[0]);
				ret=av_samples_alloc(dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,1);
				if(ret<0){
					LOGD("重新分配dst_data失败");
					break;
				}
				max_dst_nb_sample=dst_nb_sample;
			}
			
			ret=swr_convert(swr_ctx,dst_data,dst_nb_sample,(uint8_t**)frame->data,frame->nb_samples);
			if(ret<0){
				LOGD("swr_convert转换错误");
				goto end;
			}
			
			int dst_buffer_size=av_samples_get_buffer_size(NULL,channels,ret,dst_sample_fmt,1);
			if(dst_buffer_size<0){
				LOGD("获取样本buffer大小失败");
				goto end;
			}
			
			LOGD("LAST WRITE TO AUDIOTRACK %d",dst_buffer_size);
			
			//same Java byte[] hand-off as in the main loop
			jbyteArray audio_sample_array=(*env)->NewByteArray(env,dst_buffer_size);
			jbyte* sample_byte=(*env)->GetByteArrayElements(env,audio_sample_array,NULL);
			memcpy(sample_byte,dst_data[0],dst_buffer_size);
			(*env)->ReleaseByteArrayElements(env,audio_sample_array,sample_byte,0);
			(*env)->CallIntMethod(env,audio_track,at_write,audio_sample_array,0,dst_buffer_size);
            (*env)->DeleteLocalRef(env, audio_sample_array);
			
		}
		
	}while(got_frame);
	
	LOGE("解封装解码全部完成!!!");
	
	end:
	    avcodec_free_context(&audio_codec_ctx);
		avformat_close_input(&fmt_ctx);
	
		if(dst_data){
			av_freep(&dst_data[0]);
		}
		av_freep(&dst_data);
		
		av_frame_free(&frame);
		swr_free(&swr_ctx);
		
		if(packet){
			av_packet_unref(packet);
			free(packet);
		}
		
		//release the UTF-8 string obtained from the JVM (now on every path)
		(*env)->ReleaseStringUTFChars(env,sourcePath,original_path);
	
}


#ifdef __cplusplus
}
#endif

五、AudioTrack播放Activity

package com.example.simpleTestFFmpeg;

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.os.Bundle;
import android.os.Environment;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.Toast;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AudioTractMusicActivity extends AppCompatActivity {

    // Native libraries must be loaded in dependency order (avutil first)
    // before any native method on this class is resolved.
    static {
        System.loadLibrary("avutil-56");
        System.loadLibrary("swresample-3");
        System.loadLibrary("avcodec-58");
        System.loadLibrary("avformat-58");
        System.loadLibrary("swscale-5");
        System.loadLibrary("postproc-55");
        System.loadLibrary("avfilter-7");
        System.loadLibrary("avdevice-58");
        System.loadLibrary("audiotrack_audio");
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_audio_tract_music);

        // Pre-create the AudioTrack so playPCM/stopPCM have one to use even
        // before the native callback path creates its own.
        createAudioTrack(44100, 2);
    }

    // The AudioTrack used by the PCM playback buttons; also reassigned when
    // the native code calls createAudioTrack through JNI.
    private AudioTrack userAudioTrack;

    /**
     * Creates (and remembers) an AudioTrack for interleaved 16-bit PCM.
     * Called both from Java and from native code via JNI, so the signature
     * (int, int) -> AudioTrack must not change.
     *
     * @param sampleRate  sample rate in Hz (e.g. 44100)
     * @param nb_channels 1 for mono; any other value is treated as stereo
     * @return the newly created AudioTrack (MODE_STREAM)
     */
    public AudioTrack createAudioTrack(int sampleRate, int nb_channels) {

        // s16 PCM matches the AV_SAMPLE_FMT_S16 output of the native decoder.
        int audioFormat = AudioFormat.ENCODING_PCM_16BIT;

        // Mono only for exactly one channel; everything else falls back to stereo.
        int channelConfig;
        if (nb_channels == 1) {
            channelConfig = AudioFormat.CHANNEL_OUT_MONO;
        } else {
            channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
        }

        int bufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);

        // MODE_STREAM: data is written incrementally while playing.
        AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig, audioFormat
                , bufferSizeInBytes, AudioTrack.MODE_STREAM);

        userAudioTrack = audioTrack;

        return audioTrack;
    }

    /** Implemented in audiotrack_audio.c: decode + play through AudioTrack. */
    private native void decodeMusicPlayByAudioTrack(String sourcePath);

    /**
     * Button handler: decode an mp3 and play it via the native AudioTrack path.
     */
    public void decodePlayPCM(View view) {

        decodeMusicPlayByAudioTrack(
                Environment.getExternalStoragePublicDirectory
                        (Environment.DIRECTORY_MUSIC).getAbsolutePath() + "/lky_bhs_mp3.mp3");

    }

    // volatile: written by the UI thread, read by the playback worker thread.
    // Without it the worker may never observe a change (visibility bug).
    private volatile boolean isEnd = true;

    /**
     * Button handler: stream a raw PCM file into the AudioTrack on a worker
     * thread (AudioTrack.write must not block the UI thread).
     */
    public void playPCM(View view) {

        if (!isEnd) {
            return; // already playing
        }
        isEnd = false;

        ExecutorService executorService = Executors.newSingleThreadExecutor();

        executorService.execute(new Runnable() {
            @Override
            public void run() {
                String path = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_MUSIC).getAbsolutePath();

                Log.d("zbv", "path=" + path);
                try {
                    FileInputStream fis = new FileInputStream(path + "/lky_bhs.pcm");

                    // MODE_STREAM lets us write while playing; MODE_STATIC
                    // would require the whole buffer up front.
                    userAudioTrack.play();

                    while (!isEnd) {
                        if (userAudioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
                            byte[] bytes = new byte[1024];
                            int ret;
                            while (!isEnd && (ret = fis.read(bytes)) != -1) {
                                Log.d("zbv", "byte length=" + ret);
                                userAudioTrack.write(bytes, 0, ret);
                            }
                            // EOF reached. The original only stopped when the
                            // final read was short (< 1024), which spun forever
                            // for files whose size is an exact multiple of 1024.
                            isEnd = true;
                        } else {
                            try {
                                Thread.sleep(40);
                            } catch (InterruptedException e) {
                                e.printStackTrace();
                            }
                        }
                    }

                    fis.close();
                    userAudioTrack.stop();

                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });

        // No further tasks will be submitted; let the worker thread terminate.
        executorService.shutdown();
    }

    /**
     * Button handler: toggles pause/resume of PCM playback and updates the
     * button label accordingly.
     */
    public void stopPCM(View view) {
        if (userAudioTrack == null) {
            return; // track not created yet
        }
        if (userAudioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
            userAudioTrack.pause();
            ((Button) view).setText("开始播放PCM");
        } else if (userAudioTrack.getPlayState() == AudioTrack.PLAYSTATE_PAUSED) {
            userAudioTrack.play();
            ((Button) view).setText("暂停播放PCM");
        }
    }
}

六、c代码实现补充解封装解码视频文件为原始yuv和pcm(demux_decode.c)

#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
#include <string.h>

//添加安卓Log
#include <jni.h>
#include <android/log.h>
#define LOG_TAG "zbv"
#define  LOGI(...)  __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define  LOGE(...)  __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)

#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio

// Output files for the demuxed/decoded elementary streams (raw YUV + PCM).
static FILE *video_file=NULL,*audio_file=NULL;
// Audio resampling context plus its output scratch buffer; all three are
// set up inside decodeFile and consumed by decode_packet.
static SwrContext *au_convert_ctx;
static uint8_t *out_buffer;
static int out_buffer_size;

// Forward declarations.
int open_codec_context(int*,AVFormatContext*,enum AVMediaType);
int decode_packet(int*,int,const int,const int,const AVPacket*,AVCodecContext*,AVCodecContext*,AVFrame*);

//主被调函数
//JNI entry point: demux a media file and decode its video stream to raw YUV
//and its audio stream to PCM, each written to its own output file.
//
//Fixes over the original:
// * avcodec_close() was called with AVCodecContext** (`&vc_video_ctx`)
//   instead of AVCodecContext* — a type error that corrupts memory
// * %d was used to print a pointer and %ld for the int64_t duration
//   (undefined behavior on 32-bit Android); now %p and %lld with casts
// * vc_audio_ctx was dereferenced even when no audio stream exists
// * early failures jump to `end:` instead of returning, and the packet,
//   out_buffer, resampler and JNI strings are now released
void Java_com_example_simpleTestFFmpeg_MP4DecoderActivity_decodeFile(JNIEnv* env, jclass clazz,jstring sourceFile,jstring videoFile,jstring audioFile){
	
	const char *fileName = (*env)->GetStringUTFChars(env, sourceFile, NULL);
	
	const char *saveVideoFileName = (*env)->GetStringUTFChars(env, videoFile, NULL);
	
	const char *saveAudioFileName = (*env)->GetStringUTFChars(env, audioFile, NULL);
	
	LOGI("sourceFile=%s,videoFile=%s,audioFile=%s",fileName,saveVideoFileName,saveAudioFileName);
	
	AVFormatContext *fmt_ctx=NULL;
	
	int stream_video_id=-1,stream_audio_id=-1;
	
	AVStream *video_stream=NULL,*audio_stream=NULL;
	
	AVCodecContext *vc_video_ctx=NULL,*vc_audio_ctx=NULL;
	
	AVFrame* frame=NULL;
	
	AVPacket* packet=NULL;
	
	int got_frame;
	int ret=-1;
	
	av_register_all();
	
	//open the source file; on success fmt_ctx is allocated
	if(avformat_open_input(&fmt_ctx,fileName,NULL,NULL)!=0){
		LOGE("could not open source file %s",fileName);
		goto end;
	}
	
	//probe the file for stream information
	if(avformat_find_stream_info(fmt_ctx,NULL)<0){
		LOGE("could not find stream info");
		goto end;
	}
	
	//video: open the decoder and the YUV output file
	if(open_codec_context(&stream_video_id,fmt_ctx,AVMEDIA_TYPE_VIDEO)==0){
		video_stream=fmt_ctx->streams[stream_video_id];
		vc_video_ctx=video_stream->codec;
		
		video_file=fopen(saveVideoFileName,"wb");
		if(!video_file){
			LOGE("could not open destination video file");
			goto end;
		}
		
	}
	
	//audio: open the decoder and the PCM output file
	if(open_codec_context(&stream_audio_id,fmt_ctx,AVMEDIA_TYPE_AUDIO)==0){
		audio_stream=fmt_ctx->streams[stream_audio_id];
		vc_audio_ctx=audio_stream->codec;
		
		audio_file=fopen(saveAudioFileName,"wb");
		if(!audio_file){
			LOGE("could not open destination audio file");
			goto end;
		}
	}
	
	//duration is an int64_t in AV_TIME_BASE (microsecond) units: %lld, not %ld
	LOGE("nbStream=%u and duration=%lld miao",fmt_ctx->nb_streams,(long long)(fmt_ctx->duration/1000/1000));
	
	frame=av_frame_alloc();
	if(!frame){
		LOGE("could not alloc frame");
		goto end;
	}
	
	//only set up the resampler when an audio stream actually exists
	//(the original dereferenced vc_audio_ctx unconditionally)
	if(vc_audio_ctx){
		LOGI("Audio Out Format");
		
		//Out Audio Param --------------------------------------------------------------
		uint64_t out_channel_layout=AV_CH_LAYOUT_STEREO;
		//samples per frame: AAC:1024  MP3:1152
		int out_nb_samples=vc_audio_ctx->frame_size;
		enum AVSampleFormat out_sample_fmt=AV_SAMPLE_FMT_S16;
		int out_sample_rate=44100;
		int out_channels=av_get_channel_layout_nb_channels(out_channel_layout);
		//bytes needed for one converted frame
		out_buffer_size=av_samples_get_buffer_size(NULL,out_channels ,out_nb_samples,out_sample_fmt, 1);
		
		out_buffer=(uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE*2);
		
		int64_t in_channel_layout=av_get_default_channel_layout(vc_audio_ctx->channels);
		
		LOGI("swr");
		
		//Swr: input layout/fmt/rate -> stereo s16 @ 44100
		au_convert_ctx=swr_alloc_set_opts(NULL,out_channel_layout, out_sample_fmt, out_sample_rate,
			in_channel_layout,vc_audio_ctx->sample_fmt , vc_audio_ctx->sample_rate,0, NULL);
		if(!au_convert_ctx||swr_init(au_convert_ctx)<0){
			LOGE("could not init resample context");
			goto end;
		}
		//---------------------------------------------------------------------------------------------
	}
	
	LOGI("initialize packet");
	
	packet=(AVPacket *)malloc(sizeof(AVPacket));
	if(!packet){
		goto end;
	}
	
	/* initialize packet, set data to NULL, let the demuxer fill it */
	av_init_packet(packet);
	packet->data=NULL;
	packet->size=0;
	
	LOGI("start av_read_frame");
	//read one packet per iteration; 0 if OK, < 0 on error or end of file
	while(av_read_frame(fmt_ctx,packet)>=0){
		//%p for the pointer (printing it with %d was undefined behavior)
		LOGI("data=%p and size=%d",(void*)packet->data,packet->size);
		//decode, advancing past the consumed bytes until the packet is empty
		do{
			ret=decode_packet(&got_frame,0,stream_video_id,stream_audio_id,packet,vc_video_ctx,vc_audio_ctx,frame);
			if(ret<0){
				break;
			}
			packet->data+=ret;
			packet->size-=ret;
		}while(packet->size>0);
		//av_free_packet() is deprecated; unref drops the buffer reference
		av_packet_unref(packet);
	}
	
	LOGI("flush cached frames");
	
	/* flush cached frames by feeding an empty packet */
	packet->data=NULL;
	packet->size=0;
	do{
		decode_packet(&got_frame,1,stream_video_id,stream_audio_id,packet,vc_video_ctx,vc_audio_ctx,frame);
	}while(got_frame);
	
	LOGI("Demuxing succeeded ");
	
	end:
	    //avcodec_close takes AVCodecContext*, NOT AVCodecContext** — the
	    //original passed &vc_video_ctx / &vc_audio_ctx, a type error
	    if(vc_video_ctx){
			avcodec_close(vc_video_ctx);
		}
		if(vc_audio_ctx){
			avcodec_close(vc_audio_ctx);
		}
		avformat_close_input(&fmt_ctx);
		if(video_file){
			fclose(video_file);
			video_file=NULL;
		}
		if(audio_file){
			fclose(audio_file);
			audio_file=NULL;
		}
		av_frame_free(&frame);
		free(packet);
		swr_free(&au_convert_ctx);
		if(out_buffer){
			av_free(out_buffer);
			out_buffer=NULL;
		}
		//release the UTF-8 strings obtained from the JVM (previously leaked)
		(*env)->ReleaseStringUTFChars(env,sourceFile,fileName);
		(*env)->ReleaseStringUTFChars(env,videoFile,saveVideoFileName);
		(*env)->ReleaseStringUTFChars(env,audioFile,saveAudioFileName);
	
}

/*
 * Find the best stream of the given media type in fmt_ctx and open a
 * decoder for it.  On success the stream index is stored through
 * stream_id and 0 is returned; on failure a negative AVERROR code is
 * returned and *stream_id is left untouched.
 */
int open_codec_context(int* stream_id,AVFormatContext* fmt_ctx,enum AVMediaType type){
	int ret,stream_index;
	//human-readable media-type name, used only in log messages
	//(av_get_media_type_string returns const char*, so keep the qualifier)
	const char* media_type=av_get_media_type_string(type);
	AVStream* av_stream;
	AVCodecContext* acode_ctx;
	AVCodec* av_codec;
	
	//wanted_stream_nb = -1 lets FFmpeg pick the most suitable stream
	ret=av_find_best_stream(fmt_ctx,type,-1,-1,NULL,0);
	if(ret<0){
		LOGE("could not find %s stream in source file",media_type);
		return ret;
	}
	stream_index=ret;
	av_stream=fmt_ctx->streams[stream_index];
	//NOTE(review): AVStream.codec is deprecated; new code should use
	//avcodec_alloc_context3 + avcodec_parameters_to_context instead —
	//kept here because the callers in this file share this context
	acode_ctx=av_stream->codec;
	//look up a decoder matching the stream's codec id
	av_codec=avcodec_find_decoder(acode_ctx->codec_id);
	if(av_codec==NULL){
		LOGE("failed find %s type of codec",media_type);
		return AVERROR(EINVAL);
	}
	
	//open the decoder, finishing initialization of the AVCodecContext
	ret=avcodec_open2(acode_ctx,av_codec,NULL);
	if(ret!=0){
		//BUGFIX: this branch previously repeated the "failed find" message
		LOGE("failed open %s type of codec",media_type);
		return ret;
	}
	
	//publish the stream index only after everything succeeded
	*stream_id=stream_index;
	
	return ret;
	
}

/*
 * Decode one AVPacket belonging to either the video or the audio stream.
 * got_frame is set non-zero when a complete frame was produced; cached
 * is non-zero when draining the decoder (parameter kept for the callers,
 * the old decode_* APIs infer draining from packet->data == NULL).
 * Returns the number of bytes consumed from the packet (so the caller
 * can advance packet->data/size), or a negative AVERROR code on error.
 */
int decode_packet(int* got_frame,int cached,const int stream_video_id,const int stream_audio_id,const AVPacket* packet,
                  AVCodecContext* video_ctx,AVCodecContext* audio_ctx,AVFrame* frame){
	int ret=0;
	int decoded=packet->size;
	*got_frame=0;
	
	//BUGFIX: pts is int64_t — %ld is wrong on 32-bit Android, so cast
	//to long long and use %lld
	LOGI("pts=%lld size=%d",(long long)packet->pts,packet->size);
	
	if(packet->stream_index==stream_video_id){
		
		/* decode video:
		On error a negative value is returned, otherwise the number of bytes used or zero if no frame could be decompressed. */
		ret=avcodec_decode_video2(video_ctx,frame,got_frame,packet);
		if(ret<0){
			LOGE("error decode video frame %s",av_err2str(ret));
			return ret;
		}
		
		//non-zero means a full frame was produced
		if(*got_frame){
			LOGI("decode video");
			//assumes YUV420P output: Y plane is w*h bytes, U/V are w*h/4
			//each — TODO confirm pix_fmt (and that linesize == width)
			int y_size=video_ctx->width*video_ctx->height;
			fwrite(frame->data[0],1,y_size,video_file);    //Y
			fwrite(frame->data[1],1,y_size/4,video_file);  //U
			fwrite(frame->data[2],1,y_size/4,video_file);  //V
			
		}
		
		
	}else if(packet->stream_index==stream_audio_id){
		
		//decode audio
		ret=avcodec_decode_audio4(audio_ctx,frame,got_frame,packet);
		if(ret<0){
			LOGE("error decode audio frame %s",av_err2str(ret));
			return ret;
		}
		
		/* BUGFIX: an audio decoder may consume less than the whole packet;
		   report only the bytes actually used so the caller advances
		   correctly (same pattern as FFmpeg's demuxing_decoding example) */
		decoded=FFMIN(ret,packet->size);
		
		if(*got_frame){
			LOGI("decode audio");
			
			swr_convert(au_convert_ctx,&out_buffer, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)frame->data , frame->nb_samples);
			
			//NOTE(review): writes the fixed out_buffer_size rather than the
			//bytes swr_convert actually produced — the last frame may be
			//padded/truncated; compute the size from swr_convert's return
			//value for exact output
			fwrite(out_buffer, 1, out_buffer_size, audio_file);
		}
	}
	
	//BUGFIX: the function previously fell off the end without returning a
	//value — undefined behavior, since the caller uses the result
	return decoded;
}

七、使用新版 FFmpeg API 通过 AudioTrack 播放 PCM 音频

//--------------------------安卓的log
#include <jni.h>
#include <android/log.h>
#define LOG_TAG "zbv"
#define  LOGD(...)  __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
#define  LOGE(...)  __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
//--------------------------安卓的log

//头文件中函数的声明重复没关系,重点是同一个文件内不要重复包含同一个头文件就可以了

//memcpy
#include <string.h>

//--------------------------ffmpeg
//格式封装
#include <libavformat/avformat.h>
//编解码
#include <libavcodec/avcodec.h>
//重采样
#include <libswresample/swresample.h>
//设置采样参数
#include <libavutil/opt.h>
//--------------------------ffmpeg


#ifdef __cplusplus
extern "C" {
#endif

static const enum AVSampleFormat dst_sample_fmt=AV_SAMPLE_FMT_S16;

/*
 * Open the audio file at sourcePath, decode it with the new
 * send/receive FFmpeg API, resample each frame to packed S16 and push
 * the PCM to a Java AudioTrack obtained via the caller's
 * createAudioTrack(int sampleRate, int channels) method.
 */
JNIEXPORT void Java_com_example_simpleTestFFmpeg_AudioTractMusicActivity_decodeMusicPlayByAudioTrack(JNIEnv* env,jclass clazz,jstring sourcePath){
	
	AVFormatContext* fmt_ctx=NULL;//必须初始化不然open_input会挂 (must be NULL for avformat_open_input)
	int ret,dst_nb_sample,max_dst_nb_sample,stream_index;
	AVStream* stream;
	AVCodec* codec;
	//BUGFIX: everything released at "end:" starts as NULL so that every
	//"goto end" path is safe — freeing an uninitialized pointer is
	//undefined behavior, while all the av_*_free helpers accept NULL
	AVCodecContext* audio_codec_ctx=NULL;
	struct SwrContext* swr_ctx=NULL;
	AVFrame* frame=NULL;
	AVPacket* packet=NULL;
	
	uint8_t **dst_data=NULL;
	
	//step 1: fetch the Java string (C code uses (*env)->..., C++ uses env->...)
	const char* original_path=(*env)->GetStringUTFChars(env,sourcePath,NULL);
	if(!original_path){
		//JVM out of memory; nothing acquired yet, safe to bail out directly
		return;
	}
	
	LOGE("原始文件path=%s",original_path);
	
	//step 2: no global init needed with the new API
	//av_register_all() is deprecated; avformat_network_init() is only
	//needed with very old OpenSSL builds
	
	//open the input and read the header (3rd arg NULL = autodetect the format)
	if(avformat_open_input(&fmt_ctx,original_path,NULL,NULL)<0){
		LOGD("无法打开源文件");
		goto end;//BUGFIX: early "return" here leaked original_path
	}
	
	//read some packets to discover the stream information
	if(avformat_find_stream_info(fmt_ctx,NULL)<0){
		LOGD("无法读取源文件流信息");
		goto end;
	}
	
	//pick the best audio stream (wanted_stream_nb = -1: auto select)
	ret=av_find_best_stream(fmt_ctx,AVMEDIA_TYPE_AUDIO,-1,-1,NULL,0);
	if(ret<0){
		LOGD("没有找到%s类型的输入流",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
		goto end;
	}
	
	stream_index=ret;
	//streams is an array: AVStream **streams
	stream=fmt_ctx->streams[stream_index];
	
	//look up the decoder matching the stream's codec id
	codec=avcodec_find_decoder(stream->codecpar->codec_id);
	if(!codec){
		LOGD("失败去找到%s类型的解码器",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
		goto end;//BUGFIX: was "return", leaking fmt_ctx and the jstring chars
	}
	
	//allocate an AVCodecContext with defaults; freed at "end:"
	audio_codec_ctx=avcodec_alloc_context3(codec);
	if(!audio_codec_ctx){
		LOGD("给AVCodecContext内存分配失败");
		goto end;
	}
	
	//copy the stream parameters into the codec context
	ret=avcodec_parameters_to_context(audio_codec_ctx,stream->codecpar);
	if(ret<0){
		LOGD("给AVCodecContext拷贝参数失败");
		goto end;
	}
	
	//initialize the AVCodecContext with the given AVCodec (open the decoder)
	ret=avcodec_open2(audio_codec_ctx,codec,NULL);
	if(ret<0){
		LOGD("打开%s类型的解码器失败",av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
		goto end;
	}
	
	//some demuxers leave channel_layout at 0: derive it from the channel
	//count so the resampler options below are valid
	if(audio_codec_ctx->channel_layout==0){
		audio_codec_ctx->channel_layout=av_get_default_channel_layout(audio_codec_ctx->channels);
	}
	
	//---- resampler setup
	swr_ctx=swr_alloc();
	if(!swr_ctx){
		LOGD("分配resample context失败");
		goto end;
	}
	
	av_opt_set_int(swr_ctx,"in_channel_layout",audio_codec_ctx->channel_layout,0);
	av_opt_set_int(swr_ctx,"in_sample_rate",audio_codec_ctx->sample_rate,0);
	av_opt_set_sample_fmt(swr_ctx,"in_sample_fmt",audio_codec_ctx->sample_fmt,0);
	
	//channel count derived from the layout
	int channels=av_get_channel_layout_nb_channels(audio_codec_ctx->channel_layout);
	const char* fmt_name=av_get_sample_fmt_name(audio_codec_ctx->sample_fmt);
	
	LOGD("channels=%d,sampleRate=%d,sampleFmt=%s",channels,audio_codec_ctx->sample_rate,fmt_name);
	
	//same layout and rate on the output side — only the sample format
	//changes (to packed S16, what AudioTrack expects)
	av_opt_set_int(swr_ctx,"out_channel_layout",audio_codec_ctx->channel_layout,0);
	av_opt_set_int(swr_ctx,"out_sample_rate",audio_codec_ctx->sample_rate,0);
	av_opt_set_sample_fmt(swr_ctx,"out_sample_fmt",dst_sample_fmt,0);
	
	//initialize the resample context once all options are set
	ret=swr_init(swr_ctx);
	if(ret<0){
		LOGD("初始化resample context失败");
		goto end;
	}
	
	//frame_size = samples per channel per frame; in and out rates are
	//equal here, so this rescale is effectively frame_size rounded up
	max_dst_nb_sample=dst_nb_sample=av_rescale_rnd(audio_codec_ctx->frame_size,audio_codec_ctx->sample_rate,audio_codec_ctx->sample_rate,AV_ROUND_UP);
	
	//allocate the output sample buffer (channels, samples/channel, fmt, align)
	ret=av_samples_alloc_array_and_samples(&dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,0);
	if(ret<0){
		LOGD("分配dst_data失败");
		goto end;
	}
	
	frame=av_frame_alloc();
	if(!frame){
		LOGD("无法分配frame");
		goto end;
	}
	
	//BUGFIX: was raw malloc()+deprecated av_init_packet() and the packet
	//was never freed; av_packet_alloc pairs with av_packet_free at "end:"
	packet=av_packet_alloc();
	if(!packet){
		LOGD("无法分配packet");
		goto end;
	}
	
	//-------------------- obtain the AudioTrack from the Java side
	jclass my_java_class=(*env)->GetObjectClass(env,clazz);
	jmethodID audiotrack_id=(*env)->GetMethodID(env,my_java_class,"createAudioTrack","(II)Landroid/media/AudioTrack;");
	jobject audio_track=(*env)->CallObjectMethod(env,clazz,audiotrack_id,audio_codec_ctx->sample_rate,channels);
	//start playback
	jclass audiotrack_class=(*env)->GetObjectClass(env,audio_track);
	jmethodID at_play=(*env)->GetMethodID(env,audiotrack_class,"play","()V");
	(*env)->CallVoidMethod(env,audio_track,at_play);
	//cache the write([BII)I method id for the loop below
	jmethodID at_write=(*env)->GetMethodID(env,audiotrack_class,"write","([BII)I");
	//-------------------- obtain the AudioTrack
	
	LOGD("start av_read_frame");
	
	//read one packet of the stream per iteration
	while((ret=av_read_frame(fmt_ctx,packet))>=0){
		if(packet->stream_index==stream_index){
			
			/* send the packet with the compressed data to the decoder */
			ret=avcodec_send_packet(audio_codec_ctx,packet);
			if(ret<0){
				LOGD("音频解码AVPacket出错了");
				goto end;
			}
			
			/* read all the output frames (in general there may be any number of them) */
			while(ret>=0){
				ret=avcodec_receive_frame(audio_codec_ctx,frame);
				if(ret==AVERROR(EAGAIN) || ret==AVERROR_EOF){
					break;//need more input / stream finished
				}else if(ret<0){
					LOGD("音频解码AVFrame出错了");
					goto end;
				}
				
				//account for samples still buffered inside the resampler
				dst_nb_sample=av_rescale_rnd(swr_get_delay(swr_ctx,frame->sample_rate)+frame->nb_samples,frame->sample_rate,frame->sample_rate,AV_ROUND_UP);
				if(dst_nb_sample>max_dst_nb_sample){
					//grow the output buffer
					av_freep(&dst_data[0]);
					ret=av_samples_alloc(dst_data,NULL,channels,dst_nb_sample,dst_sample_fmt,1);
					if(ret<0){
						LOGD("重新分配dst_data失败");
						break;
					}
					max_dst_nb_sample=dst_nb_sample;
				}
				
				//convert to packed S16; ret = samples per channel produced
				//(BUGFIX: input planes are const uint8_t **)
				ret=swr_convert(swr_ctx,dst_data,dst_nb_sample,(const uint8_t**)frame->data,frame->nb_samples);
				if(ret<0){
					LOGD("swr_convert转换错误");
					goto end;
				}
				
				//byte size of the converted samples
				int dst_buffer_size=av_samples_get_buffer_size(NULL,channels,ret,dst_sample_fmt,1);
				if(dst_buffer_size<0){
					LOGD("获取样本buffer大小失败");
					goto end;
				}
				
				//MP3 frames hold 1152 samples, AAC 1024/2048
				LOGD("WRITE TO AUDIOTRACK %d",dst_buffer_size);//4608
				
				//copy the PCM into a Java byte[] and hand it to AudioTrack
				jbyteArray audio_sample_array=(*env)->NewByteArray(env,dst_buffer_size);
				jbyte* sample_byte=(*env)->GetByteArrayElements(env,audio_sample_array,NULL);
				memcpy(sample_byte,dst_data[0],dst_buffer_size);
				//mode 0: copy back and free the elements buffer
				(*env)->ReleaseByteArrayElements(env,audio_sample_array,sample_byte,0);
				(*env)->CallIntMethod(env,audio_track,at_write,audio_sample_array,0,dst_buffer_size);
				//release the local ref every iteration so the local
				//reference table cannot overflow on long files
				(*env)->DeleteLocalRef(env,audio_sample_array);
				
			}
		}
		
		//formerly av_free_packet(); unref keeps the packet reusable
		av_packet_unref(packet);
	}
	
	//NOTE(review): the decoder is never drained (avcodec_send_packet(ctx,
	//NULL) followed by a final receive loop), so the last few buffered
	//frames are dropped — acceptable for this demo, worth fixing for
	//gapless playback
	
	LOGE("解封装解码全部完成!!!");
	
	end:
	    avcodec_free_context(&audio_codec_ctx);
		avformat_close_input(&fmt_ctx);
	
		if(dst_data){
			av_freep(&dst_data[0]);
		}
		av_freep(&dst_data);
		
		av_frame_free(&frame);
		av_packet_free(&packet);//BUGFIX: the packet used to leak
		swr_free(&swr_ctx);
		
		//release the chars obtained from the jstring
		(*env)->ReleaseStringUTFChars(env,sourcePath,original_path);
	
}


#ifdef __cplusplus
}
#endif

学习资料参考:FFmpeg示例文档

资源so下载

最后附上编译好的ffmpeg整个so
csdn下载地址:https://download.csdn.net/download/zb52588/10674571
百度云下载地址:
链接:https://pan.baidu.com/s/1fvJNLr-AsDy1ok8XGCn93A 密码:r48t

本实例资源下载(含ffmpeg的ffplay播放)

链接:https://pan.baidu.com/s/1UMvU0t-RInpwgPNC-phupA 密码:yzji

猜你喜欢

转载自blog.csdn.net/zb52588/article/details/82760686