ffmpeg关于音频的总结

转载:https://blog.csdn.net/zhuweigangzwg/article/details/51499123

一:关于音频分片的问题

  1. enum AVSampleFormat {
  2. AV_SAMPLE_FMT_NONE = -1,
  3. AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
  4. AV_SAMPLE_FMT_S16, ///< signed 16 bits
  5. AV_SAMPLE_FMT_S32, ///< signed 32 bits
  6. AV_SAMPLE_FMT_FLT, ///< float
  7. AV_SAMPLE_FMT_DBL, ///< double
  8. AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
  9. AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
  10. AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
  11. AV_SAMPLE_FMT_FLTP, ///< float, planar
  12. AV_SAMPLE_FMT_DBLP, ///< double, planar
  13. AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically
  14. };
  15. int sample_size = av_get_bytes_per_sample(sample_fmt);
  16. int planar = av_sample_fmt_is_planar(sample_fmt);
  17. //上面带P的都是分片的,和不分片的区别是:看下ffmpeg源代码
  18. line_size = planar ? FFALIGN(nb_samples * sample_size,align) :
  19. FFALIGN(nb_samples * sample_size * nb_channels, align);
  20. if (linesize)
  21. {
  22. *linesize = line_size;
  23. }
  24. return planar ? line_size * nb_channels : line_size;
1:无论是不是分片的数据总量是相同的.
2:分片的存储在内存中linesize如果两声道则左右分开占用linesize[0]和linesize[1].
3:不是分片的存储在内存中两声道不分开,左右左右....这样存储,只占用linesize[0].


二:音频信息
如果音频,样本:s16;采样率:44100;声道:2。
av_get_bytes_per_sample(s16) == 2;
1:假设从麦克风或者文件读出来的通过av_read_frame得到一个数据总量是88200个字节。
这个88200个字节是和帧无关的数据量。
2:如果接下来需要将这些数据编码成:
ffmpeg源码如下,AAC格式nb_samples和frame_size是1024,其他如下:

  1. switch (id) {
  2. case AV_CODEC_ID_ADPCM_ADX: return 32;
  3. case AV_CODEC_ID_ADPCM_IMA_QT: return 64;
  4. case AV_CODEC_ID_ADPCM_EA_XAS: return 128;
  5. case AV_CODEC_ID_AMR_NB:
  6. case AV_CODEC_ID_EVRC:
  7. case AV_CODEC_ID_GSM:
  8. case AV_CODEC_ID_QCELP:
  9. case AV_CODEC_ID_RA_288: return 160;
  10. case AV_CODEC_ID_AMR_WB:
  11. case AV_CODEC_ID_GSM_MS: return 320;
  12. case AV_CODEC_ID_MP1: return 384;
  13. case AV_CODEC_ID_ATRAC1: return 512;
  14. case AV_CODEC_ID_ATRAC3: return 1024 * framecount;
  15. case AV_CODEC_ID_ATRAC3P: return 2048;
  16. case AV_CODEC_ID_MP2:
  17. case AV_CODEC_ID_MUSEPACK7: return 1152;
  18. case AV_CODEC_ID_AC3: return 1536;
  19. }
1) AAC:
nb_samples和frame_size = 1024
一帧数据量:1024*2*av_get_bytes_per_sample(s16) = 4096个字节。
会编码:88200/(1024*2*av_get_bytes_per_sample(s16)) = 21.5帧数据
2) MP3:
nb_samples和frame_size = 1152
一帧数据量:1152*2*av_get_bytes_per_sample(s16) = 4608个字节。
MP3:则会编码:88200/(1152*2*av_get_bytes_per_sample(s16)) = 19.1帧数据


无论要编码成AAC还MP3都需要用到ffmpeg的fifo或者AVAudioFifo做数据缓冲。
3:持续时间方面
1) AAC
音频帧的播放时间=一个AAC帧对应的采样样本的个数/采样频率(单位为s)
一帧 1024个 sample。采样率 Samplerate 44100Hz,每秒44100个sample, 所以根据公式   音频帧的播放时间=一个AAC帧对应的采样样本的个数/采样频率
当前AAC一帧的播放时间是= 1024*1000000/44100 ≈ 23220微秒 ≈ 23.2ms
2) MP3
mp3 每帧均为1152个采样(sample), 则:
frame_duration = 1152 * 1000000 / sample_rate
例如:sample_rate = 44100HZ时,计算出的时长为26.122ms,这就是经常听到的mp3每帧播放时间固定为26ms的由来


三:关于avcodec_fill_audio_frame
看下面ffmpeg源码:
AVFrame的nb_samples用于算出一帧所需的数据量(needed_size),然后与buf_size比较以判断缓冲区是否足够;其余参数在这个函数里作用不大。

  1. int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
  2. enum AVSampleFormat sample_fmt, const uint8_t *buf,
  3. int buf_size, int align)
  4. {
  5. int ch, planar, needed_size, ret = 0;
  6. needed_size = av_samples_get_buffer_size( NULL, nb_channels,
  7. frame->nb_samples, sample_fmt,
  8. align);
  9. if (buf_size < needed_size)
  10. return AVERROR(EINVAL);
  11. planar = av_sample_fmt_is_planar(sample_fmt);
  12. if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
  13. if (!(frame->extended_data = av_mallocz_array(nb_channels,
  14. sizeof(*frame->extended_data))))
  15. return AVERROR(ENOMEM);
  16. } else {
  17. frame->extended_data = frame->data;
  18. }
  19. if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[ 0],
  20. ( uint8_t *)( intptr_t)buf, nb_channels, frame->nb_samples,
  21. sample_fmt, align)) < 0) {
  22. if (frame->extended_data != frame->data)
  23. av_freep(&frame->extended_data);
  24. return ret;
  25. }
  26. if (frame->extended_data != frame->data) {
  27. for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++)
  28. frame->data[ch] = frame->extended_data[ch];
  29. }
  30. return ret;
  31. }
  32. //这里关于分片的问题在上面已经讲过了。
  33. int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
  34. enum AVSampleFormat sample_fmt, int align)
  35. {
  36. int line_size;
  37. int sample_size = av_get_bytes_per_sample(sample_fmt);
  38. int planar = av_sample_fmt_is_planar(sample_fmt);
  39. /* validate parameter ranges */
  40. if (!sample_size || nb_samples <= 0 || nb_channels <= 0)
  41. return AVERROR(EINVAL);
  42. /* auto-select alignment if not specified */
  43. if (!align) {
  44. if (nb_samples > INT_MAX - 31)
  45. return AVERROR(EINVAL);
  46. align = 1;
  47. nb_samples = FFALIGN(nb_samples, 32);
  48. }
  49. /* check for integer overflow */
  50. if (nb_channels > INT_MAX / align ||
  51. ( int64_t)nb_channels * nb_samples > (INT_MAX - (align * nb_channels)) / sample_size)
  52. return AVERROR(EINVAL);
  53. line_size = planar ? FFALIGN(nb_samples * sample_size, align) :
  54. FFALIGN(nb_samples * sample_size * nb_channels, align);
  55. if (linesize)
  56. *linesize = line_size;
  57. return planar ? line_size * nb_channels : line_size;
  58. }


四:关于swr_convert和resample后的数据量

  1. //开始转换
  2. /*
  3. * @param s allocated Swr context, with parameters set
  4. * @param out output buffers, only the first one need be set in case of packed audio
  5. * @param out_count amount of space available for output in samples per channel
  6. * @param in input buffers, only the first one need to be set in case of packed audio
  7. * @param in_count number of input samples available in one channel
  8. *
  9. * @return number of samples output per channel, negative value on error
  10. */
  11. 这里看到:
  12. out_count是为每一个声道resample后准备的buf大小
  13. in_count是输入的每一个声道的buf长度,如果是摄像头采集的用av_read_frame得到的packet.size/channel.
  14. //开始转换(如果是分片的关于in_size需要测试后改正,或本代码可以适应)
  15. ret = swr_convert(swrcontext, pOutputFrame->data,buf_size_out/Channel_out,
  16. ( const uint8_t**)pInputFrame->data,buf_ActualLen_in/Channel_in);
  17. //得到resample后的buf大小(如果是分片的关于in_size需要测试后改正,或本代码可以适应)
  18. int outsize = ret * Channel_out;

猜你喜欢

转载自blog.csdn.net/u014162133/article/details/81015427