FFmpeg sws_scale explained

1. Introduction

If you need to convert between two AVPixelFormat values (for example YUV420P to YUV422), or to resize an image by scaling it up or down, you use FFmpeg's swscale library. This article is written against FFmpeg 3.3.3.

2. AVPixelFormat definition


    
    
    enum AVPixelFormat {
        AV_PIX_FMT_NONE = -1,
        AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
        AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
        AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB…
        AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR…
        AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
        AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
        AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
        AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
        AV_PIX_FMT_GRAY8, ///< Y , 8bpp
        AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
        AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
        AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette
        AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
        AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
        AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
    #if FF_API_XVMC
        AV_PIX_FMT_XVMC_MPEG2_MC, ///< XVideo Motion Acceleration via common packet passing
        AV_PIX_FMT_XVMC_MPEG2_IDCT,
        AV_PIX_FMT_XVMC = AV_PIX_FMT_XVMC_MPEG2_IDCT,
    #endif /* FF_API_XVMC */
        AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
        AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
        AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
        AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
        AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
        AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
        AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
        AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
        AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
        AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
        AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB…
        AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA…
        AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR…
        AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA…
        AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
        AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
        AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
        AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
        AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
    #if FF_API_VDPAU
        AV_PIX_FMT_VDPAU_H264, ///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
        AV_PIX_FMT_VDPAU_MPEG1, ///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
        AV_PIX_FMT_VDPAU_MPEG2, ///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
        AV_PIX_FMT_VDPAU_WMV3, ///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
        AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    #endif
        AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
        AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
        AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
        AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
        AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
        AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
        AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
        AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
        AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
        AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
    #if FF_API_VAAPI
        /** @name Deprecated pixel formats */
        /**@{*/
        AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
        AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
        AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID
        /**@}*/
        AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD,
    #else
        /**
         * Hardware acceleration through VA-API, data[3] contains a
         * VASurfaceID.
         */
        AV_PIX_FMT_VAAPI,
    #endif
        AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
        AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
        AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
        AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
        AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
        AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    #if FF_API_VDPAU
        AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG-4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    #endif
        AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
        AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
        AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
        AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
        AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
        AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha
        AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
        AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
        AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
        AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
        /**
         * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
         * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
         * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
         */
        AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
        AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
        AV_PIX_FMT_YUV420P10BE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
        AV_PIX_FMT_YUV420P10LE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
        AV_PIX_FMT_YUV422P10BE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
        AV_PIX_FMT_YUV422P10LE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
        AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
        AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
        AV_PIX_FMT_YUV444P10BE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
        AV_PIX_FMT_YUV444P10LE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
        AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
        AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
        AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
        AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
        AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP
        AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
        AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
        AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
        AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
        AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
        AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
        AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
        AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
        AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
        AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
        AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
        AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
        AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
        AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
        AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
        AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
        AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
        AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
        AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
        AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
        AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
        AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
        AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
        AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
        AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
        AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
        AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
        AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
        AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
        AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
        AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
        AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
        AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
        AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
        AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
        AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
        AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
        AV_PIX_FMT_VDA, ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef
        AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian)
        AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian)
        AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
        AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
        AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
        /**
         * HW acceleration through QSV, data[3] contains a pointer to the
         * mfxFrameSurface1 structure.
         */
        AV_PIX_FMT_QSV,
        /**
         * HW acceleration though MMAL, data[3] contains a pointer to the
         * MMAL_BUFFER_HEADER_T structure.
         */
        AV_PIX_FMT_MMAL,
        AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer
        /**
         * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
         * exactly as for system memory frames.
         */
        AV_PIX_FMT_CUDA,
        AV_PIX_FMT_0RGB= 0x123+ 4, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB… X=unused/undefined
        AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX… X=unused/undefined
        AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR… X=unused/undefined
        AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX… X=unused/undefined
        AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
        AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
        AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
        AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
        AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
        AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
        AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
        AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
        AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
        AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
        AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
        AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
        AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
        AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
        AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
        AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
        AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
        AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
        AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
        AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
        AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
        AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
        AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
        AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
        AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
        AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
        AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
        AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
        AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
    #if !FF_API_XVMC
        AV_PIX_FMT_XVMC, ///< XVideo Motion Acceleration via common packet passing
    #endif /* !FF_API_XVMC */
        AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
        AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
        AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
        AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
        AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
        AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
        AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
        AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
        AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
        AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian
        AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian
        AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian
        AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian
        AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec
        AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian
        AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian
        AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian
        AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian
        AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian
        AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian
        AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
    };
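
Not every entry in this enum is a raster layout that swscale can actually read or write; the hardware formats (VDPAU, VAAPI, QSV, CUDA and so on) are only opaque surface handles. If in doubt, you can ask libswscale at run time. The following is a minimal sketch (the helper name report_format is my own; sws_isSupportedInput/sws_isSupportedOutput come from libswscale/swscale.h and av_get_pix_fmt_name from libavutil/pixdesc.h):

    // Sketch: ask libswscale at run time whether it can read/write a format.
    #include <stdio.h>
    extern "C"
    {
    #include "libswscale/swscale.h"
    #include "libavutil/pixdesc.h"
    }

    static void report_format(enum AVPixelFormat fmt)   // helper name is mine
    {
        printf("%-16s input:%s output:%s\n",
               av_get_pix_fmt_name(fmt),
               sws_isSupportedInput(fmt)  ? "yes" : "no",
               sws_isSupportedOutput(fmt) ? "yes" : "no");
    }

    // e.g. report_format(AV_PIX_FMT_YUV420P);  report_format(AV_PIX_FMT_VDPAU);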

For usage you can also study swscale-example.c in the FFmpeg source tree. The main functions involved are the following:


    
    
    av_warn_unused_result
    int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);

    /**
     * Free the swscaler context swsContext.
     * If swsContext is NULL, then does nothing.
     */
    void sws_freeContext(struct SwsContext *swsContext);

    /**
     * Allocate and return an SwsContext. You need it to perform
     * scaling/conversion operations using sws_scale().
     *
     * @param srcW the width of the source image
     * @param srcH the height of the source image
     * @param srcFormat the source image format
     * @param dstW the width of the destination image
     * @param dstH the height of the destination image
     * @param dstFormat the destination image format
     * @param flags specify which algorithm and options to use for rescaling
     * @param param extra parameters to tune the used scaler
     *              For SWS_BICUBIC param[0] and [1] tune the shape of the basis
     *              function, param[0] tunes f(1) and param[1] f´(1)
     *              For SWS_GAUSS param[0] tunes the exponent and thus cutoff
     *              frequency
     *              For SWS_LANCZOS param[0] tunes the width of the window function
     * @return a pointer to an allocated context, or NULL in case of error
     * @note this function is to be removed after a saner alternative is
     *       written
     */
    struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
                                      int dstW, int dstH, enum AVPixelFormat dstFormat,
                                      int flags, SwsFilter *srcFilter,
                                      SwsFilter *dstFilter, const double *param);

    
    
    /**
     * Scale the image slice in srcSlice and put the resulting scaled
     * slice in the image in dst. A slice is a sequence of consecutive
     * rows in an image.
     *
     * Slices have to be provided in sequential order, either in
     * top-bottom or bottom-top order. If slices are provided in
     * non-sequential order the behavior of the function is undefined.
     *
     * @param c         the scaling context previously created with
     *                  sws_getContext()
     * @param srcSlice  the array containing the pointers to the planes of
     *                  the source slice
     * @param srcStride the array containing the strides for each plane of
     *                  the source image
     * @param srcSliceY the position in the source image of the slice to
     *                  process, that is the number (counted starting from
     *                  zero) in the image of the first row of the slice
     * @param srcSliceH the height of the source slice, that is the number
     *                  of rows in the slice
     * @param dst       the array containing the pointers to the planes of
     *                  the destination image
     * @param dstStride the array containing the strides for each plane of
     *                  the destination image
     * @return          the height of the output slice
     */
    int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
                  const int srcStride[], int srcSliceY, int srcSliceH,
                  uint8_t * const dst[], const int dstStride[]);

The function that does the real work is, of course, sws_scale; its parameters are documented in the declaration above. Note the fourth parameter, srcSliceY: it is the row in the source image at which the slice to be processed starts, so to process the whole image from the top simply pass 0. A complete example follows, which I will also upload to GitHub for download.
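
The example in the next section scales YUV420P to YUV420P. For the format-conversion case mentioned in the introduction, here is a minimal sketch of converting one in-memory YUV420P frame to packed RGB24; the function name and the use of av_image_alloc for the destination buffer are my own choices, not part of the original article:

    // Sketch: convert one in-memory YUV420P frame to packed RGB24.
    // src_data/src_linesize are assumed to already describe a decoded frame.
    #include <stdint.h>
    extern "C"
    {
    #include "libswscale/swscale.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mem.h"
    }

    static int yuv420p_to_rgb24(const uint8_t *const src_data[4], const int src_linesize[4],
                                int width, int height,
                                uint8_t *dst_data[4], int dst_linesize[4])
    {
        // One packed RGB plane; av_image_alloc fills dst_data/dst_linesize.
        if (av_image_alloc(dst_data, dst_linesize, width, height, AV_PIX_FMT_RGB24, 16) < 0)
            return -1;

        struct SwsContext *ctx = sws_getContext(width, height, AV_PIX_FMT_YUV420P,
                                                width, height, AV_PIX_FMT_RGB24,
                                                SWS_BILINEAR, nullptr, nullptr, nullptr);
        if (ctx == nullptr) {
            av_freep(&dst_data[0]);
            return -1;
        }

        // srcSliceY = 0, srcSliceH = height: hand the whole image over as one slice.
        sws_scale(ctx, src_data, src_linesize, 0, height, dst_data, dst_linesize);
        sws_freeContext(ctx);
        return 0;   // caller releases the RGB buffer with av_freep(&dst_data[0])
    }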


3. Example

Straight to the code:


    
    
    /*
     * copyright (c) 2017 老衲不出家
     *
     * 2017-08-11
     *
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>   // malloc/free (missing from the original listing)
    #include <string.h>
    extern "C"
    {
    #include "libswscale/swscale.h"
    #include "libavutil/pixfmt.h"
    }

    const char *srcFileName = "ds_480x272.yuv";
    const char *dstFileName = "ds_720x576.yuv";

    int main()
    {
        // Source YUV dimensions
        const int in_width = 480;
        const int in_height = 272;
        // Destination YUV dimensions
        const int out_width = 720;
        const int out_height = 576;
        const int read_size = in_width * in_height * 3 / 2;     // one YUV420P frame
        const int write_size = out_width * out_height * 3 / 2;

        struct SwsContext *img_convert_ctx = nullptr;
        uint8_t *inbuf[4];
        uint8_t *outbuf[4];
        int inlinesize[4] = { in_width, in_width / 2, in_width / 2, 0 };
        int outlinesize[4] = { out_width, out_width / 2, out_width / 2, 0 };

        uint8_t *ptr_src_yuv_buf = new uint8_t[read_size];
        uint8_t *ptr_dst_yuv_buf = new uint8_t[write_size];

        FILE *fin = fopen(srcFileName, "rb");
        FILE *fout = fopen(dstFileName, "wb");
        if (fin == NULL) {
            fprintf(stderr, "open input file %s error.\n", srcFileName);
            return -1;
        }
        if (fout == NULL) {
            fprintf(stderr, "open output file %s error.\n", dstFileName);
            return -1;
        }

        inbuf[0] = (uint8_t *) malloc(in_width * in_height);
        inbuf[1] = (uint8_t *) malloc(in_width * in_height >> 2);
        inbuf[2] = (uint8_t *) malloc(in_width * in_height >> 2);
        inbuf[3] = NULL;
        outbuf[0] = (uint8_t *) malloc(out_width * out_height);
        outbuf[1] = (uint8_t *) malloc(out_width * out_height >> 2);
        outbuf[2] = (uint8_t *) malloc(out_width * out_height >> 2);
        outbuf[3] = NULL;

        // ********* Initialize software scaling *********
        // ********* sws_getContext **********************
        img_convert_ctx = sws_getContext(in_width, in_height, AV_PIX_FMT_YUV420P,
                                         out_width, out_height, AV_PIX_FMT_YUV420P,
                                         SWS_POINT, nullptr, nullptr, nullptr);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context!\n");
            return -1;
        }

        int32_t in_y_size = in_width * in_height;
        int32_t out_y_size = out_width * out_height;
        bool bExit = false;
        while (!bExit) {
            // fread() returns the number of bytes read; stop on a short read or EOF.
            if (fread(ptr_src_yuv_buf, 1, read_size, fin) != (size_t)read_size) {
                bExit = true;
                break;
            }
            // Split the contiguous YUV420P frame into its Y/U/V planes.
            memcpy(inbuf[0], ptr_src_yuv_buf, in_y_size);
            memcpy(inbuf[1], ptr_src_yuv_buf + in_y_size, in_y_size / 4);
            memcpy(inbuf[2], ptr_src_yuv_buf + in_y_size * 5 / 4, in_y_size / 4);

            // ********* The core call ********
            // ********* sws_scale ************
            sws_scale(img_convert_ctx, inbuf, inlinesize,
                      0, in_height, outbuf, outlinesize);

            // Re-pack the scaled planes into one contiguous buffer and write it out.
            memcpy(ptr_dst_yuv_buf, outbuf[0], out_y_size);
            memcpy(ptr_dst_yuv_buf + out_y_size, outbuf[1], out_y_size >> 2);
            memcpy(ptr_dst_yuv_buf + (out_y_size * 5 >> 2), outbuf[2], out_y_size >> 2);
            fwrite(ptr_dst_yuv_buf, 1, write_size, fout);
        }

        // ********* Tear down *************
        // ********* sws_freeContext *******
        sws_freeContext(img_convert_ctx);
        fclose(fin);
        fclose(fout);
        for (int i = 0; i < 3; i++) {
            free(inbuf[i]);
            free(outbuf[i]);
        }
        delete[] ptr_src_yuv_buf;
        ptr_src_yuv_buf = nullptr;
        delete[] ptr_dst_yuv_buf;
        ptr_dst_yuv_buf = nullptr;
        return 0;
    }
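
One possible simplification of the loop above (a sketch, reusing the same variable names): because each frame is read into one contiguous, tightly packed YUV420P buffer whose strides equal the plane widths, the per-plane memcpy into inbuf/outbuf can be skipped entirely by pointing the plane pointers straight into ptr_src_yuv_buf and ptr_dst_yuv_buf:

    // Sketch: same loop body as above, but without the intermediate copies.
    // The plane pointers are aimed directly into the contiguous frame buffers.
    uint8_t *src_planes[4] = { ptr_src_yuv_buf,                      // Y
                               ptr_src_yuv_buf + in_y_size,          // U
                               ptr_src_yuv_buf + in_y_size * 5 / 4,  // V
                               nullptr };
    uint8_t *dst_planes[4] = { ptr_dst_yuv_buf,
                               ptr_dst_yuv_buf + out_y_size,
                               ptr_dst_yuv_buf + out_y_size * 5 / 4,
                               nullptr };
    sws_scale(img_convert_ctx, src_planes, inlinesize,
              0, in_height, dst_planes, outlinesize);
    fwrite(ptr_dst_yuv_buf, 1, write_size, fout);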

In short, the program reads a YUV file and scales it up; of course sws_scale can do much more than that.

GitHub repository: https://github.com/tanningzhong/ffmpeg-sws_scale

Reposted from: https://blog.csdn.net/tanningzhong/article/details/77101239
