Wrapping the Intel Media SDK Decode API

Interfaces

In practice, we mainly use the following APIs:

  • 1. Initialization: virtual mfxStatus Init_Intel(mfxU32 nType, int nWidth, int nHight);
  • 2. Registering the display callback: long setparam(IntelPicCallBack cb, void * p);
  • 3. Decoding: virtual mfxStatus DecodeFrame(const char * pBuf, long nLen, long ntype, unsigned __int64 pts, long pts_ref_distance, unsigned __int64 dts);
  • 4. Teardown: virtual void Close();
    The parameters of these APIs are not described in detail here; their names are self-explanatory. A minimal usage sketch follows the list.
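
A minimal usage sketch of the wrapper, for orientation only: the caller function DecodeSample, the callback OnPicture, and the values fed to Init_Intel/DecodeFrame (codec, resolution, ntype = 0) are hypothetical placeholders, and error handling is reduced to bare status checks.

// Hypothetical caller: registers a picture callback, initializes the decoder,
// then feeds one encoded frame per DecodeFrame() call.
#include "pipeline_decode.h"

static void __stdcall OnPicture(unsigned char * pbuf, long nsize, long ntype,
    long nwidth, long nheight, unsigned __int64 npts, long pts_ref_distance,
    long delay, void * lparam)
{
    // pbuf holds one decoded I420 frame of nwidth x nheight; consume or copy it here.
}

void DecodeSample(const char * pFrame, long nFrameLen, unsigned __int64 pts)
{
    CDecodingPipeline pipeline;
    pipeline.setparam(OnPicture, NULL);                       // register the display callback
    if (pipeline.Init_Intel(MFX_CODEC_AVC, 1920, 1080) < MFX_ERR_NONE)
        return;
    // in a real player this call sits in the receive loop, once per encoded frame
    pipeline.DecodeFrame(pFrame, nFrameLen, 0, pts, 0, pts);
    pipeline.Close();
}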

Code

First, the wrapper code.
Header file: pipeline_decode.h

/******************************************************************************\
Copyright (c) 2005-2018, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

This sample was distributed or derived from the Intel's Media Samples package.
The original version of this sample may be obtained from https://software.intel.com/en-us/intel-media-server-studio
or https://software.intel.com/en-us/media-client-solutions-support.
\**********************************************************************************/

#ifndef __PIPELINE_DECODE_H__
#define __PIPELINE_DECODE_H__

#include "sample_defs.h"

#include <vector>
#include "hw_device.h"
#include "decode_render.h"
#include "mfx_buffering.h"
#include <memory>

#include "sample_utils.h"
#include "base_allocator.h"

#include "mfxmvc.h"
#include "mfxjpeg.h"
#include "mfxplugin.h"
#include "mfxplugin++.h"
#include "mfxvideo.h"
#include "mfxvideo++.h"
#include "mfxvp8.h"

#include "plugin_loader.h"
#include "general_allocator.h"

#include "xlock.h"

#ifndef MFX_VERSION
#error MFX_VERSION not defined
#endif

enum MemType {
    SYSTEM_MEMORY = 0x00,
    D3D9_MEMORY   = 0x01,
    D3D11_MEMORY  = 0x02,
};

enum eWorkMode {
  MODE_PERFORMANCE,
  MODE_RENDERING,
  MODE_FILE_DUMP
};

#if MFX_VERSION >= 1022
enum eDecoderPostProc {
  MODE_DECODER_POSTPROC_AUTO  = 0x1,
  MODE_DECODER_POSTPROC_FORCE = 0x2
};
#endif //MFX_VERSION >= 1022

struct sInputParams
{
    mfxU32 videoType;
    eWorkMode mode;
    MemType memType;
    bool    bUseHWLib; // true if application wants to use HW mfx library
    bool    bIsMVC; // true if Multi-View Codec is in use
    bool    bLowLat; // low latency mode
    bool    bCalLat; // latency calculation
    bool    bUseFullColorRange; //whether to use full color range
    mfxU16  nMaxFPS; //rendering limited by certain fps
    mfxU32  nWallCell;
    mfxU32  nWallW; //number of windows located in each row
    mfxU32  nWallH; //number of windows located in each column
    mfxU32  nWallMonitor; //monitor id, 0,1,.. etc
    bool    bWallNoTitle; //whether to show title for each window with fps value
#if MFX_VERSION >= 1022
    mfxU16  nDecoderPostProcessing;
#endif //MFX_VERSION >= 1022

    mfxU32  numViews; // number of views for Multi-View Codec
    mfxU32  nRotation; // rotation for Motion JPEG Codec
    mfxU16  nAsyncDepth; // asynchronous queue depth
    mfxU16  nTimeout; // timeout in seconds
    mfxU16  gpuCopy; // GPU Copy mode (three-state option)
    bool    bSoftRobustFlag;
    mfxU16  nThreadsNum;
    mfxI32  SchedulingType;
    mfxI32  Priority;

    mfxU16  Width;
    mfxU16  Height;

    mfxU32  fourcc;
    mfxU16  chromaType;
    mfxU32  nFrames;
    mfxU16  eDeinterlace;
    bool    outI420;

    bool    bPerfMode;
    bool    bRenderWin;
    mfxU32  nRenderWinX;
    mfxU32  nRenderWinY;
#if (MFX_VERSION >= 1025)
    bool    bErrorReport;
#endif

    mfxI32  monitorType;
#if defined(LIBVA_SUPPORT)
    mfxI32  libvaBackend;
#endif // defined(LIBVA_SUPPORT)

    msdk_char     strSrcFile[MSDK_MAX_FILENAME_LEN];
    msdk_char     strDstFile[MSDK_MAX_FILENAME_LEN];
    sPluginParams pluginParams;

    sInputParams()
    {
        MSDK_ZERO_MEMORY(*this);
    }
};

template<>struct mfx_ext_buffer_id<mfxExtMVCSeqDesc>{
    enum {id = MFX_EXTBUFF_MVC_SEQ_DESC};
};

struct CPipelineStatistics
{
    CPipelineStatistics():
        m_input_count(0),
        m_output_count(0),
        m_synced_count(0)
    {

    }
    virtual ~CPipelineStatistics(){}

    mfxU32 m_input_count;     // number of received incoming packets (frames or bitstreams)
    mfxU32 m_output_count;    // number of delivered outgoing packets (frames or bitstreams)
    mfxU32 m_synced_count;

private:
    CPipelineStatistics(const CPipelineStatistics&);
    void operator=(const CPipelineStatistics&);
};

// Callback invoked once per decoded picture: pbuf points to an I420 frame of nwidth x nheight,
// npts is the original presentation timestamp, delay is the PTS delta to the previous frame in
// milliseconds, and lparam is the user context passed to setparam().
typedef void(__stdcall * IntelPicCallBack) (unsigned char * pbuf, long  nsize, long ntype, long nwidth, long nheight, unsigned __int64 npts, long pts_ref_distance, long delay, void * lparam);
class CDecodingPipeline:public CBuffering, public CPipelineStatistics
{
public:
    CDecodingPipeline();
    virtual ~CDecodingPipeline();

	long setparam(IntelPicCallBack cb, void * p);
	BOOL IsHwAcclSupported(void);
    virtual mfxStatus Init_Intel(mfxU32 nType, int nWidth, int nHight);
	virtual mfxStatus DecodeHeader();
    virtual mfxStatus DecodeFrame(const char * pBuf, long  nLen, long ntype, unsigned __int64 pts, long pts_ref_distance, unsigned __int64 dts);
    virtual void Close();
    virtual mfxStatus ResetDecoder(sInputParams *pParams);

    void SetMultiView();
    void SetExtBuffersFlag()       { m_bIsExtBuffers = true; }
    mfxU64 GetTotalBytesProcessed() { return totalBytesProcessed + m_mfxBS.DataOffset; }

protected: // functions
    virtual mfxStatus InitMfxParams();

    // function for allocating a specific external buffer
    template <typename Buffer>
    mfxStatus AllocateExtBuffer();
    virtual void DeleteExtBuffers();

    virtual mfxStatus AllocateExtMVCBuffers();
    virtual void    DeallocateExtMVCBuffers();

    virtual void AttachExtParam();

    virtual mfxStatus CreateAllocator();
    virtual mfxStatus AllocFrames();
    virtual void DeleteFrames();
    virtual void DeleteAllocator();

    /** \brief Performs SyncOperation on the current output surface with the specified timeout.
     *
     * @return MFX_ERR_NONE Output surface was successfully synced and delivered.
     * @return MFX_ERR_MORE_DATA Array of output surfaces is empty, need to feed decoder.
     * @return MFX_WRN_IN_EXECUTION Specified timeout has elapsed.
     * @return MFX_ERR_UNKNOWN An error has occurred.
     */
    virtual mfxStatus SyncOutputSurface(mfxU32 wait);
    virtual mfxStatus DeliverOutput(mfxFrameSurface1* frame);

    virtual mfxStatus DeliverLoop(void);

    static unsigned int MFX_STDCALL DeliverThreadFunc(void* ctx);

protected: // variables
    mfxBitstream            m_mfxBS; // contains encoded data
    mfxU64 totalBytesProcessed;

    MFXVideoSession         m_mfxSession;
    mfxIMPL                 m_impl;
    MFXVideoDECODE*         m_pmfxDEC;
    mfxVideoParam           m_mfxVideoParams;

    std::unique_ptr<MFXVideoUSER>  m_pUserModule;
	std::unique_ptr<MFXPlugin> m_pPlugin;
    std::vector<mfxExtBuffer *> m_ExtBuffers;
    std::vector<mfxExtBuffer *> m_ExtBuffersMfxBS;
#if MFX_VERSION >= 1022
    mfxExtDecVideoProcessing m_DecoderPostProcessing;
#endif //MFX_VERSION >= 1022

#if (MFX_VERSION >= 1025)
    mfxExtDecodeErrorReport m_DecodeErrorReport;
#endif

    GeneralAllocator*       m_pGeneralAllocator;
    mfxAllocatorParams*     m_pmfxAllocatorParams;
    MemType                 m_memType;      // memory type of surfaces to use
    bool                    m_bExternalAlloc; // use memory allocator as external for Media SDK
    bool                    m_bDecOutSysmem; // use system memory for decoder output; if false - video memory
    mfxFrameAllocResponse   m_mfxResponse; // memory allocation response for decoder

    msdkFrameSurface*       m_pCurrentFreeSurface; // surface detached from free surfaces array
    msdkOutputSurface*      m_pCurrentFreeOutputSurface; // surface detached from free output surfaces array
    msdkOutputSurface*      m_pCurrentOutputSurface; // surface detached from output surfaces array

    MSDKSemaphore*          m_pDeliverOutputSemaphore; // to access to DeliverOutput method
    MSDKEvent*              m_pDeliveredEvent; // to signal when output surfaces will be processed
    mfxStatus               m_error; // error returned by DeliverOutput method
    bool                    m_bStopDeliverLoop;

    eWorkMode               m_eWorkMode; // work mode for the pipeline
    bool                    m_bIsMVC; // enables MVC mode (need to support several files as an output)
    bool                    m_bIsExtBuffers; // indicates if external buffers were allocated
    bool                    m_bIsVideoWall; // indicates special mode: decoding will be done in a loop
    bool                    m_bIsCompleteFrame;

    bool                    m_bPrintLatency;
    bool                    m_bOutI420;


    mfxU32                  m_nTimeout; // enables timeout for video playback, measured in seconds
    mfxU16                  m_nMaxFps; // fps limit; 0 if not specified
    mfxU32                  m_nFrames; //limit number of output frames

    mfxU16                  m_diMode;
    bool                    m_bSoftRobustFlag;

	msdk_tick               m_startTick;
	msdk_tick               m_delayTicks;

	mfxU32				m_mVideoType;

    mfxU32                  m_export_mode;
    mfxI32                  m_monitorType;

    bool                    m_bResetFileWriter;
    bool                    m_bResetFileReader;

	IntelPicCallBack m_cb;
	void * m_pcontext;
	unsigned char *m_pOutBuf;
	bool m_bInit;
	bool m_bInitMfx;
	CRITICAL_SECTION m_memLock;
	FILE *fp;
	unsigned __int64 m_last_pts;
	long m_nType;
	unsigned __int64 m_nPts;
	long m_nPts_ref_distance;
	unsigned __int64 m_nDts;
private:
    CDecodingPipeline(const CDecodingPipeline&);
    void operator=(const CDecodingPipeline&);
};

#endif // __PIPELINE_DECODE_H__

Source file: pipeline_decode.cpp

/******************************************************************************\
Copyright (c) 2005-2018, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

This sample was distributed or derived from the Intel's Media Samples package.
The original version of this sample may be obtained from https://software.intel.com/en-us/intel-media-server-studio
or https://software.intel.com/en-us/media-client-solutions-support.
\**********************************************************************************/

#include "mfx_samples_config.h"
#include <algorithm>

#if defined(_WIN32) || defined(_WIN64)
#include <tchar.h>
#include <windows.h>
#endif

#include <ctime>
#include <algorithm>
#include "pipeline_decode.h"
#include "sysmem_allocator.h"

#include "version.h"
#include "libyuv/convert.h"

#pragma warning(disable : 4100)

#define __SYNC_WA // avoid sync issue on Media SDK side

#ifndef MFX_VERSION
#error MFX_VERSION not defined
#endif

#define  MAX_BUFFER (8 * 1024*1024)

CDecodingPipeline::CDecodingPipeline()
{
	m_nFrames = 0;
	m_export_mode = 0;
	MSDK_ZERO_MEMORY(m_mfxBS);

	m_pmfxDEC = NULL;
	m_impl = 0;

	MSDK_ZERO_MEMORY(m_mfxVideoParams);

	m_pGeneralAllocator = NULL;
	m_pmfxAllocatorParams = NULL;
	m_memType = SYSTEM_MEMORY;
	m_bExternalAlloc = false;
	m_bDecOutSysmem = false;
	m_bSoftRobustFlag = false;

	MSDK_ZERO_MEMORY(m_mfxResponse);

	m_pCurrentFreeSurface = NULL;
	m_pCurrentFreeOutputSurface = NULL;
	m_pCurrentOutputSurface = NULL;

	m_pDeliverOutputSemaphore = NULL;
	m_pDeliveredEvent = NULL;
	m_error = MFX_ERR_NONE;
	m_bStopDeliverLoop = false;

	m_eWorkMode = MODE_PERFORMANCE;
	m_bIsMVC = false;
	m_bIsExtBuffers = false;
	m_bIsVideoWall = false;
	m_bIsCompleteFrame = false;
	m_bPrintLatency = false;

	m_nTimeout = 0;
	m_nMaxFps = 0;

	m_diMode = 0;

	m_bResetFileWriter = false;
	m_bResetFileReader = false;

	m_startTick = 0;
	m_delayTicks = 0;

#if MFX_VERSION >= 1022
	MSDK_ZERO_MEMORY(m_DecoderPostProcessing);
	m_DecoderPostProcessing.Header.BufferId = MFX_EXTBUFF_DEC_VIDEO_PROCESSING;
	m_DecoderPostProcessing.Header.BufferSz = sizeof(mfxExtDecVideoProcessing);
#endif //MFX_VERSION >= 1022

#if (MFX_VERSION >= 1025)
	MSDK_ZERO_MEMORY(m_DecodeErrorReport);
	m_DecodeErrorReport.Header.BufferId = MFX_EXTBUFF_DECODE_ERROR_REPORT;
#endif

	m_bOutI420 = false;

	m_monitorType = 0;
	totalBytesProcessed = 0;
	m_pOutBuf = NULL;
	//m_pOutBuf = new unsigned char[MAX_BUFFER];
	m_bInit = false;
	m_bInitMfx = false;
	m_mVideoType = 0;

	// for PTS delta calculation
	m_last_pts = 0;

	//fp = fopen("E:\\gpu.yuv", "wb+");
	::InitializeCriticalSection(&m_memLock);
}

CDecodingPipeline::~CDecodingPipeline()
{
	//fclose(fp);
	Close();
	if (NULL != m_pOutBuf)
	{
		delete [] m_pOutBuf; // allocated with new[] in InitMfxParams
		m_pOutBuf = NULL;
	}
	::DeleteCriticalSection(&m_memLock);
}

long CDecodingPipeline::setparam(IntelPicCallBack cb, void * p)
{
	m_cb = cb;
	m_pcontext = p;
	return 0;
}

BOOL CDecodingPipeline::IsHwAcclSupported(void)
{
	mfxIMPL impl = MFX_IMPL_AUTO;
	mfxSession session = NULL;
	mfxVersion ver = { MFX_VERSION_MINOR, MFX_VERSION_MAJOR };

	if (MFXInit(MFX_IMPL_AUTO, &ver, &session) < MFX_ERR_NONE)
		return FALSE;
	MFXQueryIMPL(session, &impl);
	MFXClose(session);

	// strip the MFX_IMPL_VIA_* flags so hardware sessions created via D3D9/D3D11 are detected too
	return MFX_IMPL_BASETYPE(impl) == MFX_IMPL_HARDWARE ? TRUE : FALSE;
}

mfxStatus CDecodingPipeline::Init_Intel(mfxU32 nType, int nWidth, int nHight)
{
	sInputParams        Params;   // decoder configuration (hardcoded defaults here, not parsed from a command line)
	Params.bUseHWLib = true;
	Params.bUseFullColorRange = false;
	Params.nAsyncDepth = 4;
	Params.nMaxFPS = 25;
	Params.gpuCopy = MFX_GPUCOPY_ON;
	Params.mode = MODE_FILE_DUMP;
	Params.videoType = nType;
	Params.memType = SYSTEM_MEMORY;

	mfxStatus sts = MFX_ERR_NONE;

	if (!m_bInit || m_mVideoType != nType)
	{
		Close();
		
		m_mVideoType = nType;

		// Initializing file reader
		totalBytesProcessed = 0;

		mfxInitParam initPar;
		mfxExtThreadsParam threadsPar;
		mfxVersion version;     // real API version with which library is initialized
		m_memType = Params.memType;
		Params.nMaxFPS = 25;
		m_nMaxFps = Params.nMaxFPS;
		MSDK_ZERO_MEMORY(m_mfxBS);
		MSDK_ZERO_MEMORY(initPar);
		MSDK_ZERO_MEMORY(threadsPar);

		// set version to 1.0; later we query the actual version of the library that gets loaded
		initPar.Version.Major = 1;
		initPar.Version.Minor = 0;

		initPar.GPUCopy = MFX_GPUCOPY_ON;

		init_ext_buffer(threadsPar);

		bool needInitExtPar = false;

		// Init session
		// try searching on all display adapters
		initPar.Implementation = MFX_IMPL_HARDWARE_ANY;
		//initPar.Implementation = MFX_IMPL_AUTO_ANY;
		// Library should pick first available compatible adapter during InitEx call with MFX_IMPL_HARDWARE_ANY
		sts = m_mfxSession.InitEx(initPar);
		if (sts < MFX_ERR_NONE)
		{
			m_bInit = true;
			return sts;
		}
		sts = m_mfxSession.QueryVersion(&version); // get real API version of the loaded library
		if (sts < MFX_ERR_NONE)
		{
			m_bInit = true;
			return sts;
		}
		sts = m_mfxSession.QueryIMPL(&m_impl); // get actual library implementation
		if (sts < MFX_ERR_NONE)
		{
			m_bInit = true;
			return sts;
		}
		bool im = m_impl == MFX_IMPL_HARDWARE ? TRUE : FALSE;

		// create decoder
		m_pmfxDEC = new MFXVideoDECODE(m_mfxSession);

		if (NULL == m_pmfxDEC)
		{
			m_bInit = true;
			return MFX_ERR_MEMORY_ALLOC;
		}
		// set video type in parameters
		m_mfxVideoParams.mfx.CodecId = nType;

		// prepare bit stream
		sts = InitMfxBitstream(&m_mfxBS, nWidth*nHight*3/2);

		if (sts < MFX_ERR_NONE)
		{
			m_bInit = true;
			return sts;
		}
		if (CheckVersion(&version, MSDK_FEATURE_PLUGIN_API)) {
			/* Here we actually define the following codec initialization scheme:
			*  1. If plugin path or guid is specified: we load user-defined plugin (example: VP8 sample decoder plugin)
			*  2. If plugin path not specified:
			*    2.a) we check if codec is distributed as a mediasdk plugin and load it if yes
			*    2.b) if codec is not in the list of mediasdk plugins, we assume, that it is supported inside mediasdk library
			*/
			// Load user plug-in, should go after CreateAllocator function (when all callbacks were initialized)

			{
				bool isDefaultPlugin = false;
				if (AreGuidsEqual(Params.pluginParams.pluginGuid, MSDK_PLUGINGUID_NULL))
				{
					mfxIMPL impl = MFX_IMPL_HARDWARE;
					Params.pluginParams.pluginGuid = msdkGetPluginUID(impl, MSDK_VDECODE, nType);
					isDefaultPlugin = true;
				}
				if (!AreGuidsEqual(Params.pluginParams.pluginGuid, MSDK_PLUGINGUID_NULL))
				{
					m_pPlugin.reset(LoadPlugin(MFX_PLUGINTYPE_VIDEO_DECODE, m_mfxSession, Params.pluginParams.pluginGuid, 1));
					if (m_pPlugin.get() == NULL) sts = MFX_ERR_UNSUPPORTED;
				}
				if (sts == MFX_ERR_UNSUPPORTED)
				{
					m_bInit = true;
					return MFX_ERR_UNSUPPORTED;
				}
			}
			// Query() validates the input parameters in mfxVideoParam; it returns corrected parameters (if any)
			// or MFX_ERR_UNSUPPORTED when they cannot be corrected. Parameters left at zero are not validated.
			sts = m_pmfxDEC->Query(&m_mfxVideoParams, &m_mfxVideoParams);
			if (sts < MFX_ERR_NONE)
			{
				m_bInit = true;
				return sts;
			}
		}
		m_bInit = true;
	}
	return sts;
}

mfxStatus CDecodingPipeline::DecodeHeader()
{
	mfxStatus sts = MFX_ERR_NONE;
	if (m_bInitMfx)
	{
		return MFX_ERR_NONE;
	}

	// Populate parameters. Involves DecodeHeader call
	sts = InitMfxParams();
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}
	m_bDecOutSysmem = false;

	m_eWorkMode = MODE_FILE_DUMP;
	if (m_eWorkMode == MODE_FILE_DUMP) {
		// prepare YUV file writer
	}
	else if ((m_eWorkMode != MODE_PERFORMANCE) && (m_eWorkMode != MODE_RENDERING)) {
		msdk_printf(MSDK_STRING("error: unsupported work mode\n"));
		return MFX_ERR_UNSUPPORTED;
	}

	// create device and allocator
	sts = CreateAllocator();
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	// in case of HW accelerated decode frames must be allocated prior to decoder initialization
	sts = AllocFrames();
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}
	// Init() fully validates the input parameters in mfxVideoParam and may correct some of them if the chosen
	// configuration is incompatible; if the incompatibility cannot be resolved it returns MFX_ERR_INVALID_VIDEO_PARAM.
	// Note: GetVideoParam() can be used to retrieve the corrected parameter set.
	sts = m_pmfxDEC->Init(&m_mfxVideoParams);

	if (MFX_WRN_PARTIAL_ACCELERATION == sts)
	{
		msdk_printf(MSDK_STRING("WARNING: partial acceleration\n"));
		MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
	}
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}
	sts = m_pmfxDEC->GetVideoParam(&m_mfxVideoParams);
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}
	m_bInitMfx = true;
	return sts;
}

void CDecodingPipeline::Close()
{
	WipeMfxBitstream(&m_mfxBS);
	MSDK_SAFE_DELETE(m_pmfxDEC);

	DeleteFrames();

	if (m_bIsExtBuffers)
	{
		DeallocateExtMVCBuffers();
		DeleteExtBuffers();
	}

	m_ExtBuffersMfxBS.clear();

	m_pPlugin.reset();
	m_mfxSession.Close();

	if (NULL != m_pmfxDEC)
	{
		m_pmfxDEC->Close();
		delete m_pmfxDEC;
		m_pmfxDEC = NULL;
	}

	// allocator if used as external for MediaSDK must be deleted after decoder
	DeleteAllocator();

	m_bInit = false;
	m_bInitMfx = false;
	return;
}

mfxStatus CDecodingPipeline::InitMfxParams()
{
	if (NULL == m_pmfxDEC)
	{
		return MFX_ERR_NULL_PTR;
	}
	mfxStatus sts = MFX_ERR_NONE;

	// try to find a sequence header in the stream
	// if header is not found this function exits with error (e.g. if device was lost and there's no header in the remaining stream)
	// DecodeHeader() parses the input bitstream and fills the mfxVideoParam structure; the parameters
	// are not validated and the decoder may still fail to decode the stream
	sts = m_pmfxDEC->DecodeHeader(&m_mfxBS, &m_mfxVideoParams);
	if (MFX_ERR_MORE_DATA == sts)
	{
		memmove(m_mfxBS.Data, m_mfxBS.Data + m_mfxBS.DataOffset, m_mfxBS.DataLength);
		m_mfxBS.DataOffset = 0;
		return MFX_ERR_MORE_DATA;
	}
	else
	{
		// if input is interlaced JPEG stream
		m_mfxVideoParams.mfx.Rotation = MFX_ROTATION_0;
	}
	// check DecodeHeader status
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}
	
	if (NULL == m_pOutBuf)
	{
		int nLen = m_mfxVideoParams.mfx.FrameInfo.Height * m_mfxVideoParams.mfx.FrameInfo.Width * 3 / 2;
		m_pOutBuf = new unsigned char[nLen];
	}
	
	if (!m_mfxVideoParams.mfx.FrameInfo.FrameRateExtN || !m_mfxVideoParams.mfx.FrameInfo.FrameRateExtD) {
		msdk_printf(MSDK_STRING("pretending that stream is 30fps one\n"));
		m_mfxVideoParams.mfx.FrameInfo.FrameRateExtN = 30;
		m_mfxVideoParams.mfx.FrameInfo.FrameRateExtD = 1;
	}

	if (!m_mfxVideoParams.mfx.FrameInfo.AspectRatioW || !m_mfxVideoParams.mfx.FrameInfo.AspectRatioH) {
		msdk_printf(MSDK_STRING("pretending that aspect ratio is 1:1\n"));
		m_mfxVideoParams.mfx.FrameInfo.AspectRatioW = 1;
		m_mfxVideoParams.mfx.FrameInfo.AspectRatioH = 1;
	}

	// specify memory type
	m_mfxVideoParams.IOPattern = (mfxU16)(m_memType != SYSTEM_MEMORY ? MFX_IOPATTERN_OUT_VIDEO_MEMORY : MFX_IOPATTERN_OUT_SYSTEM_MEMORY);
	m_mfxVideoParams.AsyncDepth = 4;

	return MFX_ERR_NONE;
}


mfxStatus CDecodingPipeline::AllocFrames()
{
	if (NULL == m_pmfxDEC)
	{
		return MFX_ERR_NULL_PTR;
	}
	mfxStatus sts = MFX_ERR_NONE;

	mfxFrameAllocRequest Request;

	mfxU16 nSurfNum = 0; // number of surfaces for decoder
	MSDK_ZERO_MEMORY(Request);

	// calculate number of surfaces required for decoder
	// query the number of input surfaces required by the decoder
	sts = m_pmfxDEC->QueryIOSurf(&m_mfxVideoParams, &Request);
	if (MFX_WRN_PARTIAL_ACCELERATION == sts)
	{
		m_bDecOutSysmem = true;
	}
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	if (m_nMaxFps)
	{
		// Add surfaces for rendering smoothness
		Request.NumFrameSuggested += m_nMaxFps / 3;
	}

	if ((Request.NumFrameSuggested < m_mfxVideoParams.AsyncDepth) && (m_impl & MFX_IMPL_HARDWARE_ANY))
		return MFX_ERR_MEMORY_ALLOC;

	Request.Type |= (m_bDecOutSysmem) ? MFX_MEMTYPE_SYSTEM_MEMORY : MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;

	// alloc frames for decoder
	sts = m_pGeneralAllocator->Alloc(m_pGeneralAllocator->pthis, &Request, &m_mfxResponse);
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	// prepare mfxFrameSurface1 array for decoder
	nSurfNum = m_mfxResponse.NumFrameActual;
	sts = AllocBuffers(nSurfNum);
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}
	for (int i = 0; i < nSurfNum; i++)
	{
		// initializing each frame:
		MSDK_MEMCPY_VAR(m_pSurfaces[i].frame.Info, &(Request.Info), sizeof(mfxFrameInfo));
		if (m_bExternalAlloc)
		{
			m_pSurfaces[i].frame.Data.MemId = m_mfxResponse.mids[i];
		}
		else
		{
			sts = m_pGeneralAllocator->Lock(m_pGeneralAllocator->pthis, m_mfxResponse.mids[i], &(m_pSurfaces[i].frame.Data));
			if (sts < MFX_ERR_NONE)
			{
				return sts;
			}
		}
	}
	return MFX_ERR_NONE;
}

mfxStatus CDecodingPipeline::CreateAllocator()
{
	mfxStatus sts = MFX_ERR_NONE;

	m_pGeneralAllocator = new GeneralAllocator();

	// initialize memory allocator
	sts = m_pGeneralAllocator->Init(m_pmfxAllocatorParams);

	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	return MFX_ERR_NONE;
}

void CDecodingPipeline::DeleteFrames()
{
	m_pCurrentFreeSurface = NULL;
	MSDK_SAFE_FREE(m_pCurrentFreeOutputSurface);

	// delete frames
	if (m_pGeneralAllocator)
	{
		m_pGeneralAllocator->Free(m_pGeneralAllocator->pthis, &m_mfxResponse);
	}

	return;
}

void CDecodingPipeline::DeleteAllocator()
{
	// delete allocator
	MSDK_SAFE_DELETE(m_pGeneralAllocator);
	MSDK_SAFE_DELETE(m_pmfxAllocatorParams);
}

void CDecodingPipeline::SetMultiView()
{
	m_bIsMVC = true;
}

// function for allocating a specific external buffer
template <typename Buffer>
mfxStatus CDecodingPipeline::AllocateExtBuffer()
{
	std::unique_ptr<Buffer> pExtBuffer(new Buffer());
	if (!pExtBuffer.get())
		return MFX_ERR_MEMORY_ALLOC;

	init_ext_buffer(*pExtBuffer);

	m_ExtBuffers.push_back(reinterpret_cast<mfxExtBuffer*>(pExtBuffer.release()));

	return MFX_ERR_NONE;
}

void CDecodingPipeline::AttachExtParam()
{
	m_mfxVideoParams.ExtParam = reinterpret_cast<mfxExtBuffer**>(&m_ExtBuffers[0]);
	m_mfxVideoParams.NumExtParam = static_cast<mfxU16>(m_ExtBuffers.size());
}

void CDecodingPipeline::DeleteExtBuffers()
{
	for (std::vector<mfxExtBuffer *>::iterator it = m_ExtBuffers.begin(); it != m_ExtBuffers.end(); ++it)
		delete *it;
	m_ExtBuffers.clear();
}

mfxStatus CDecodingPipeline::AllocateExtMVCBuffers()
{
	mfxU32 i;

	mfxExtMVCSeqDesc* pExtMVCBuffer = (mfxExtMVCSeqDesc*)m_mfxVideoParams.ExtParam[0];

	if (NULL == pExtMVCBuffer)
	{
		return MFX_ERR_MEMORY_ALLOC;
	}

	pExtMVCBuffer->View = new mfxMVCViewDependency[pExtMVCBuffer->NumView];

	if (NULL == pExtMVCBuffer->View)
	{
		return MFX_ERR_MEMORY_ALLOC;
	}
	for (i = 0; i < pExtMVCBuffer->NumView; ++i)
	{
		MSDK_ZERO_MEMORY(pExtMVCBuffer->View[i]);
	}
	pExtMVCBuffer->NumViewAlloc = pExtMVCBuffer->NumView;

	pExtMVCBuffer->ViewId = new mfxU16[pExtMVCBuffer->NumViewId];

	if (NULL == pExtMVCBuffer->ViewId)
	{
		return MFX_ERR_MEMORY_ALLOC;
	}
	for (i = 0; i < pExtMVCBuffer->NumViewId; ++i)
	{
		MSDK_ZERO_MEMORY(pExtMVCBuffer->ViewId[i]);
	}
	pExtMVCBuffer->NumViewIdAlloc = pExtMVCBuffer->NumViewId;

	pExtMVCBuffer->OP = new mfxMVCOperationPoint[pExtMVCBuffer->NumOP];

	if (NULL == pExtMVCBuffer->OP)
	{
		return MFX_ERR_MEMORY_ALLOC;
	}
	for (i = 0; i < pExtMVCBuffer->NumOP; ++i)
	{
		MSDK_ZERO_MEMORY(pExtMVCBuffer->OP[i]);
	}
	pExtMVCBuffer->NumOPAlloc = pExtMVCBuffer->NumOP;

	return MFX_ERR_NONE;
}

void CDecodingPipeline::DeallocateExtMVCBuffers()
{
	mfxExtMVCSeqDesc* pExtMVCBuffer = (mfxExtMVCSeqDesc*)m_mfxVideoParams.ExtParam[0];
	if (pExtMVCBuffer != NULL)
	{
		MSDK_SAFE_DELETE_ARRAY(pExtMVCBuffer->View);
		MSDK_SAFE_DELETE_ARRAY(pExtMVCBuffer->ViewId);
		MSDK_SAFE_DELETE_ARRAY(pExtMVCBuffer->OP);
	}

	MSDK_SAFE_DELETE(m_mfxVideoParams.ExtParam[0]);

	m_bIsExtBuffers = false;
}

mfxStatus CDecodingPipeline::ResetDecoder(sInputParams *pParams)
{
	mfxStatus sts = MFX_ERR_NONE;

	// close decoder
	sts = m_pmfxDEC->Close();
	MSDK_IGNORE_MFX_STS(sts, MFX_ERR_NOT_INITIALIZED);

	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	// free allocated frames
	DeleteFrames();

	// initialize parameters with values from parsed header
	sts = InitMfxParams();

	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	// in case of HW accelerated decode frames must be allocated prior to decoder initialization
	sts = AllocFrames();

	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	// init decoder
	sts = m_pmfxDEC->Init(&m_mfxVideoParams);
	if (MFX_WRN_PARTIAL_ACCELERATION == sts)
	{
		msdk_printf(MSDK_STRING("WARNING: partial acceleration\n"));
		MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
	}

	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}

	return MFX_ERR_NONE;
}

mfxStatus CDecodingPipeline::DeliverOutput(mfxFrameSurface1* frame)
{
	mfxStatus res = MFX_ERR_NONE, sts = MFX_ERR_NONE;

	if (!frame) {
		return MFX_ERR_NULL_PTR;
	}

	if (m_bExternalAlloc) {
		if (m_eWorkMode == MODE_FILE_DUMP) {
			res = m_pGeneralAllocator->Lock(m_pGeneralAllocator->pthis, frame->Data.MemId, &(frame->Data));
			if (MFX_ERR_NONE == res) {

				sts = m_pGeneralAllocator->Unlock(m_pGeneralAllocator->pthis, frame->Data.MemId, &(frame->Data));
			}
			if ((MFX_ERR_NONE == res) && (MFX_ERR_NONE != sts)) {
				res = sts;
			}
		}
		else if (m_eWorkMode == MODE_RENDERING) {

		}
	}
	else
	{
		mfxFrameInfo &pInfo = frame->Info;
		mfxFrameData &pData = frame->Data;

		mfxU32 vid = pInfo.FrameId.ViewId;
		std::vector<mfxU16> tmp;
		mfxU32 nLenTmp = 0;

		/*
		int NV12ToI420(const uint8_t* src_y,
			int src_stride_y,
			const uint8_t* src_uv,
			int src_stride_uv,
			uint8_t* dst_y,
			int dst_stride_y,
			uint8_t* dst_u,
			int dst_stride_u,
			uint8_t* dst_v,
			int dst_stride_v);
			*/
		//Y
		int dst_pos = 0;
		int src_pos = 0;
		unsigned char* y = m_pOutBuf;
		unsigned char* u = m_pOutBuf + pInfo.CropW * pInfo.CropH;
		unsigned char* v = m_pOutBuf + pInfo.CropW * pInfo.CropH * 5 / 4;

		libyuv::NV12ToI420(pData.Y, pData.Pitch, pData.UV, pData.Pitch, y, pInfo.CropW, u, pInfo.CropW >> 1, v, pInfo.CropW >>1, pInfo.CropW, pInfo.CropH);

		/*
		for (int i = 0; i < pInfo.CropH; i++) //Y
		{
			memcpy(m_pOutBuf + dst_pos, pData.Y + src_pos, pInfo.CropW);
			dst_pos += pInfo.CropW;
			src_pos += pData.Pitch;
		}

		//UV
		{
			int index = 0;
			src_pos = 0;
			int uv_size = pInfo.CropH * 3 / 2;
			for (int i = pInfo.CropH; i < uv_size; i++)
			{
				unsigned char* uv = pData.UV + src_pos;
				for (int j = 0; j < pInfo.CropW; j += 2)
				{
					u[index] = uv[j];
					v[index] = uv[j + 1];
					index++;
				}

				src_pos += pData.Pitch;
			}
		}
		*/
		
		mfxU64      TimeStamp = 0;
		TimeStamp = pData.TimeStamp;

		long diff = 0;
		if (0 == m_last_pts)
		{
			m_last_pts = m_nPts;
		}
		diff = (long)(m_nPts - m_last_pts) * 1000 / 90000;
		m_last_pts = m_nPts;

		int nLen = pInfo.CropH*pInfo.CropW * 3 / 2;

		//fwrite(m_pOutBuf, 1, nLen, fp);
		if (m_cb) // callback may not have been registered via setparam()
		{
			m_cb(m_pOutBuf, nLen, m_nType, pInfo.CropW, pInfo.CropH, m_nPts, m_nPts_ref_distance, diff, m_pcontext);
		}
	}

	return res;
}

mfxStatus CDecodingPipeline::DeliverLoop(void)
{
	mfxStatus res = MFX_ERR_NONE;

	while (!m_bStopDeliverLoop) {
		m_pDeliverOutputSemaphore->Wait();
		if (m_bStopDeliverLoop) {
			continue;
		}
		if (MFX_ERR_NONE != m_error) {
			continue;
		}

		msdk_atomic_inc32(&m_output_count);
		m_pDeliveredEvent->Signal();
	}
	return res;
}

unsigned int MFX_STDCALL CDecodingPipeline::DeliverThreadFunc(void* ctx)
{
	CDecodingPipeline* pipeline = (CDecodingPipeline*)ctx;

	mfxStatus sts;
	sts = pipeline->DeliverLoop();

	return 0;
}

mfxStatus CDecodingPipeline::SyncOutputSurface(mfxU32 wait)
{
	if (!m_pCurrentOutputSurface)
	{
		m_pCurrentOutputSurface = m_OutputSurfacesPool.GetSurface();
	}
	if (!m_pCurrentOutputSurface)
	{
		return MFX_ERR_MORE_DATA;
	}

	mfxStatus sts = m_mfxSession.SyncOperation(m_pCurrentOutputSurface->syncp, wait);

	if (MFX_ERR_GPU_HANG == sts && m_bSoftRobustFlag) {
		msdk_printf(MSDK_STRING("GPU hang happened\n"));
		// Output surface can be corrupted
		// But should be delivered to output anyway
		sts = MFX_ERR_NONE;
	}

	if (MFX_WRN_IN_EXECUTION == sts) {
		return sts;
	}
	if (MFX_ERR_NONE == sts) {
		// we got completely decoded frame - pushing it to the delivering thread...
		++m_synced_count;

		if (m_eWorkMode == MODE_PERFORMANCE) {
			m_output_count = m_synced_count;
			ReturnSurfaceToBuffers(m_pCurrentOutputSurface);
		}
		else if (m_eWorkMode == MODE_FILE_DUMP) {
			sts = DeliverOutput(&(m_pCurrentOutputSurface->surface->frame));
			if (MFX_ERR_NONE != sts) {
				sts = MFX_ERR_UNKNOWN;
			}
			else {
				m_output_count = m_synced_count;
			}
			ReturnSurfaceToBuffers(m_pCurrentOutputSurface);
		}
		else if (m_eWorkMode == MODE_RENDERING) {
			m_DeliveredSurfacesPool.AddSurface(m_pCurrentOutputSurface);
			m_pDeliveredEvent->Reset();
			m_pDeliverOutputSemaphore->Post();
		}
		m_pCurrentOutputSurface = NULL;
	}

	return sts;
}

mfxStatus CDecodingPipeline::DecodeFrame(const char * pBuf, long  nLen, long ntype, unsigned __int64 pts, long pts_ref_distance, unsigned __int64 dts)
{
	xlock locker(&m_memLock);
	m_nType = ntype;
	m_nPts = pts;
	m_nPts_ref_distance = pts_ref_distance;
	m_nDts = dts;

	mfxStatus           sts = MFX_ERR_NONE;
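	// append the incoming encoded frame to the internal bitstream, growing the buffer when the remaining space is too small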
	if (m_mfxBS.MaxLength - m_mfxBS.DataLength <= nLen)
	{
		int nNewLen = m_mfxBS.DataLength + nLen + 1;
		sts = ExtendMfxBitstream(&m_mfxBS, nNewLen);
	}
	//memmove(pData, pBitstream->Data + nOffset, pBitstream->DataLength);
	memcpy(m_mfxBS.Data + m_mfxBS.DataLength, pBuf, nLen);
	m_mfxBS.DataLength += nLen;
	m_mfxBS.DecodeTimeStamp = MFX_FRAMEDATA_ORIGINAL_TIMESTAMP;
	m_mfxBS.TimeStamp = MFX_FRAMEDATA_ORIGINAL_TIMESTAMP;

	sts = DecodeHeader();
	if (sts < MFX_ERR_NONE)
	{
		return sts;
	}
	mfxFrameSurface1*   pOutSurface = NULL;
	mfxBitstream*       pBitstream = &m_mfxBS;
	while (((sts == MFX_ERR_NONE) || (MFX_ERR_MORE_DATA == sts) || (MFX_ERR_MORE_SURFACE == sts)))
	{
		if (MFX_ERR_NONE != m_error)
		{
			//msdk_printf(MSDK_STRING("DeliverOutput return error = %d\n"),m_error);
			break;
		}

		if (pBitstream && ((MFX_ERR_MORE_DATA == sts) || (m_bIsCompleteFrame && !pBitstream->DataLength)))
		{
			break;
		}

		if ((MFX_ERR_NONE == sts) || (MFX_ERR_MORE_DATA == sts) || (MFX_ERR_MORE_SURFACE == sts)) {
			SyncFrameSurfaces();
			if (!m_pCurrentFreeSurface) {
				m_pCurrentFreeSurface = m_FreeSurfacesPool.GetSurface();
			}

			if (!m_pCurrentFreeSurface || (m_OutputSurfacesPool.GetSurfaceCount() == m_mfxVideoParams.AsyncDepth))
			{
				// we stuck with no free surface available, now we will sync...
				sts = SyncOutputSurface(MSDK_DEC_WAIT_INTERVAL);
				if (MFX_ERR_MORE_DATA == sts) {
					if ((m_eWorkMode == MODE_PERFORMANCE) || (m_eWorkMode == MODE_FILE_DUMP)) {
						sts = MFX_ERR_NOT_FOUND;
					}
					else if (m_eWorkMode == MODE_RENDERING) {
						if (m_synced_count != m_output_count) {
							sts = m_pDeliveredEvent->TimedWait(MSDK_DEC_WAIT_INTERVAL);
						}
						else {
							sts = MFX_ERR_NOT_FOUND;
						}
					}
					if (MFX_ERR_NOT_FOUND == sts) {
						//msdk_printf(MSDK_STRING("fatal: failed to find output surface, that's a bug!\n"));
						break;
					}
				}
				// note: MFX_WRN_IN_EXECUTION will also be treated as an error at this point
				continue;
			}

			if (!m_pCurrentFreeOutputSurface)
			{
				m_pCurrentFreeOutputSurface = GetFreeOutputSurface();
			}
			if (!m_pCurrentFreeOutputSurface)
			{
				sts = MFX_ERR_NOT_FOUND;
				break;
			}
		}

		if ((MFX_ERR_NONE == sts) || (MFX_ERR_MORE_DATA == sts) || (MFX_ERR_MORE_SURFACE == sts))
		{
			pOutSurface = NULL;
			sts = m_pmfxDEC->DecodeFrameAsync(pBitstream, &(m_pCurrentFreeSurface->frame), &pOutSurface, &(m_pCurrentFreeOutputSurface->syncp));

			if (sts > MFX_ERR_NONE)
			{
				// ignoring warnings...
				if (m_pCurrentFreeOutputSurface->syncp)
				{
					MSDK_SELF_CHECK(pOutSurface);
					// output is available
					sts = MFX_ERR_NONE;
				}
				else
				{
					// output is not available
					sts = MFX_ERR_MORE_SURFACE;
				}
			}
			else if ((MFX_ERR_MORE_DATA == sts) && pBitstream)
			{
				if (m_bIsCompleteFrame && pBitstream->DataLength)
				{
					// In low_latency mode the decoder has to process the bitstream completely
					//msdk_printf(MSDK_STRING("error: Incorrect decoder behavior in low latency mode (bitstream length is not equal to 0 after decoding)\n"));
					sts = MFX_ERR_UNDEFINED_BEHAVIOR;
					continue;
				}
				else
				{
					memmove(m_mfxBS.Data, m_mfxBS.Data + m_mfxBS.DataOffset, m_mfxBS.DataLength);
					m_mfxBS.DataOffset = 0;
				}
			}
			else if ((MFX_ERR_MORE_DATA == sts) && !pBitstream)
			{
				// that's it - we reached the end of stream; now we need to render the buffered data...
				do {
					sts = SyncOutputSurface(MSDK_DEC_WAIT_INTERVAL);
				} while (MFX_ERR_NONE == sts);

			}
			else if (MFX_ERR_INCOMPATIBLE_VIDEO_PARAM == sts)
			{
				//bErrIncompatibleVideoParams = true;
				// need to go to the buffering loop prior to reset procedure
				pBitstream = NULL;
				sts = MFX_ERR_NONE;
			}
		}

		if ((MFX_ERR_NONE == sts) || (MFX_ERR_MORE_DATA == sts) || (MFX_ERR_MORE_SURFACE == sts)) {
			// if current free surface is locked we are moving it to the used surfaces array
			/*if (m_pCurrentFreeSurface->frame.Data.Locked)*/ {
				m_UsedSurfacesPool.AddSurface(m_pCurrentFreeSurface);
				m_pCurrentFreeSurface = NULL;
			}
		}
		else
		{
			//MSDK_CHECK_STATUS_NO_RET(sts, "DecodeFrameAsync returned error status");
		}

		if (MFX_ERR_NONE == sts)
		{
			msdkFrameSurface* surface = FindUsedSurface(pOutSurface);

			msdk_atomic_inc16(&(surface->render_lock));

			m_pCurrentFreeOutputSurface->surface = surface;
			m_OutputSurfacesPool.AddSurface(m_pCurrentFreeOutputSurface);
			m_pCurrentFreeOutputSurface = NULL;
		}
	} //while processing
	return sts; // ERR_NONE or ERR_INCOMPATIBLE_VIDEO_PARAM
}

Summary

A few closing notes:

  • 1. virtual mfxStatus DeliverOutput(mfxFrameSurface1* frame); is the decode-output function. The SDK delivers decoded frames in NV12 (hence the libyuv::NV12ToI420 call above), while downstream we need planar I420 (YUV420P), so a color-space conversion is required. Copying the planes with plain loops costs too much, so the conversion uses the libyuv library (see the annotated sketch after this list).
  • 2. Operating systems and hardware configurations differ from PC to PC, so the wrapper tries to stay compatible with as many machines as possible. Note one deviation from the original SDK sample: the calling order of a few functions has changed. For example, on some older Windows 7 desktops every preceding initialization step succeeds, yet calling Query (i.e. MFXVideoDECODE_Query) always returns -1, which maps to MFX_ERR_UNKNOWN. Since the lower-level code is not visible to us, the exact cause is still unclear; the workaround is to call Query during initialization rather than in the header-decoding step. See the code for the other changes.
  • 3. The VPP plug-in is not used in this wrapper.
  • 4. When using DXVAChecker to check whether a PC supports Intel hardware decoding, we found machines that DXVAChecker reports as supported yet whose initialization always fails; one such machine was a Xiaomi laptop. The fix was to download and reinstall the latest graphics driver. Note that the driver from the Intel website is not necessarily the right one, because Intel only publishes generic builds; the build released by the machine vendor is the matching one, so it is best to download the driver from the manufacturer's website.
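
To make point 1 concrete, here is the conversion step from DeliverOutput again, annotated with the plane layout and stride assumptions (a tightly packed I420 frame of CropW x CropH in m_pOutBuf); this is a restatement for clarity, not additional code the wrapper needs.

// I420 layout inside m_pOutBuf (no padding):
//   Y : CropW * CropH bytes,          stride = CropW
//   U : (CropW/2) * (CropH/2) bytes,  stride = CropW/2
//   V : (CropW/2) * (CropH/2) bytes,  stride = CropW/2
unsigned char* y = m_pOutBuf;
unsigned char* u = m_pOutBuf + pInfo.CropW * pInfo.CropH;        // Y plane ends here
unsigned char* v = u + (pInfo.CropW / 2) * (pInfo.CropH / 2);    // == m_pOutBuf + CropW * CropH * 5 / 4

// NV12 source: a Y plane plus an interleaved UV plane, both with stride pData.Pitch
libyuv::NV12ToI420(pData.Y,  pData.Pitch,      // source luma
                   pData.UV, pData.Pitch,      // source interleaved chroma
                   y, pInfo.CropW,             // destination Y
                   u, pInfo.CropW / 2,         // destination U
                   v, pInfo.CropW / 2,         // destination V
                   pInfo.CropW, pInfo.CropH);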