如何通过Android NDK间接使用GraphicBuffer

GraphicBuffer是Android设计的一种高性能buffer,其具备一些比较优越的特性,如:

  1. 可以在多个进程中传递
  2. 可以在多个硬件设备中共享使用,如CPU、GPU、HWC
  3. 可以生成EglImage然后绑定到Texture或者renderBuffer上

这几个特性可以实现的功能有:

  1. 跨进程传递渲染结果
  2. 在使用GraphicBuffer绑定纹理时,可以减少CPU和GPU间的数据拷贝

但GraphicBuffer在使用时存在一个严重的限制:需要在Android源码环境下才能使用。
从Android 7之后,系统进一步限制了对GraphicBuffer的使用,NDK中不能直接使用GraphicBuffer。

那么是否存在方法通过NDK来间接使用GraphicBuffer?

答案是肯定的,而且存在两种方案:

  1. 通过NDK的libnativewindow.so的AHardwareBuffer_allocate方法创建一个AHardwareBuffer
  2. 通过EGL的eglCreateNativeClientBufferANDROID扩展来创建一个EGLClientBuffer

下面分别介绍这两种方法。

1. 通过NDK AHardwareBuffer_allocate创建AHardwareBuffer

首先需要了解下AHardwareBuffer_Desc结构体

typedef struct AHardwareBuffer_Desc {
    
    
    uint32_t    width;      // width in pixels
    uint32_t    height;     // height in pixels
    uint32_t    layers;     // number of images
    uint32_t    format;     // One of AHARDWAREBUFFER_FORMAT_*
    uint64_t    usage;      // Combination of AHARDWAREBUFFER_USAGE_*
    uint32_t    stride;     // Stride in pixels, ignored for AHardwareBuffer_allocate()
    uint32_t    rfu0;       // Initialize to zero, reserved for future use
    uint64_t    rfu1;       // Initialize to zero, reserved for future use
} AHardwareBuffer_Desc;

具体实现代码如下:


#define LOAD_PROC(NAME, TYPE)                                           \
    NAME = reinterpret_cast<TYPE>(eglGetProcAddress(# NAME))
    // First, load entry points provided by extensions.
    LOAD_PROC(glEGLImageTargetTexture2DOES,
              PFNGLEGLIMAGETARGETTEXTURE2DOESPROC);

    LOAD_PROC(eglGetNativeClientBufferANDROID,
              PFNEGLGETNATIVECLIENTBUFFERANDROID);

    LOAD_PROC(eglCreateImageKHR, PFNEGLCREATEIMAGEKHRPROC);

    LOAD_PROC(glFramebufferTextureMultiviewOVR,
              PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC);

    LOAD_PROC(glFramebufferTextureMultisampleMultiviewOVR,
              PFNGLFRAMEBUFFERTEXTUREMULTISAMPLEMULTIVIEWOVRPROC);

    // Try creating a 32x32 AHardwareBuffer and attaching it to a multiview
    // framebuffer, with various formats and depths.
    AHardwareBuffer_Desc desc = {
    
    };
    desc.width = 32;
    desc.height = 32;
    desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
                 AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
    const int layers[] = {
    
    2, 4};
    const int formats[] = {
    
    
      AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM,
      AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,
      // Do not test AHARDWAREBUFFER_FORMAT_BLOB, it isn't color-renderable.
    };
    const int samples[] = {
    
    1, 2, 4};
    for (int nsamples : samples) {
    
    
      for (auto nlayers : layers) {
    
    
        for (auto format : formats) {
    
    
          desc.layers = nlayers;
          desc.format = format;
          testEglImageArray(env, desc, nsamples);
        }
      }
    }
}
static void testEglImageArray(JNIEnv* env, AHardwareBuffer_Desc desc,
                              int nsamples) {
    
    

    AHardwareBuffer* hwbuffer = nullptr;
    int error = AHardwareBuffer_allocate(&desc, &hwbuffer);

    // Create EGLClientBuffer from the AHardwareBuffer.
    EGLClientBuffer native_buffer = eglGetNativeClientBufferANDROID(hwbuffer);

    // Create EGLImage from EGLClientBuffer.
    EGLint attrs[] = {
    
    EGL_NONE};
    EGLImageKHR image =
        eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT,
                          EGL_NATIVE_BUFFER_ANDROID, native_buffer, attrs);

    // Create OpenGL texture from the EGLImage.
    GLuint texid;
    glGenTextures(1, &texid);
    glBindTexture(GL_TEXTURE_2D_ARRAY, texid);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D_ARRAY, image);

    // Create FBO and add multiview attachment.
    GLuint fboid;
    glGenFramebuffers(1, &fboid);
    glBindFramebuffer(GL_FRAMEBUFFER, fboid);
    const GLint miplevel = 0;
    const GLint base_view = 0;
    const GLint num_views = desc.layers;
    if (nsamples == 1) {
    
    
        glFramebufferTextureMultiviewOVR(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                         texid, miplevel, base_view, num_views);
    } else {
    
    
        glFramebufferTextureMultisampleMultiviewOVR(
            GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texid, miplevel, nsamples,
            base_view, num_views);
    }

    glCheckFramebufferStatus(GL_FRAMEBUFFER);
    //do some render
    
    glDeleteTextures(1, &texid);
    glDeleteFramebuffers(1, &fboid);
    AHardwareBuffer_release(hwbuffer);
}

2. 通过EGL eglCreateNativeClientBufferANDROID扩展创建EGLClientBuffer

EGLint attrs[] = {
    
    
	EGL_WIDTH, 10,
	EGL_HEIGHT,10,
	EGL_RED_SIZE,8,
	EGL_GREEN_SIZE,8,
	EGL_BLUE_SIZE 8,
	EGL_ALPHA_SIZE,8,
	EGL_NATIVE_BUFFER_USAGE_ANDROID,EGL_NATIVE_BUFFER_USAGE_TEXTURE_BIT_ANDROID,
	EGL_NONE };
  
EGLClientBuffer native_buffer = eglCreateNativeClientBufferANDROID(attrs);

// Create EGLImage from EGLClientBuffer.
EGLint attrs[] = {
    
    EGL_NONE};
EGLImageKHR image =
	eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT,
	                 EGL_NATIVE_BUFFER_ANDROID, native_buffer, attrs);

// Create OpenGL texture from the EGLImage.
GLuint texid;
glGenTextures(1, &texid);
glBindTexture(GL_TEXTURE_2D_ARRAY, texid);
glEGLImageTargetTexture2DOES(GL_TEXTURE_2D_ARRAY, image);

3. 两种实现的对比

  1. AHardwareBuffer_allocate不依赖EGL环境
  2. eglCreateNativeClientBufferANDROID需要依赖EGL环境

猜你喜欢

转载自blog.csdn.net/u010116586/article/details/108111632