Android OpenGL ES 3.0 Camera Basic Filters

1. Dynamic Grid Filter Effect

1.1 Vertex Shader
#version 300 es
layout(location = 0) in vec3 attr_position;
layout(location = 1) in vec2 attr_uv;

uniform mat4   uni_mat;
out vec2   v_texcoord;

void main(void)
{
    v_texcoord = attr_uv;
    gl_Position = uni_mat* vec4(attr_position,1.0);
}

The inputs are the MVP matrix (uni_mat) and the texture coordinates (attr_uv), which are passed through to the fragment shader.

1.2 Fragment Shader
#version 300 es
precision mediump float;
//precision highp float;

uniform sampler2D uni_textureY;
uniform sampler2D uni_textureU;
uniform sampler2D uni_textureV;

in vec2 v_texcoord;
out vec4 fragColor;

uniform float u_offset; // offset
uniform vec2 texSize;   // texture size

vec4 YuvToRgb(vec2 uv){
    vec3 yuv;
    vec3 rgb;
    yuv.x = texture(uni_textureY, uv).r;
    yuv.y = texture(uni_textureU, uv).r - 0.5;
    yuv.z = texture(uni_textureV, uv).r - 0.5;
    rgb = mat3( 1,1,1, 0,-0.39465,2.03211,1.13983,-0.58060,0) * yuv;
    return vec4(rgb, 1);
}


void main(void)
{
    vec2 imgTexCoord = v_texcoord * texSize; // convert texture coordinates to image (pixel) coordinates
    float sideLength = texSize.y / 6.0;      // side length of one grid cell
    float maxOffset = 0.15 * sideLength;     // maximum width of the grid lines
    float x = mod(imgTexCoord.x, floor(sideLength));
    float y = mod(imgTexCoord.y, floor(sideLength));

    float offset = u_offset * maxOffset;

    if(offset <= x
    && x <= sideLength - offset
    && offset <= y
    && y <= sideLength - offset)
    {
        fragColor = YuvToRgb(v_texcoord);
    }
    else
    {
        fragColor = vec4(1.0, 1.0, 1.0, 1.0);
    }
}

YuvToRgb: converts the YUV data to RGB so it can be rendered to the screen.
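
Since the GLSL mat3 constructor fills the matrix column by column, the multiplication above expands to the familiar BT.601-style YUV-to-RGB equations:

// yuv.x = Y, yuv.y = U - 0.5, yuv.z = V - 0.5
rgb.r = yuv.x + 1.13983 * yuv.z;
rgb.g = yuv.x - 0.39465 * yuv.y - 0.58060 * yuv.z;
rgb.b = yuv.x + 2.03211 * yuv.y;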

The dynamic grid effect divides the texture into a grid of cells and then animates the width of the grid lines based on an offset value.

mod: returns x - y * floor(x / y), i.e. the modulo operation (%)

floor: returns the largest integer value less than or equal to x

Before the calculation, the texture coordinates are converted to image (pixel) coordinates so that the grid cells are not stretched.
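
As a worked example, with texSize = (720, 1280) (the fallback used in the render code below): sideLength = 1280 / 6 ≈ 213.3, so the grid is six cells tall, and maxOffset ≈ 32 pixels. A fragment at imgTexCoord = (500, 500) gives x = mod(500, 213) = 74 and y = 74; with u_offset = 0.5 the line half-width is offset = 16, and since 16 <= 74 <= 213.3 - 16 the fragment samples the camera image. Fragments within 16 pixels of a cell boundary fail the test and are drawn white, which forms the animated grid lines.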

2. Split-Screen Effect

2.1 Vertex Shader
#version 300 es
layout(location = 0) in vec3 attr_position;
layout(location = 1) in vec2 attr_uv;

uniform mat4   uni_mat;
out vec2   v_texcoord;

void main(void)
{
    v_texcoord = attr_uv;
    gl_Position = uni_mat* vec4(attr_position,1.0);
}

This only handles the vertex data and texture coordinates, so it is the same as the shader above.

2.2 Fragment Shader
#version 300 es
precision mediump float;
//precision highp float;

uniform sampler2D uni_textureY;
uniform sampler2D uni_textureU;
uniform sampler2D uni_textureV;

in vec2 v_texcoord;
out vec4 fragColor;

vec4 YuvToRgb(vec2 uv){
    vec3 yuv;
    vec3 rgb;
    yuv.x = texture(uni_textureY, uv).r;
    yuv.y = texture(uni_textureU, uv).r - 0.5;
    yuv.z = texture(uni_textureV, uv).r - 0.5;
    rgb = mat3( 1,1,1, 0,-0.39465,2.03211,1.13983,-0.58060,0) * yuv;
    return vec4(rgb, 1);
}


void main(void)
{

    /* Four-way split screen, works correctly */
    vec2 newTexCoord = v_texcoord;
    if(newTexCoord.x < 0.5)
    {
        newTexCoord.x = newTexCoord.x * 2.0;
    }
    else
    {
        newTexCoord.x = (newTexCoord.x - 0.5) * 2.0;
    }

    if(newTexCoord.y < 0.5)
    {
        newTexCoord.y = newTexCoord.y * 2.0;
    }
    else
    {
        newTexCoord.y = (newTexCoord.y - 0.5) * 2.0;
    }

    fragColor = YuvToRgb(newTexCoord);
}

The split-screen filter works by downsampling (shrinking) the full texture into several specified regions, so the whole image is displayed multiple times in different areas of the screen.
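
The same four-way split can be written more compactly: scaling the coordinate by 2 and keeping only the fractional part maps each quadrant back onto the full [0, 1) range. A minimal equivalent sketch of main():

void main(void)
{
    // fract(x * 2.0) equals x * 2.0 for x < 0.5 and (x - 0.5) * 2.0 for x >= 0.5,
    // so every quadrant samples the whole texture.
    vec2 newTexCoord = fract(v_texcoord * 2.0);
    fragColor = YuvToRgb(newTexCoord);
}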

3. Zooming Circle Effect

3.1 Vertex Shader
#version 300 es
layout(location = 0) in vec3 attr_position;
layout(location = 1) in vec2 attr_uv;

uniform mat4   uni_mat;
out vec2   v_texcoord;

void main(void)
{
    v_texcoord = attr_uv;
    gl_Position = uni_mat* vec4(attr_position,1.0);
}

This only handles the vertex data and texture coordinates, so it is the same as the shader above.

3.2 Fragment Shader
#version 300 es
precision mediump float;
//precision highp float;

uniform sampler2D uni_textureY;
uniform sampler2D uni_textureU;
uniform sampler2D uni_textureV;

in vec2 v_texcoord;
out vec4 fragColor;

uniform float u_offset; // offset
uniform vec2 texSize;   // texture size

vec4 YuvToRgb(vec2 uv){
    vec3 yuv;
    vec3 rgb;
    yuv.x = texture(uni_textureY, uv).r;
    yuv.y = texture(uni_textureU, uv).r - 0.5;
    yuv.z = texture(uni_textureV, uv).r - 0.5;
    rgb = mat3( 1,1,1, 0,-0.39465,2.03211,1.13983,-0.58060,0) * yuv;
    return vec4(rgb, 1);
}



void main(void)
{
    vec2 imgTex = v_texcoord * texSize; // convert texture coordinates to image (pixel) coordinates
    float r = (u_offset + 0.2) * texSize.x; // 0.2 is the default size of the circle
    if(distance(imgTex, vec2(texSize.x / 2.0, texSize.y / 2.0)) < r)
    {
        fragColor = YuvToRgb(v_texcoord);
    }
    else
    {
        fragColor = vec4(1.0, 1.0, 1.0, 1.0);
    }

}
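
The shader samples the camera image only inside a circle centred on the texture and fills everything else with white. With texSize = (720, 1280) the centre is (360, 640); since the render code below passes u_offset = 0.2 * progress, the radius grows from 0.2 * 720 = 144 pixels up to (assuming progress reaches 1.0) 0.4 * 720 = 288 pixels, which makes the circle appear to zoom in and out.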

4. Rendering Code

void MSDynamicGridLine::UpdateYUVData(MSYUVData_Frame *yuvFrame) {
    if(yuvFrame == nullptr ){
        return;
    }

    if(m_nVideoH != yuvFrame->height || m_nVideoW != yuvFrame->width){

        if(nullptr != m_pBufYuv420p)
        {
            free(m_pBufYuv420p);
            m_pBufYuv420p=nullptr;
        }
    }


    m_nVideoW =  yuvFrame->width;
    m_nVideoH = yuvFrame->height;

    m_yFrameLength = yuvFrame->luma.length;
    m_uFrameLength = yuvFrame->chromaB.length;
    m_vFrameLength = yuvFrame->chromaR.length;


    // allocate memory for one frame of YUV data; its size is 1.5x the resolution (width * height)
    int nLen = m_yFrameLength + m_uFrameLength +m_vFrameLength;

    if(nullptr == m_pBufYuv420p)
    {
        m_pBufYuv420p = ( unsigned char*) malloc(nLen);
    }

    memcpy(m_pBufYuv420p,yuvFrame->luma.dataBuffer,m_yFrameLength);
    memcpy(m_pBufYuv420p+m_yFrameLength,yuvFrame->chromaB.dataBuffer,m_uFrameLength);
    memcpy(m_pBufYuv420p+m_yFrameLength +m_uFrameLength,yuvFrame->chromaR.dataBuffer,m_vFrameLength);

    m_bUpdateData =true;
}

This method updates the YUV data. Since the data comes from the camera, which runs on a different thread than the OpenGL one, it has to be copied into a shared buffer.
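
In YUV420p the U and V planes are subsampled by 2 in both width and height, which is why the buffer holds 1.5x width * height bytes and why the chroma textures in Render() are uploaded at half width and half height. For a 720x1280 frame:

m_yFrameLength = 720 * 1280          = 921600 bytes
m_uFrameLength = 360 * 640           = 230400 bytes
m_vFrameLength = 360 * 640           = 230400 bytes
nLen           = 921600 + 2 * 230400 = 1382400 = 1.5 * 720 * 1280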

void MSDynamicGridLine::Render(MSGLCamera *pCamera) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    if(m_bUpdateData == false){
        return;
    }

    static MSVertex triangleVert[] = {
            {-1, 1,  1,     0,0},
            {-1, -1,  1,    0,1},
            {1,  1,  1,     1,0},
            {1,  -1,  1,    1,1},
    };


    glm::mat4x4  objectMat = glm::mat4x4(1.0);
    glm::mat4x4  objectTransMat = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -5));
    objectMat = objectMat * objectTransMat;

    objectMat = pCamera->projectionMatrix * pCamera->viewMatrix * objectMat ;


    m_pOpenGLShader->Bind();

    m_pOpenGLShader->SetUniformValue("uni_mat",objectMat);


    m_pOpenGLShader->EnableAttributeArray("attr_position");
    m_pOpenGLShader->SetAttributeBuffer("attr_position",GL_FLOAT,triangleVert,3,sizeof(MSVertex));

    m_pOpenGLShader->EnableAttributeArray("attr_uv");
    m_pOpenGLShader->SetAttributeBuffer("attr_uv",GL_FLOAT,&triangleVert[0].u,2,sizeof(MSVertex));

    m_PeriodicFrameIndex++;
    float progress = GetFrameProgress();
    m_pOpenGLShader->SetUniformValue("u_offset",0.2f * progress);

    LOGD("m_nVideoW is %d,progress is %f",m_nVideoW,progress);

    if (m_nVideoW>0){
        m_pOpenGLShader->SetUniformValue("texSize",glm::vec2(m_nVideoW,m_nVideoH));
    }else{
        m_pOpenGLShader->SetUniformValue("texSize",glm::vec2(720,1280));
    }


    m_pOpenGLShader->SetUniformValue("uni_textureY",0);

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, m_textures[0]);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_nVideoW, m_nVideoH, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_pBufYuv420p);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    m_pOpenGLShader->SetUniformValue("uni_textureU",1);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, m_textures[1]);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE,m_nVideoW/2, m_nVideoH/2, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, (char*)(m_pBufYuv420p+m_yFrameLength));
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    m_pOpenGLShader->SetUniformValue("uni_textureV",2);
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, m_textures[2]);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_nVideoW/2, m_nVideoH/2, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, (char*)(m_pBufYuv420p+m_yFrameLength+m_uFrameLength));
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

    glBindTexture(GL_TEXTURE_2D, 0);
    m_pOpenGLShader->DisableAttributeArray("attr_position");
    m_pOpenGLShader->DisableAttributeArray("attr_uv");

    m_pOpenGLShader->Release();

    return;
}
  • Pass the parameters to the shader: the MVP matrix, vertex data, offset, etc.
  • Use three textures to hold the Y, U and V planes (their creation is sketched below)
  • Finally issue the draw call with glDrawArrays
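
The creation of the three texture objects is not shown in the post; a minimal initialization sketch, assuming m_textures is a GLuint[3] member, would be:

    // Hypothetical init (not in the original post): create the three texture
    // objects that Render() later binds for the Y, U and V planes.
    glGenTextures(3, m_textures);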

Reposted from blog.csdn.net/u014078003/article/details/127992769