OpenGL Shader: Automatic Face Slimming and Eye Enlargement

There are three local image-warping algorithms: local scaling, local translation, and local rotation. Local scaling can be used to enlarge the eyes, while local translation can be used to slim the face.
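Concretely, the eye-enlargement shader below resamples every fragment that lies within a radius $r_{max}$ of an eye center $c$. Writing $x$ for the fragment's texture coordinate and $r = \lVert x - c \rVert$ (measured in aspect-corrected coordinates), the sampling position becomes

    x' = c + \alpha\,(x - c), \qquad \alpha = 1 - \mathrm{scaleRatio}\left(\frac{r}{r_{max}} - 1\right)^2

With scaleRatio > 0, $\alpha < 1$ inside the radius, so pixels are sampled from closer to the eye center and the eye appears magnified; at $r = r_{max}$ the factor returns to 1, so the warp blends smoothly into the surrounding image.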

 precision highp float;

 varying highp vec2 textureCoordinate;
 uniform sampler2D inputImageTexture;

 uniform highp float scaleRatio;            // scale factor: 0 means no scaling, > 0 enlarges
 uniform highp float radius;                // radius of the region affected by the scaling warp
 uniform highp vec2 leftEyeCenterPosition;  // left-eye control point; deformation weakens with distance from it
 uniform highp vec2 rightEyeCenterPosition; // right-eye control point
 uniform float aspectRatio;                 // aspect ratio of the processed image

 // Local scaling warp: fragments within "radius" of the eye center are resampled
 // from a position pulled toward that center, which magnifies the eye.
 highp vec2 warpPositionToUse(vec2 centerPosition, vec2 currentPosition, float radius, float scaleRatio, float aspectRatio)
 {
     vec2 positionToUse = currentPosition;

     // Correct the y axis by the aspect ratio so distances are measured in a square space.
     vec2 currentPositionToUse = vec2(currentPosition.x, currentPosition.y * aspectRatio + 0.5 - 0.5 * aspectRatio);
     vec2 centerPositionToUse = vec2(centerPosition.x, centerPosition.y * aspectRatio + 0.5 - 0.5 * aspectRatio);

     float r = distance(currentPositionToUse, centerPositionToUse);

     if(r < radius)
     {
         // alpha < 1.0 moves the sampling position toward the eye center; the effect
         // fades to zero at the edge of the radius.
         float alpha = 1.0 - scaleRatio * pow(r / radius - 1.0, 2.0);
         positionToUse = centerPosition + alpha * (currentPosition - centerPosition);
     }

     return positionToUse;
 }

 void main()
 {
     vec2 positionToUse = warpPositionToUse(leftEyeCenterPosition, textureCoordinate, radius, scaleRatio, aspectRatio);

     positionToUse = warpPositionToUse(rightEyeCenterPosition, positionToUse, radius, scaleRatio, aspectRatio);

     gl_FragColor = texture2D(inputImageTexture, positionToUse);   
 }
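As a rough sketch of the host side, the uniforms above might be fed like this on Android with GLES20; the program handle, eye-center coordinates, and the numeric values are placeholders for whatever your face-landmark pipeline produces, and none of this appears in the original post:

 import android.opengl.GLES20;

 /** Minimal sketch: uploads the eye-enlargement uniforms. All values are placeholders. */
 final class EyeEnlargeUniforms {
     static void upload(int program, float[] leftEye, float[] rightEye, float aspectRatio) {
         GLES20.glUseProgram(program);
         // Strength and reach of the warp, in normalized texture units.
         GLES20.glUniform1f(GLES20.glGetUniformLocation(program, "scaleRatio"), 0.3f);
         GLES20.glUniform1f(GLES20.glGetUniformLocation(program, "radius"), 0.15f);
         // Eye centers as normalized texture coordinates, e.g. from a landmark detector.
         GLES20.glUniform2fv(GLES20.glGetUniformLocation(program, "leftEyeCenterPosition"), 1, leftEye, 0);
         GLES20.glUniform2fv(GLES20.glGetUniformLocation(program, "rightEyeCenterPosition"), 1, rightEye, 0);
         // Must match the aspect-ratio convention used inside the shader.
         GLES20.glUniform1f(GLES20.glGetUniformLocation(program, "aspectRatio"), aspectRatio);
     }
 }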

The Local Translation Algorithm

For the local translation algorithm, again refer to the paper; there is little point repeating it, so the corresponding shader code is provided below for anyone who needs it. It can produce both a slimmer and a fuller face. You must supply the face-contour control points for the warp, up to MAX_CONTOUR_POINT_COUNT of them.
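In the shader below, each fragment $x$ that lies within radius $r_{max}$ of a contour point $A$ is resampled from

    x' = x - \left(\frac{r_{max}^2 - r^2}{(r_{max}^2 - r^2) + (r - \delta)^2}\right)^2 \delta\,\hat{d}

where $r = \lVert x - A \rVert$ in aspect-corrected coordinates, $\delta$ is that point's entry in deltaArray, and $\hat{d}$ is the unit vector from $A$ toward its paired point $B$. Because the output at $x$ samples from a point displaced against $\hat{d}$, the image content around $A$ appears to shift toward $B$ (or away from it when $\delta$ is negative), which is how the same shader yields both slimming and fattening. The displacement is strongest at $A$ and falls to zero at the edge of the radius, so the warp stays seamless.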

 precision highp float;

 varying highp vec2 textureCoordinate;
 uniform sampler2D inputImageTexture;

 uniform highp float radius;       // radius of the region affected by each control point

 uniform highp float aspectRatio;  // aspect ratio of the processed image

 uniform float leftContourPoints[MAX_CONTOUR_POINT_COUNT*2];   // left-side contour points, (x, y) interleaved
 uniform float rightContourPoints[MAX_CONTOUR_POINT_COUNT*2];  // right-side contour points, (x, y) interleaved
 uniform float deltaArray[MAX_CONTOUR_POINT_COUNT];            // displacement strength per control-point pair
 uniform int arraySize;                                        // number of control points actually in use

 // Local translation warp: fragments within "radius" of contourPointA are resampled
 // along the direction from contourPointA to contourPointB, scaled by "delta".
 highp vec2 warpPositionToUse(vec2 currentPoint, vec2 contourPointA, vec2 contourPointB, float radius, float delta, float aspectRatio)
 {
     vec2 positionToUse = currentPoint;

     // Correct the y axis by the aspect ratio so distances are measured in a square space.
     vec2 currentPointToUse = vec2(currentPoint.x, currentPoint.y * aspectRatio + 0.5 - 0.5 * aspectRatio);
     vec2 contourPointAToUse = vec2(contourPointA.x, contourPointA.y * aspectRatio + 0.5 - 0.5 * aspectRatio);

     float r = distance(currentPointToUse, contourPointAToUse);
     if(r < radius)
     {
         vec2 dir = normalize(contourPointB - contourPointA);
         // Falloff: strongest at the control point, zero at the edge of the radius.
         float dist = radius * radius - r * r;
         float alpha = dist / (dist + (r - delta) * (r - delta));
         alpha = alpha * alpha;

         positionToUse = positionToUse - alpha * delta * dir;
     }

     return positionToUse;
 }


 void main()
 {
     vec2 positionToUse = textureCoordinate;

     // Strictly conformant GLSL ES 1.0 compilers only guarantee loops with a constant
     // bound, so iterate to the maximum and stop once arraySize points have been applied.
     for(int i = 0; i < MAX_CONTOUR_POINT_COUNT; i++)
     {
         if(i >= arraySize)
         {
             break;
         }

         // Warp around the left contour point toward the right one, then the reverse.
         positionToUse = warpPositionToUse(positionToUse,
                                           vec2(leftContourPoints[i * 2], leftContourPoints[i * 2 + 1]),
                                           vec2(rightContourPoints[i * 2], rightContourPoints[i * 2 + 1]),
                                           radius, deltaArray[i], aspectRatio);
         positionToUse = warpPositionToUse(positionToUse,
                                           vec2(rightContourPoints[i * 2], rightContourPoints[i * 2 + 1]),
                                           vec2(leftContourPoints[i * 2], leftContourPoints[i * 2 + 1]),
                                           radius, deltaArray[i], aspectRatio);
     }

     gl_FragColor = texture2D(inputImageTexture, positionToUse);
 }
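MAX_CONTOUR_POINT_COUNT is a preprocessor macro, not something the driver supplies, so the host has to define it before compiling the shader. Below is a minimal sketch of one way to do that and to upload the contour uniforms on Android; the class name, landmark layout, and the capacity of 9 are assumptions, not taken from the original post:

 import android.opengl.GLES20;

 final class FaceSlimUniforms {
     static final int MAX_CONTOUR_POINT_COUNT = 9;  // assumed capacity; must cover your landmark count

     // Prepend the macro so the uniform array sizes are known when the shader compiles.
     static String patchFragmentSource(String fragmentSource) {
         return "#define MAX_CONTOUR_POINT_COUNT " + MAX_CONTOUR_POINT_COUNT + "\n" + fragmentSource;
     }

     // left/right hold (x, y) texture coordinates interleaved; deltas holds one strength per pair.
     static void upload(int program, float[] left, float[] right, float[] deltas, int count,
                        float radius, float aspectRatio) {
         GLES20.glUseProgram(program);
         GLES20.glUniform1f(GLES20.glGetUniformLocation(program, "radius"), radius);
         GLES20.glUniform1f(GLES20.glGetUniformLocation(program, "aspectRatio"), aspectRatio);
         GLES20.glUniform1fv(GLES20.glGetUniformLocation(program, "leftContourPoints"), count * 2, left, 0);
         GLES20.glUniform1fv(GLES20.glGetUniformLocation(program, "rightContourPoints"), count * 2, right, 0);
         GLES20.glUniform1fv(GLES20.glGetUniformLocation(program, "deltaArray"), count, deltas, 0);
         GLES20.glUniform1i(GLES20.glGetUniformLocation(program, "arraySize"), count);
     }
 }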

Reposted from blog.csdn.net/qq_21743659/article/details/112511367