Introductory Graphics: Dynamic Terrain (2)

      Now it is time to implement the dynamic terrain shading. We built the grid mesh in the previous part, but the shading is clearly wrong, as shown below:
[image]
      It is obvious at a glance that the normals are wrong: when we build the flat grid, every normal is hard-coded to point straight up:

normals[index] = new Vector3(0, 1, 0);

      So we need to recalculate the normals to match the displaced grid vertices. The recalculation can be done either in C# or in the shader; here we start with the C# approach.
      First, we rebuild the grid in C#, as follows:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;

[RequireComponent(typeof(MeshRenderer))]
[RequireComponent(typeof(MeshFilter))]
public class GroundPanelMesh : MonoBehaviour
{
    public Texture2D noiseTex;
    [Range(1f, 100f)]
    public float heightInten = 1f;

    public int cellCount = 50;
    public float cellWidth = 5f;

    private MeshRenderer meshRender;
    private MeshFilter meshFilter;
    private Mesh mesh;

    private void Awake()
    {
        meshRender = GetComponent<MeshRenderer>();
        meshFilter = GetComponent<MeshFilter>();
        mesh = new Mesh();
    }

    void Start()
    {
    }

    void Update()
    {
    }

    public void CreateMesh()
    {
        if (mesh != null)
        {
            mesh.Clear();
        }
        int cellVertexCount = cellCount + 1;
        Vector3[] vertices = new Vector3[cellVertexCount * cellVertexCount];
        Vector3[] normals = new Vector3[cellVertexCount * cellVertexCount];
        Vector2[] uvs = new Vector2[cellVertexCount * cellVertexCount];
        int[] triangles = new int[cellCount * cellCount * 6];
        int triindex = 0;
        Vector3 halfbias = new Vector3(cellCount * cellWidth / 2f, 0, cellCount * cellWidth / 2f);
        // Scan row by row, generating the grid centered on the origin
        for (int y = 0; y <= cellCount; y++)
        {
            for (int x = 0; x <= cellCount; x++)
            {
                int index = cellVertexCount * y + x;
                vertices[index] = new Vector3(x * cellWidth, 0, y * cellWidth) - halfbias;
                normals[index] = new Vector3(0, 1, 0);
                uvs[index] = new Vector2((float)x / (float)cellCount, (float)y / (float)cellCount);
                if (x < cellCount && y < cellCount)
                {
                    int topindex = x + y * cellVertexCount;
                    int bottomindex = x + (y + 1) * cellVertexCount;
                    triangles[triindex + 5] = topindex;
                    triangles[triindex + 4] = topindex + 1;
                    triangles[triindex + 3] = bottomindex + 1;
                    triangles[triindex + 2] = topindex;
                    triangles[triindex + 1] = bottomindex + 1;
                    triangles[triindex] = bottomindex;
                    triindex += 6;
                }
            }
        }

        mesh.vertices = vertices;
        mesh.normals = normals;
        mesh.triangles = triangles;
        mesh.uv = uvs;

        meshFilter.sharedMesh = mesh;
    }

    /// <summary>
    /// Generate the mesh from the noise height map
    /// </summary>
    public void CreateMeshWithNoise()
    {
        if (mesh != null)
        {
            mesh.Clear();
        }
        int cellVertexCount = cellCount + 1;
        Vector3[] vertices = new Vector3[cellVertexCount * cellVertexCount];
        Vector2[] uvs = new Vector2[cellVertexCount * cellVertexCount];
        int[] triangles = new int[cellCount * cellCount * 6];
        int triindex = 0;
        Vector3 halfbias = new Vector3(cellCount * cellWidth / 2f, 0, cellCount * cellWidth / 2f);
        int texwid = noiseTex.width;
        int texhei = noiseTex.height;
        for (int y = 0; y <= cellCount; y++)
        {
            for (int x = 0; x <= cellCount; x++)
            {
                int index = cellVertexCount * y + x;
                vertices[index] = new Vector3(x * cellWidth, 0, y * cellWidth) - halfbias;
                // Sample the noise texture and compute the height offset
                int px = (int)((float)x / (float)cellCount * texwid);
                int py = (int)((float)y / (float)cellCount * texhei);
                Color col = noiseTex.GetPixel(px, py);
                float r = col.r;
                Vector3 vh = new Vector3(0, r * heightInten, 0);
                vertices[index] += vh;
                uvs[index] = new Vector2((float)x / (float)cellCount, (float)y / (float)cellCount);
                if (x < cellCount && y < cellCount)
                {
                    int topindex = x + y * cellVertexCount;
                    int bottomindex = x + (y + 1) * cellVertexCount;
                    triangles[triindex + 5] = topindex;
                    triangles[triindex + 4] = topindex + 1;
                    triangles[triindex + 3] = bottomindex + 1;
                    triangles[triindex + 2] = topindex;
                    triangles[triindex + 1] = bottomindex + 1;
                    triangles[triindex] = bottomindex;
                    triindex += 6;
                }
            }
        }

        mesh.vertices = vertices;
        mesh.triangles = triangles;
        mesh.uv = uvs;
        // Recalculate the normals from the displaced vertices
        mesh.RecalculateNormals();

        meshFilter.sharedMesh = mesh;
    }
}
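
      For intuition, mesh.RecalculateNormals() conceptually does something like the following face-normal accumulation. This is a simplified sketch, not Unity's actual implementation:

Vector3[] RecalculateNormalsManually(Vector3[] vertices, int[] triangles)
{
    // Accumulate each triangle's face normal onto its three vertices, then normalize.
    Vector3[] result = new Vector3[vertices.Length];
    for (int i = 0; i < triangles.Length; i += 3)
    {
        int ia = triangles[i], ib = triangles[i + 1], ic = triangles[i + 2];
        Vector3 faceNormal = Vector3.Cross(vertices[ib] - vertices[ia],
                                           vertices[ic] - vertices[ia]).normalized;
        result[ia] += faceNormal;
        result[ib] += faceNormal;
        result[ic] += faceNormal;
    }
    for (int i = 0; i < result.Length; i++)
        result[i] = result[i].normalized;
    return result;
}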

      We have added a function that rebuilds the mesh vertices from the noise map and recalculates the normals, and the effect is as follows:
[image]
      This fixes the lighting, since it is now computed from correct normals. However, processing the vertices and normals in C# introduces a new problem: tessellation "fails". Of course, tessellation does not really fail; rather, because the noise height is applied in C# instead of being sampled in the shader's vert function, the vertices generated by tessellation cannot be displaced by the noise height map, so tessellation effectively does nothing.
[image]
      The best solution, then, is to move both the noise height and the normal calculation into the shader. As mentioned earlier, I am targeting the PC platform; if the terrain needs to run on mobile, the C# approach above is basically sufficient, since tessellation is not required there anyway (the GPUs of embedded devices do not necessarily support that feature).
      To do that, we first need a tool that generates a normal map for the grid mesh, as follows:
[image]
      On the left is the n*m normal map we want to generate, and on the right is the mesh UV data UV1-UVn. Naturally the number of pixels must be greater than or equal to the number of UVs, so there is a mapping problem to solve:
      P(x,y) = UV(?)
      that is, how does an arbitrary pixel of the normal map relate to the mesh UV data?
      My solution is to compute the P->UV mapping through the mesh's triangle topology, as follows:
[image]
      If pixel P falls inside one of the mesh's UV triangles, its normal can be derived from that triangle, as follows:
[image]
      The intersection point P1 is found by intersecting the ray from b through P with the segment ac; interpolating between Na and Nc at P1, and then between that result and Nb, gives the pixel normal Np. So the core operation is intersecting a two-dimensional ray with a line segment.
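      For comparison, the same per-pixel interpolation can also be expressed with barycentric coordinates. The helpers below are only a sketch of that alternative, not the tool's implementation (which uses the ray/segment intersection just described); Barycentric and InterpolateNormal are hypothetical names:

// Barycentric weights of point p inside triangle (a, b, c) in UV space.
// Assumes a non-degenerate triangle (denom != 0).
static Vector3 Barycentric(Vector2 p, Vector2 a, Vector2 b, Vector2 c)
{
    Vector2 v0 = b - a, v1 = c - a, v2 = p - a;
    float d00 = Vector2.Dot(v0, v0);
    float d01 = Vector2.Dot(v0, v1);
    float d11 = Vector2.Dot(v1, v1);
    float d20 = Vector2.Dot(v2, v0);
    float d21 = Vector2.Dot(v2, v1);
    float denom = d00 * d11 - d01 * d01;
    float v = (d11 * d20 - d01 * d21) / denom;
    float w = (d00 * d21 - d01 * d20) / denom;
    float u = 1f - v - w;
    return new Vector3(u, v, w);
}

// The interpolated normal is the weighted sum of the three vertex normals.
static Vector3 InterpolateNormal(Vector3 bary, Vector3 na, Vector3 nb, Vector3 nc)
{
    return (bary.x * na + bary.y * nb + bary.z * nc).normalized;
}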
      That is the analysis; the implementation is as follows:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEditor;
using System.IO;

public class EditorMeshNormalTextureGenerator : EditorWindow
{
    private string texWidString;
    private int texWidth;
    private string texHeiString;
    private int texHeight;

    private string texName;

    private Transform selectTrans;
    private MeshFilter meshFilter;
    private Mesh mesh;

    private Texture2D nmTex;
    private Color[] pix;

    private int[] triangles;
    private Vector2[] uvs;
    private Vector3[] normals;

    private int unCheck = 0;

    [MenuItem("GameTools/InfinityGround/GenerateMeshNormalTexture")]
    static void execute()
    {
        EditorMeshNormalTextureGenerator win = (EditorMeshNormalTextureGenerator)EditorWindow.GetWindow(typeof(EditorMeshNormalTextureGenerator), false, "GenerateMeshNormalTexture");
        win.Show();
    }

    private void OnGUI()
    {
        EditorGUILayout.LabelField("Select the mesh object");
        selectTrans = Selection.activeTransform;
        if (selectTrans != null)
        {
            EditorGUILayout.ObjectField(new GUIContent("Model:"), selectTrans, typeof(Transform), false);
        }
        EditorGUILayout.LabelField("Texture width");
        texWidString = EditorGUILayout.TextField("int:", texWidString);
        EditorGUILayout.LabelField("Texture height");
        texHeiString = EditorGUILayout.TextField("int:", texHeiString);
        EditorGUILayout.LabelField("Texture name");
        texName = EditorGUILayout.TextField("string:", texName);
        if (GUILayout.Button("Generate normal map"))
        {
            if (selectTrans == null)
            {
                this.ShowNotification(new GUIContent("Please select a mesh object"));
                return;
            }
            if (!int.TryParse(texWidString, out texWidth)
                || !int.TryParse(texHeiString, out texHeight))
            {
                this.ShowNotification(new GUIContent("Width and height must be integers"));
                return;
            }
            meshFilter = selectTrans.GetComponent<MeshFilter>();
            mesh = meshFilter.sharedMesh;
            GetMeshParams();
            Generate();
        }
    }

    /// <summary>
    /// Cache the mesh data (triangles, uvs, normals)
    /// </summary>
    private void GetMeshParams()
    {
        triangles = mesh.triangles;
        uvs = mesh.uv;
        normals = mesh.normals;
    }

    #region Pixel/UV relationship tests
    /// <summary>
    /// Check whether pixeluv coincides with vertex uva
    /// </summary>
    private bool CheckPixelInDot(Vector2 pxuv, Vector2 uva)
    {
        if (Mathf.Approximately(pxuv.x, uva.x)
            && Mathf.Approximately(pxuv.y, uva.y))
        {
            return true;
        }
        return false;
    }
    /// <summary>
    /// Check whether pixeluv lies on the segment uva-uvb
    /// (note: for a vertical segment, uva.x == uvb.x, the slope is infinite and this
    /// comparison fails; such points fall through to the triangle test instead)
    /// </summary>
    private bool CheckPixelInLine(Vector2 pxuv, Vector2 uva, Vector2 uvb)
    {
        float k = (uvb.y - uva.y) / (uvb.x - uva.x);
        float d = uva.y - k * uva.x;

        float y1 = pxuv.y;
        float y2 = k * pxuv.x + d;

        if (Mathf.Approximately(y1, y2))
        {
            return true;
        }
        return false;
    }
    /// <summary>
    /// Check whether pixeluv lies inside the triangle uva, uvb, uvc
    /// (the angles around an interior point sum to 360 degrees)
    /// </summary>
    private bool CheckPixelInTriangle(Vector2 pxuv, Vector2 uva, Vector2 uvb, Vector2 uvc)
    {
        Vector2 pa = uva - pxuv;
        Vector2 pb = uvb - pxuv;
        Vector2 pc = uvc - pxuv;

        float angleapb = Vector2.Angle(pb, pa);
        float anglebpc = Vector2.Angle(pc, pb);
        float anglecpa = Vector2.Angle(pa, pc);

        float angle = angleapb + anglebpc + anglecpa;
        if (Mathf.Approximately(angle, 360f))
        {
            return true;
        }
        return false;
    }
    /// <summary>
    /// Check whether pixeluv lies inside the bounding rectangle of uva, uvb, uvc
    /// </summary>
    private bool CheckPixelInOutRect(Vector2 pxuv, Vector2 uva, Vector2 uvb, Vector2 uvc)
    {
        float[] xarr = new float[] { uva.x, uvb.x, uvc.x };
        float[] yarr = new float[] { uva.y, uvb.y, uvc.y };
        float xmin = GetFloatArrMin(xarr);
        float xmax = GetFloatArrMax(xarr);
        float ymin = GetFloatArrMin(yarr);
        float ymax = GetFloatArrMax(yarr);
        if (pxuv.x > xmax
            || pxuv.x < xmin
            || pxuv.y > ymax
            || pxuv.y < ymin)
        {
            return false;
        }
        return true;
    }

    private float GetFloatArrMin(float[] arr)
    {
        float min = float.MaxValue;
        for (int i = 0; i < arr.Length; i++)
        {
            if (min > arr[i])
                min = arr[i];
        }
        return min;
    }

    private float GetFloatArrMax(float[] arr)
    {
        float max = float.MinValue;
        for (int i = 0; i < arr.Length; i++)
        {
            if (max < arr[i])
                max = arr[i];
        }
        return max;
    }
    #endregion

    #region Pixel normal calculation
    private Vector3 GetPixelNormal(Vector2Int px)
    {
        Vector2 pxuv = new Vector2((float)px.x / (float)texWidth, (float)px.y / (float)texHeight);

        Vector3 pxnorm = new Vector3(0, 0, 1);

        bool check = false;

        for (int i = 0; i < triangles.Length; i += 3)
        {
            int ta = triangles[i];
            int tb = triangles[i + 1];
            int tc = triangles[i + 2];

            Vector2 uva = uvs[ta];
            Vector2 uvb = uvs[tb];
            Vector2 uvc = uvs[tc];

            Vector3 norma = normals[ta];
            Vector3 normb = normals[tb];
            Vector3 normc = normals[tc];

            // First check the bounding rectangle of a, b, c
            if (CheckPixelInOutRect(pxuv, uva, uvb, uvc))
            {
                // Then check whether the pixel coincides with vertex a, b or c
                if (CheckPixelInDot(pxuv, uva))
                {
                    pxnorm = norma;
                    check = true;
                    break;
                }
                if (CheckPixelInDot(pxuv, uvb))
                {
                    pxnorm = normb;
                    check = true;
                    break;
                }
                if (CheckPixelInDot(pxuv, uvc))
                {
                    pxnorm = normc;
                    check = true;
                    break;
                }
                // Then check whether the pixel lies on edge ab, bc or ca
                if (CheckPixelInLine(pxuv, uva, uvb))
                {
                    float k = Vector2.Distance(uva, pxuv) / Vector2.Distance(uva, uvb);
                    pxnorm = norma + (normb - norma) * k;
                    check = true;
                    break;
                }
                if (CheckPixelInLine(pxuv, uvb, uvc))
                {
                    float k = Vector2.Distance(uvb, pxuv) / Vector2.Distance(uvb, uvc);
                    pxnorm = normb + (normc - normb) * k;
                    check = true;
                    break;
                }
                if (CheckPixelInLine(pxuv, uvc, uva))
                {
                    float k = Vector2.Distance(uvc, pxuv) / Vector2.Distance(uvc, uva);
                    pxnorm = normc + (norma - normc) * k;
                    check = true;
                    break;
                }
                // Finally check whether the pixel lies inside triangle abc
                if (CheckPixelInTriangle(pxuv, uva, uvb, uvc))
                {
                    float xa = uva.x, ya = uva.y, xb = uvb.x, yb = uvb.y, xc = uvc.x, yc = uvc.y, xp = pxuv.x, yp = pxuv.y;

                    // Intersect the line through b and the pixel with the line through a and c
                    float k1 = (yp - yb) / (xp - xb);
                    float d1 = yb - k1 * xb;

                    float k2 = (yc - ya) / (xc - xa);
                    float d2 = ya - k2 * xa;

                    float p1x = (d2 - d1) / (k1 - k2);
                    float p1y = k1 * p1x + d1;

                    Vector2 p1uv = new Vector2(p1x, p1y);

                    // Interpolate the normal at the intersection point p1 between a and c
                    float x1 = Vector2.Distance(uva, p1uv);
                    float y1 = Vector2.Distance(p1uv, uvc);

                    Vector3 p1norm = norma + (normc - norma) * x1 / (x1 + y1);

                    // Then interpolate between p1 and b to get the pixel normal
                    float x2 = Vector2.Distance(p1uv, pxuv);
                    float y2 = Vector2.Distance(pxuv, uvb);

                    pxnorm = p1norm + (normb - p1norm) * x2 / (x2 + y2);

                    check = true;
                    break;
                }
            }
        }

        if (!check)
        {
            unCheck++;
#if UNITY_EDITOR
            Debug.LogErrorFormat("EditorMeshNormalTextureGenerator GetPixelNormal Error px = {0} pxuv = {1} uncheck = {2}", px, pxuv, unCheck);
#endif
        }

        return pxnorm;
    }

    #endregion

    /// <summary>
    /// Convert a normal vector to a color value
    /// </summary>
    private Color NormalVectorToColor(Vector3 vec)
    {
        float r = PackNormal(vec.x);
        float g = PackNormal(vec.y);
        float b = PackNormal(vec.z);
        return new Color(r, g, b, 1);
    }

    /// <summary>
    /// Remap a component from [-1,1] to [0,1]
    /// </summary>
    private float PackNormal(float r)
    {
        float g = (r + 1f) / 2f;
        return g;
    }

    private void Generate()
    {
        unCheck = 0;

        pix = new Color[texWidth * texHeight];

        for (int y = 0; y < texHeight; y++)
        {
            for (int x = 0; x < texWidth; x++)
            {
                Vector2Int px = new Vector2Int(x, y);
                Vector3 norm = GetPixelNormal(px);
                pix[y * texWidth + x] = NormalVectorToColor(norm);
            }
        }

        nmTex = new Texture2D(texWidth, texHeight);
        nmTex.SetPixels(pix);
        nmTex.Apply();

        byte[] buffer = nmTex.EncodeToJPG();
        string filepath = Application.dataPath + "/InfinityGround/Texture/" + texName + ".jpg";
        File.WriteAllBytes(filepath, buffer);
        AssetDatabase.Refresh();
    }
}

      The core of the tool is determining which UV triangle each pixel P falls in and then interpolating the normal by weight. Testing it gives the following result:
[image]
      Several problems show up here. First, the generated normal map is multi-colored. That is easy to understand: we covered normal mapping before and know the difference between tangent-space and model-space normals. The normals we are writing into the map right now are in model space, so they point "in all directions" and the colors vary accordingly.
      Second, beyond the general noise, the generated texture has a number of "purple gaps", meaning those pixels matched none of the UV triangles, so I had to add some debug output.

private void Generate()
{
    unCheck = 0;

    nmPix = new Color[texWidth * texHeight];
    // A second "check" texture marks pixels that matched a UV triangle (white) and pixels that did not (black)
    ckPix = new Color[texWidth * texHeight];

    for (int y = 0; y < texHeight; y++)
    {
        for (int x = 0; x < texWidth; x++)
        {
            int index = y * texWidth + x;
            Vector2Int px = new Vector2Int(x, y);
            bool check;
            Vector3 norm = GetPixelNormal(px, out check);
            nmPix[index] = NormalVectorToColor(norm);
            ckPix[index] = check ? Color.white : Color.black;
        }
    }

    nmTex = new Texture2D(texWidth, texHeight);
    nmTex.SetPixels(nmPix);
    nmTex.Apply();

    byte[] buffer = nmTex.EncodeToJPG();
    string filepath = Application.dataPath + "/InfinityGround/Texture/" + texName + ".jpg";
    File.WriteAllBytes(filepath, buffer);

    ckTex = new Texture2D(texWidth, texHeight);
    ckTex.SetPixels(ckPix);
    ckTex.Apply();

    buffer = ckTex.EncodeToJPG();
    filepath = Application.dataPath + "/InfinityGround/Texture/" + texName + "_check.jpg";
    File.WriteAllBytes(filepath, buffer);

    AssetDatabase.Refresh();
}

      After generating again, I found the following:
[image]
      The black-and-white check texture shows no detection failures apart from the noise, so let's tackle the noise first. My idea is to relax the detection precision: floating-point comparisons are only accurate to a point, and if the tolerance is too tight the tests become too strict and false negatives become more likely. So first let's make the comparison tolerance adjustable, as follows:

    private float floatTole;

    #region Floating-point comparison with adjustable tolerance

    private bool FloatApproximate(float a, float b)
    {
        float tole = Mathf.Abs(a * floatTole);
        if (a > b + tole || a < b - tole)
        {
            return false;
        }
        return true;
    }

    private bool Vector2Approximate(Vector2 a, Vector2 b)
    {
        float ax = a.x, ay = a.y, bx = b.x, by = b.y;
        if (!FloatApproximate(ax, bx) || !FloatApproximate(ay, by))
        {
            return false;
        }
        return true;
    }

    private bool Vector3Approximate(Vector3 a, Vector3 b)
    {
        float ax = a.x, ay = a.y, bx = b.x, by = b.y, az = a.z, bz = b.z;
        if (!FloatApproximate(ax, bx) || !FloatApproximate(ay, by) || !FloatApproximate(az, bz))
        {
            return false;
        }
        return true;
    }
    #endregion
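
      Presumably the detection functions are then switched from Mathf.Approximately to these tolerance helpers, along these lines (a sketch of the assumed usage; the article does not show the updated method bodies):

// Sketch (assumed usage): the coincidence and triangle tests re-expressed with the
// tolerance helpers, so that floatTole controls how strict the detection is.
private bool CheckPixelInDot(Vector2 pxuv, Vector2 uva)
{
    return Vector2Approximate(pxuv, uva);
}

private bool CheckPixelInTriangle(Vector2 pxuv, Vector2 uva, Vector2 uvb, Vector2 uvc)
{
    float angle = Vector2.Angle(uvb - pxuv, uva - pxuv)
                + Vector2.Angle(uvc - pxuv, uvb - pxuv)
                + Vector2.Angle(uva - pxuv, uvc - pxuv);
    return FloatApproximate(angle, 360f);
}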

      With the floating-point comparisons encapsulated, we can generate the map with different tolerance values, as follows:
[image]
      The smaller the tolerance, the smoother the interpolated normals, but the more noise; conversely, the larger the tolerance, the rougher the interpolation, but the noise disappears. The adjustable tolerance also fixes the "gap" misdetections. For a given terrain chunk you simply have to experiment with the value; I generated several normal maps with different tolerances in turn, as follows:
[image]
      Personally I think a tolerance of 0.00001 gives the best result.
      Next we have to convert the normals into tangent space. When we discussed normal mapping earlier we met the TBN (Tangent, Binormal, Normal) matrix, which handles the coordinate transformation from model space to tangent space, as follows:
[image]
      Unity itself provides a TBN rotation macro, as follows:

#define TANGENT_SPACE_ROTATION \
    float3 binormal = cross( normalize(v.normal), normalize(v.tangent.xyz) ) * v.tangent.w; \
    float3x3 rotation = float3x3( v.tangent.xyz, binormal, v.normal )

      This means that if we naively transform each normal into its own tangent space (where that normal itself is the N axis of TBN), every tangent-space normal comes out as (0,0,1), which packs to the color (0.5, 0.5, 1), so the texture is a uniform purple-blue, as follows:
[image]

      Here we need to talk about a concept: baking the normal map.
      Usually high-precision models are made in 3ds Max or Blender. Such models cannot be used directly in the engine; artists have to reduce the face count to an appropriate level so that ordinary PCs or phones can render them. But then the low-poly model B loses the lighting detail of the high-poly model A. If we have a texture that records all of A's surface detail relative to B, we can reproduce A's lighting in B's shading and lighting calculations.
      Note the key phrase "relative to". A normal map is the texture formed by the normals of the high-poly model A expressed in the tangent space of the low-poly model B. Taking our terrain chunk as an example: after the high-poly normal N1 (from the grid displaced by the noise height) is transformed by the TBN matrix of the low-poly mesh (the flat grid), we obtain the tangent-space normal of A relative to B.
[image]
      The code is as follows:

    // Matrix3x3 is a small custom 3x3 matrix type (not a built-in Unity type);
    // here it holds the constant TBN rotation of the flat base plane.
    private Matrix3x3 tbnMat = new Matrix3x3(new Vector3(0, 0, 1), new Vector3(1, 0, 0), new Vector3(0, 1, 0));

    // At the end of GetPixelNormal: rotate the model-space normal into tangent space
    pxnorm = tbnMat * pxnorm.normalized;
    return pxnorm.normalized;
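
      For reference, the same transform can be written with only built-in Unity types. This is a minimal sketch assuming the flat base plane's tangent points along +X, its binormal along +Z and its normal along +Y, following the row convention of the TANGENT_SPACE_ROTATION macro quoted above; the tool's own Matrix3x3 may order the axes differently:

// Minimal sketch (assumed axes), not the article's Matrix3x3 implementation:
// rotate a model-space normal into the tangent space of the flat base plane.
static Vector3 ModelToTangentSpace(Vector3 n)
{
    Vector3 tangent  = new Vector3(1, 0, 0);
    Vector3 binormal = new Vector3(0, 0, 1);
    Vector3 normal   = new Vector3(0, 1, 0);
    n = n.normalized;
    return new Vector3(Vector3.Dot(tangent, n),
                       Vector3.Dot(binormal, n),
                       Vector3.Dot(normal, n)).normalized;
}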

      The final normal map is generated as follows:
[image]
      Next, use this tangent space normal map for rendering, as follows:

Shader "InfinityGround/GroundChunkTesselSurfaceShader"
{
    
    
    Properties
    {
    
    
        _Color ("Color", Color) = (1,1,1,1)
        _MainTex ("Albedo (RGB)", 2D) = "white" {
    
    }
        _Glossiness ("Smoothness", Range(0,1)) = 0.5
        _Metallic ("Metallic", Range(0,1)) = 0.0
        _TesselMin("Tessellation Min Distance",Range(0,200)) = 1
        _TesselMax("Tessellation Max Distance",Range(0,400)) = 1
        _TesselFactor("Tessellation Factor",Range(1,20)) = 5
        _NoiseTex("Noise Texture",2D) = "white" {
    
    }
        _HeightInten("Height Intensity",Range(0,100)) = 10
        _NormalTex("Normal Texture",2D) = "white" {
    
    }
        [Toggle]_IsNorm("Is Apply Normal",int) = 0
    }
    SubShader
    {
    
    
        Tags {
    
     "RenderType"="Opaque" }
        LOD 200

        CGPROGRAM

        #pragma surface surf Standard fullforwardshadows vertex:vert tessellate:tess

        #pragma target 5.0

        #include "Tessellation.cginc"

        sampler2D _MainTex;

        struct Input
        {
    
    
            float2 uv_MainTex;
        };

        half _Glossiness;
        half _Metallic;
        fixed4 _Color;

        float _TesselMin;
        float _TesselMax;
        int _TesselFactor;
        sampler2D _NoiseTex;
        float _HeightInten;

        sampler2D _NormalTex;
        int _IsNorm;

        float4 tess(appdata_tan v0,appdata_tan v1,appdata_tan v2)
        {
    
    
            float4 v = UnityDistanceBasedTess(v0.vertex,v1.vertex, v2.vertex,_TesselMin,_TesselMax,_TesselFactor);
            return v;
        }

        void vert(inout appdata_tan v)
        {
    
    
            float3 normal = UnityObjectToWorldNormal(v.normal);
            float r = tex2Dlod(_NoiseTex,v.texcoord).r;
            v.vertex+=float4(normal*_HeightInten*r,0);
        }

        UNITY_INSTANCING_BUFFER_START(Props)
        UNITY_INSTANCING_BUFFER_END(Props)

        void surf (Input IN, inout SurfaceOutputStandard o)
        {
    
    
            fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
            o.Albedo = c.rgb;
            o.Metallic = _Metallic;
            o.Smoothness = _Glossiness;
            o.Alpha = c.a;
            if(_IsNorm==1)
            {
    
    
                o.Normal = UnpackNormal(tex2D(_NormalTex, IN.uv_MainTex));
            }
        }
        ENDCG
    }
    FallBack "Diffuse"
}

      The effect is as follows:
[image]
      With that, we have tessellated terrain with correct normal-based lighting.
      Of course, there are two possible refinements:
      1. The interpolation in the normal map is not smooth enough and its resolution is not high enough. This can be improved by increasing the vertex count of the high-poly mesh (the noise-height mesh) and raising the generation resolution, though generation gets noticeably slower.
      2. Is there a general way to generate tangent-space normal maps? It is actually straightforward: take the mesh data of any high-poly model A and its low-poly model B (UVs, normals, tangents, vertex positions), then use the UV triangle detection and linear interpolation above to obtain, for each pixel of the normal map, the model-space normal of A and the per-vertex tangent-space TBN matrix of B, from which the final tangent-space normal can be computed (see the sketch below).
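      As a rough illustration of that last step, here is a sketch of the per-pixel computation. The names are illustrative, not from the article's tool; the interpolation of nHighModel, t, b and n is assumed to come from the UV triangle detection described earlier:

// nHighModel : model-space normal interpolated from the high-poly triangle covering this pixel's UV
// t, b, n    : tangent, binormal and normal interpolated from the low-poly triangle at the same UV
static Vector3 BakeTangentSpaceNormal(Vector3 nHighModel, Vector3 t, Vector3 b, Vector3 n)
{
    nHighModel = nHighModel.normalized;
    // Project the high-poly normal onto the low-poly tangent frame (TBN rows)
    return new Vector3(Vector3.Dot(t, nHighModel),
                       Vector3.Dot(b, nHighModel),
                       Vector3.Dot(n, nHighModel)).normalized;
}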
      If there is demand and I find the time, I will turn this into a Unity plug-in and accelerate the computation with multi-threading and ComputeShader.

Origin: blog.csdn.net/yinhun2012/article/details/122382905