Unity Shader总结(十三)——深度和法线纹理、改进的运动模糊和边缘检测及高度雾的生成


深度纹理实际就是一张渲染纹理,它里面存储的像素值是一个高精度的深度值,范围是[0,1],非线性分布;

如何获取

脚本中:
深度:camera.depthTextureMode = DepthTextureMode.Depth
深度+法线:camera.depthTextureMode = DepthTextureMode.DepthNormals

深度和深度+法线:camera.depthTextureMode |= DepthTextureMode.Depth
                               camera.depthTextureMode |= DepthTextureMode.DepthNormals
采样:SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv)——绝大多数情况tex2D也可以,这里防止某些平台特殊处理;

查看

在片元着色器中用代码输出深度和法线值后如果画面全黑或全白,将摄像机的远剪裁平面调小;

运动模糊

运动模糊的另一种方法是速度映射图:利用深度纹理在片元着色器中为每个像素计算其在世界空间下的位置,计算前一帧和当前帧的位置差,生成该像素的速度。

脚本

using UnityEngine;
using System.Collections;

// Depth-texture based motion blur: the shader reconstructs each pixel's world
// position from the depth texture, re-projects it with the previous frame's
// view-projection matrix, and blurs along the resulting velocity vector.
public class MotionBlurWithDepthTexture : PostEffectsBase {

	public Shader motionBlurShader;
	private Material motionBlurMaterial = null;

	// Material lazily created from motionBlurShader (null if the shader is unsupported).
	public Material material {
		get {
			motionBlurMaterial = CheckShaderAndCreateMaterial(motionBlurShader, motionBlurMaterial);
			return motionBlurMaterial;
		}
	}

	private Camera myCamera;
	// Cached camera component on this game object.
	public Camera camera {
		get {
			if (myCamera == null) {
				myCamera = GetComponent<Camera>();
			}
			return myCamera;
		}
	}

	[Range(0.0f, 1.0f)]
	public float blurSize = 0.5f;

	// View * projection matrix of the previous frame.
	private Matrix4x4 previousViewProjectionMatrix;

	void OnEnable() {
		// Ask Unity to render a depth texture for this camera.
		camera.depthTextureMode |= DepthTextureMode.Depth;
		previousViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
	}

	void OnRenderImage (RenderTexture src, RenderTexture dest) {
		if (material == null) {
			// Shader unavailable: pass the image through untouched.
			Graphics.Blit(src, dest);
			return;
		}

		material.SetFloat("_BlurSize", blurSize);
		material.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);

		// Current frame's view * projection; its inverse maps NDC back to
		// world space inside the fragment shader.
		Matrix4x4 currentViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
		Matrix4x4 currentViewProjectionInverseMatrix = currentViewProjectionMatrix.inverse;
		material.SetMatrix("_CurrentViewProjectionInverseMatrix", currentViewProjectionInverseMatrix);

		// Remember this frame's matrix for the next frame's velocity computation.
		previousViewProjectionMatrix = currentViewProjectionMatrix;

		Graphics.Blit(src, dest, material);
	}
}

shader

// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'

Shader "Unity Shaders Book/Chapter 13/Motion Blur With Depth Texture" {
	Properties {
		_MainTex ("Base (RGB)", 2D) = "white" {}
		_BlurSize ("Blur Size", Float) = 1.0
	}
	SubShader {
		CGINCLUDE
		
		#include "UnityCG.cginc"
		
		sampler2D _MainTex;
		half4 _MainTex_TexelSize;
		// Depth texture supplied by Unity.
		sampler2D _CameraDepthTexture;
		// Matrices supplied by the C# script.
		float4x4 _CurrentViewProjectionInverseMatrix;
		float4x4 _PreviousViewProjectionMatrix;
		half _BlurSize;
		
		struct v2f {
			float4 pos : SV_POSITION;
			half2 uv : TEXCOORD0;
			half2 uv_depth : TEXCOORD1;
		};
		
		v2f vert(appdata_img v) {
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);
			
			o.uv = v.texcoord;
			o.uv_depth = v.texcoord;
			
			// Platform difference: flip the depth UV vertically when the
			// render texture origin is at the top.
			#if UNITY_UV_STARTS_AT_TOP
			if (_MainTex_TexelSize.y < 0)
				o.uv_depth.y = 1 - o.uv_depth.y;
			#endif
			
			return o;
		}
		
		fixed4 frag(v2f i) : SV_Target {
			// Sample the depth texture for this pixel.
			float depth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
			// Rebuild the pixel's NDC position from its UV and depth.
			float4 ndcPos = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, depth * 2 - 1, 1);
			// Transform by the inverse view-projection matrix ...
			float4 homogeneousWorldPos = mul(_CurrentViewProjectionInverseMatrix, ndcPos);
			// ... and divide by w to obtain the world-space position.
			float4 worldPos = homogeneousWorldPos / homogeneousWorldPos.w;
			
			// Current frame position in NDC.
			float4 currentPos = ndcPos;
			// Project the same world position with last frame's matrix to get
			// where this pixel was in the previous frame (in NDC).
			float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos);
			previousPos /= previousPos.w;
			
			// Screen-space velocity is half the NDC position difference.
			float2 velocity = (currentPos.xy - previousPos.xy)/2.0f;
			
			// Average three samples taken along the velocity direction;
			// _BlurSize scales the sampling distance.
			float2 uv = i.uv;
			float4 c = tex2D(_MainTex, uv);
			uv += velocity * _BlurSize;
			for (int it = 1; it < 3; it++, uv += velocity * _BlurSize) {
				float4 currentColor = tex2D(_MainTex, uv);
				c += currentColor;
			}
			c /= 3;
			
			return fixed4(c.rgb, 1.0);
		}
		
		ENDCG
		
		Pass {
			ZTest Always Cull Off ZWrite Off
			
			CGPROGRAM
			
			#pragma vertex vert
			#pragma fragment frag
			
			ENDCG
		}
	}
	FallBack Off
}

基于屏幕后处理的全局雾效

高度雾
脚本:

using UnityEngine;
using System.Collections;

// Screen-space height fog: the shader reconstructs each pixel's world position
// from the depth texture plus an interpolated frustum-corner ray, then blends
// in fog based on world-space height between fogStart and fogEnd.
public class FogWithDepthTexture : PostEffectsBase {

	public Shader fogShader;
	private Material fogMaterial = null;

	// Material lazily created from fogShader (null if the shader is unsupported).
	public Material material {
		get {
			fogMaterial = CheckShaderAndCreateMaterial(fogShader, fogMaterial);
			return fogMaterial;
		}
	}

	private Camera myCamera;
	// Cached camera component on this game object.
	public Camera camera {
		get {
			if (myCamera == null) {
				myCamera = GetComponent<Camera>();
			}
			return myCamera;
		}
	}

	private Transform myCameraTransform;
	// Cached transform of the camera.
	public Transform cameraTransform {
		get {
			if (myCameraTransform == null) {
				myCameraTransform = camera.transform;
			}
			return myCameraTransform;
		}
	}

	// Overall fog density multiplier.
	[Range(0.0f, 3.0f)]
	public float fogDensity = 1.0f;
	// Fog color.
	public Color fogColor = Color.white;
	// World-space heights where the fog starts (full) and ends (none).
	public float fogStart = 0.0f;
	public float fogEnd = 2.0f;

	void OnEnable() {
		// Ask Unity to render a depth texture for this camera.
		camera.depthTextureMode |= DepthTextureMode.Depth;
	}

	void OnRenderImage (RenderTexture src, RenderTexture dest) {
		if (material != null) {
			material.SetMatrix("_FrustumCornersRay", CalculateFrustumCorners());

			material.SetFloat("_FogDensity", fogDensity);
			material.SetColor("_FogColor", fogColor);
			material.SetFloat("_FogStart", fogStart);
			material.SetFloat("_FogEnd", fogEnd);

			Graphics.Blit (src, dest, material);
		} else {
			Graphics.Blit(src, dest);
		}
	}

	// Builds a matrix whose rows are the four near-plane corner rays in world
	// space, ordered bottom-left, bottom-right, top-right, top-left. Each ray
	// is scaled so that ray * linearEyeDepth gives the camera-to-pixel offset.
	private Matrix4x4 CalculateFrustumCorners() {
		Matrix4x4 frustumCorners = Matrix4x4.identity;

		float fov = camera.fieldOfView;
		float near = camera.nearClipPlane;
		float aspect = camera.aspect;

		float halfHeight = near * Mathf.Tan(fov * 0.5f * Mathf.Deg2Rad);
		Vector3 toRight = cameraTransform.right * halfHeight * aspect;
		Vector3 toTop = cameraTransform.up * halfHeight;

		Vector3 topLeft = cameraTransform.forward * near + toTop - toRight;
		// All four corner vectors share the same length, so one scale factor
		// converts a unit corner ray into "offset per unit of eye depth".
		float scale = topLeft.magnitude / near;

		topLeft.Normalize();
		topLeft *= scale;

		Vector3 topRight = cameraTransform.forward * near + toRight + toTop;
		topRight.Normalize();
		topRight *= scale;

		Vector3 bottomLeft = cameraTransform.forward * near - toTop - toRight;
		bottomLeft.Normalize();
		bottomLeft *= scale;

		Vector3 bottomRight = cameraTransform.forward * near + toRight - toTop;
		bottomRight.Normalize();
		bottomRight *= scale;

		frustumCorners.SetRow(0, bottomLeft);
		frustumCorners.SetRow(1, bottomRight);
		frustumCorners.SetRow(2, topRight);
		frustumCorners.SetRow(3, topLeft);

		return frustumCorners;
	}
}

shader

// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'

Shader "Unity Shaders Book/Chapter 13/Fog With Depth Texture" {
	Properties {
		_MainTex ("Base (RGB)", 2D) = "white" {}
		_FogDensity ("Fog Density", Float) = 1.0
		_FogColor ("Fog Color", Color) = (1, 1, 1, 1)
		_FogStart ("Fog Start", Float) = 0.0
		_FogEnd ("Fog End", Float) = 1.0
	}
	SubShader {
		CGINCLUDE
		
		#include "UnityCG.cginc"
		
		// Rows hold the four frustum-corner rays set by the C# script
		// (row 0 = bottom-left, 1 = bottom-right, 2 = top-right, 3 = top-left).
		float4x4 _FrustumCornersRay;
		
		sampler2D _MainTex;
		half4 _MainTex_TexelSize;
		// Depth texture supplied by Unity.
		sampler2D _CameraDepthTexture;
		half _FogDensity;
		fixed4 _FogColor;
		float _FogStart;
		float _FogEnd;
		
		struct v2f {
			float4 pos : SV_POSITION;
			half2 uv : TEXCOORD0;
			half2 uv_depth : TEXCOORD1;
			float4 interpolatedRay : TEXCOORD2; // per-pixel interpolated corner ray
		};
		
		v2f vert(appdata_img v) {
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);
			
			o.uv = v.texcoord;
			o.uv_depth = v.texcoord;
			
			// Platform difference: flip the depth UV vertically when the
			// render texture origin is at the top.
			#if UNITY_UV_STARTS_AT_TOP
			if (_MainTex_TexelSize.y < 0)
				o.uv_depth.y = 1 - o.uv_depth.y;
			#endif
			
			// Select the frustum-corner ray matching this full-screen-quad
			// vertex by which quadrant its UV falls into.
			int index = 0;
			if (v.texcoord.x < 0.5 && v.texcoord.y < 0.5) {
				index = 0;
			} else if (v.texcoord.x > 0.5 && v.texcoord.y < 0.5) {
				index = 1;
			} else if (v.texcoord.x > 0.5 && v.texcoord.y > 0.5) {
				index = 2;
			} else {
				index = 3;
			}

			// When the image is vertically flipped, mirror the corner index too.
			#if UNITY_UV_STARTS_AT_TOP
			if (_MainTex_TexelSize.y < 0)
				index = 3 - index;
			#endif
			
			o.interpolatedRay = _FrustumCornersRay[index];
			
			return o;
		}
		
		fixed4 frag(v2f i) : SV_Target {
			// Linear eye-space depth of this pixel.
			float linearDepth = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth));
			// World position: linearDepth * interpolatedRay is the pixel's
			// offset from the camera; interpolatedRay is the corner ray
			// emitted by vert and interpolated across the quad.
			float3 worldPos = _WorldSpaceCameraPos + linearDepth * i.interpolatedRay.xyz;
			
			// Height-based fog factor (1 at _FogStart, 0 at _FogEnd),
			// scaled by the density uniform and clamped to [0,1].
			float fogDensity = (_FogEnd - worldPos.y) / (_FogEnd - _FogStart); 
			fogDensity = saturate(fogDensity * _FogDensity);
			
			fixed4 finalColor = tex2D(_MainTex, i.uv);
			finalColor.rgb = lerp(finalColor.rgb, _FogColor.rgb, fogDensity);
			
			return finalColor;
		}
		
		ENDCG
		
		Pass {
			ZTest Always Cull Off ZWrite Off
			
			CGPROGRAM
			
			#pragma vertex vert
			#pragma fragment frag
			
			ENDCG
		}
	}
	FallBack Off
}

边缘检测

在深度和法线纹理上进行边缘检测,这样图像不会受到光照和纹理的影响,实现的效果更可靠;

(原文此处有Roberts算子的示意图)
Roberts算子本质是计算左上角和右下角的差值,因此下面的计算会取对角方向的深度或法线值,比较它们之间的差值,如果超过某个阈值,就认为它们之间存在一条边;

脚本:

using UnityEngine;
using System.Collections;

// Edge detection performed on the camera's depth+normals texture, so the
// resulting outlines are unaffected by scene lighting and surface textures.
public class EdgeDetectNormalsAndDepth : PostEffectsBase {

	public Shader edgeDetectShader;
	private Material edgeDetectMaterial = null;

	// Material lazily created from edgeDetectShader (null if the shader is unsupported).
	public Material material {
		get {
			edgeDetectMaterial = CheckShaderAndCreateMaterial(edgeDetectShader, edgeDetectMaterial);
			return edgeDetectMaterial;
		}
	}

	// 0 = draw edges over the scene image, 1 = show edges only.
	[Range(0.0f, 1.0f)]
	public float edgesOnly = 0.0f;

	public Color edgeColor = Color.black;

	public Color backgroundColor = Color.white;

	// Sampling offset (in texels) used when reading the depth+normals
	// texture; larger values produce wider outlines.
	public float sampleDistance = 1.0f;

	// How strongly a depth difference between neighbouring samples counts as
	// an edge; larger values make small depth changes produce edges.
	public float sensitivityDepth = 1.0f;

	// Same, but for the normal difference.
	public float sensitivityNormals = 1.0f;

	void OnEnable() {
		// Ask Unity to render a combined depth+normals texture for this camera.
		GetComponent<Camera>().depthTextureMode |= DepthTextureMode.DepthNormals;
	}

	// [ImageEffectOpaque] runs the effect right after opaque geometry, so
	// transparent objects are not outlined.
	[ImageEffectOpaque]
	void OnRenderImage (RenderTexture src, RenderTexture dest) {
		if (material == null) {
			// Shader unavailable: pass the image through untouched.
			Graphics.Blit(src, dest);
			return;
		}

		material.SetFloat("_EdgeOnly", edgesOnly);
		material.SetColor("_EdgeColor", edgeColor);
		material.SetColor("_BackgroundColor", backgroundColor);
		material.SetFloat("_SampleDistance", sampleDistance);
		material.SetVector("_Sensitivity", new Vector4(sensitivityNormals, sensitivityDepth, 0.0f, 0.0f));

		Graphics.Blit(src, dest, material);
	}
}

shader

// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'

Shader "Unity Shaders Book/Chapter 13/Edge Detection Normals And Depth" {
	Properties {
		_MainTex ("Base (RGB)", 2D) = "white" {}
		_EdgeOnly ("Edge Only", Float) = 1.0
		_EdgeColor ("Edge Color", Color) = (0, 0, 0, 1)
		_BackgroundColor ("Background Color", Color) = (1, 1, 1, 1)
		_SampleDistance ("Sample Distance", Float) = 1.0
		// x = normal sensitivity, y = depth sensitivity; z and w are unused.
		_Sensitivity ("Sensitivity", Vector) = (1, 1, 1, 1)
	}
	SubShader {
		CGINCLUDE
		
		#include "UnityCG.cginc"
		
		sampler2D _MainTex;
		// Declared because we sample neighbouring texels.
		half4 _MainTex_TexelSize;
		fixed _EdgeOnly;
		fixed4 _EdgeColor;
		fixed4 _BackgroundColor;
		float _SampleDistance;
		half4 _Sensitivity;
		
		sampler2D _CameraDepthNormalsTexture;
		
		struct v2f {
			float4 pos : SV_POSITION;
			half2 uv[5]: TEXCOORD0;
		};
		
		v2f vert(appdata_img v) {
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);
			
			// uv[0] samples the screen image itself.
			half2 uv = v.texcoord;
			o.uv[0] = uv;
			
			// Platform difference: flip vertically when the render texture
			// origin is at the top.
			#if UNITY_UV_STARTS_AT_TOP
			if (_MainTex_TexelSize.y < 0)
				uv.y = 1 - uv.y;
			#endif
			
			// uv[1..4] are the Roberts-cross diagonal neighbours, offset by
			// _SampleDistance texels.
			o.uv[1] = uv + _MainTex_TexelSize.xy * half2(1,1) * _SampleDistance;
			o.uv[2] = uv + _MainTex_TexelSize.xy * half2(-1,-1) * _SampleDistance;
			o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1,1) * _SampleDistance;
			o.uv[4] = uv + _MainTex_TexelSize.xy * half2(1,-1) * _SampleDistance;
			
			return o;
		}
		
		// Returns 1 when the two encoded depth+normal samples are similar
		// enough (no edge between them), 0 otherwise.
		// FIX: the second parameter was renamed from 'sample' to 'samp' -
		// 'sample' is a reserved keyword in HLSL (Shader Model 4.1+) and
		// fails to compile on modern shader compilers.
		half CheckSame(half4 center, half4 samp) {
			half2 centerNormal = center.xy;
			float centerDepth = DecodeFloatRG(center.zw);
			half2 sampleNormal = samp.xy;
			float sampleDepth = DecodeFloatRG(samp.zw);
			
			// Difference in normals: the raw encoded xy values are compared
			// without decoding - sufficient for a threshold test.
			half2 diffNormal = abs(centerNormal - sampleNormal) * _Sensitivity.x;
			int isSameNormal = (diffNormal.x + diffNormal.y) < 0.1;
			// Difference in depth, with the threshold scaled by the center
			// depth so distant surfaces are not over-detected.
			float diffDepth = abs(centerDepth - sampleDepth) * _Sensitivity.y;
			int isSameDepth = diffDepth < 0.1 * centerDepth;
			
			// 1 - if normals and depth are similar enough
			// 0 - otherwise
			return isSameNormal * isSameDepth ? 1.0 : 0.0;
		}
		
		fixed4 fragRobertsCrossDepthAndNormal(v2f i) : SV_Target {
			// Sample the depth+normals texture at the four diagonal offsets.
			half4 samp1 = tex2D(_CameraDepthNormalsTexture, i.uv[1]);
			half4 samp2 = tex2D(_CameraDepthNormalsTexture, i.uv[2]);
			half4 samp3 = tex2D(_CameraDepthNormalsTexture, i.uv[3]);
			half4 samp4 = tex2D(_CameraDepthNormalsTexture, i.uv[4]);
			
			// Roberts cross: compare the two diagonals; CheckSame returns 0
			// when the difference exceeds the threshold, i.e. an edge.
			half edge = 1.0;
			edge *= CheckSame(samp1, samp2);
			edge *= CheckSame(samp3, samp4);
			
			fixed4 withEdgeColor = lerp(_EdgeColor, tex2D(_MainTex, i.uv[0]), edge);
			fixed4 onlyEdgeColor = lerp(_EdgeColor, _BackgroundColor, edge);
			
			return lerp(withEdgeColor, onlyEdgeColor, _EdgeOnly);
		}
		
		ENDCG
		
		Pass {
			ZTest Always Cull Off ZWrite Off
			
			CGPROGRAM
			
			#pragma vertex vert
			#pragma fragment fragRobertsCrossDepthAndNormal
			
			ENDCG
		}
	}
	FallBack Off
}

如果希望特定的物体描边,使用Graphics.DrawMesh或Graphics.DrawMeshNow把需要描边的物体再次渲染一遍,再使用边缘检测计算深度或法线纹理中每个像素的梯度值,如果小于某个阈值,就在shader中使用clip()将该像素剔除,从而显示出原来的物体颜色;

猜你喜欢

转载自blog.csdn.net/memory_MM_forever/article/details/118491383