Reading notes on "Unity Shaders Book": dynamic effects and screen post-processing


1. Dynamic effects

1.1 Built-in time variables

  • _Time (float4): (t/20, t, 2t, 3t), where t is the time since the scene was loaded
  • _SinTime (float4): (sin(t/8), sin(t/4), sin(t/2), sin(t))
  • _CosTime (float4): (cos(t/8), cos(t/4), cos(t/2), cos(t))
  • unity_DeltaTime (float4): (dt, 1/dt, smoothedDt, 1/smoothedDt), where dt is the frame delta time

1.2 Texture animation

Frame sequence animation
Prepare a texture containing the animation frames laid out in a grid (here 4 × 4, played back at _Speed frames per second).

Shader "Unity Shaders Book/Chapter 11/Image Sequence Animation" {
	Properties {
		_Color ("Color Tint", Color) = (1, 1, 1, 1)
		_MainTex ("Image Sequence", 2D) = "white" {}
    	_HorizontalAmount ("Horizontal Amount", Float) = 4
    	_VerticalAmount ("Vertical Amount", Float) = 4
    	_Speed ("Speed", Range(1, 100)) = 30
	}
	SubShader {
	// Standard tags for semi-transparent rendering
		Tags {"Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent"}
		
		Pass {
			Tags { "LightMode"="ForwardBase" }
			
			ZWrite Off
			Blend SrcAlpha OneMinusSrcAlpha
			
			CGPROGRAM
			
			#pragma vertex vert  
			#pragma fragment frag
			
			#include "UnityCG.cginc"
			
			fixed4 _Color;
			sampler2D _MainTex;
			float4 _MainTex_ST;
			float _HorizontalAmount;
			float _VerticalAmount;
			float _Speed;
			  
			struct a2v {  
			    float4 vertex : POSITION; 
			    float2 texcoord : TEXCOORD0;
			};  
			
			struct v2f {  
			    float4 pos : SV_POSITION;
			    float2 uv : TEXCOORD0;
			};  
			
			v2f vert (a2v v) {  
				v2f o;  
				o.pos = mul(UNITY_MATRIX_MVP, v.vertex);  
				o.uv = TRANSFORM_TEX(v.texcoord, _MainTex);  
				return o;
			}  
			
			fixed4 frag (v2f i) : SV_Target {
				// Compute the current row and column from the elapsed time
				float time = floor(_Time.y * _Speed);  
				float row = floor(time / _HorizontalAmount);
				float column = time - row * _HorizontalAmount;
				
//				half2 uv = float2(i.uv.x /_HorizontalAmount, i.uv.y / _VerticalAmount);
//				uv.x += column / _HorizontalAmount;
//				uv.y -= row / _VerticalAmount;
			// Offset uv into the current frame's sub-rectangle
				half2 uv = i.uv + half2(column, -row);
				uv.x /=  _HorizontalAmount;
				uv.y /= _VerticalAmount;
				
				fixed4 c = tex2D(_MainTex, uv);
				c.rgb *= _Color.rgb;
				
				return c;
			}
			
			ENDCG
		}  
	}
	FallBack "Transparent/VertexLit"
}
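
A quick worked example of the frame math: with _Speed = 30 and a 4 × 4 sheet, at _Time.y = 0.5 we get time = floor(0.5 × 30) = 15, row = floor(15 / 4) = 3 and column = 15 − 3 × 4 = 3, i.e. the 16th frame. Dividing uv by _HorizontalAmount and _VerticalAmount shrinks it to one 1/4 × 1/4 cell, while the added column and subtracted row select which cell (rows are subtracted because the frames run top-to-bottom while the v axis points up); the sheet wraps and replays about every half second.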

Scrolling background

Shader "Unity Shaders Book/Chapter 11/Scrolling Background" {
	Properties {
	// The distant (base) layer and the nearer (detail) layer
		_MainTex ("Base Layer (RGB)", 2D) = "white" {}
		_DetailTex ("2nd Layer (RGB)", 2D) = "white" {}
		_ScrollX ("Base layer Scroll Speed", Float) = 1.0
		_Scroll2X ("2nd layer Scroll Speed", Float) = 1.0
		_Multiplier ("Layer Multiplier", Float) = 1
	}
	SubShader {
		Tags { "RenderType"="Opaque" "Queue"="Geometry"}
		
		Pass { 
			Tags { "LightMode"="ForwardBase" }
			
			CGPROGRAM
			
			#pragma vertex vert
			#pragma fragment frag
			
			#include "UnityCG.cginc"
			
			sampler2D _MainTex;
			sampler2D _DetailTex;
			float4 _MainTex_ST;
			float4 _DetailTex_ST;
			float _ScrollX;
			float _Scroll2X;
			float _Multiplier;
			
			struct a2v {
				float4 vertex : POSITION;
				float4 texcoord : TEXCOORD0;
			};
			
			struct v2f {
				float4 pos : SV_POSITION;
				float4 uv : TEXCOORD0;
			};
			
			v2f vert (a2v v) {
				v2f o;
				o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
				// Scroll the x coordinate; frac keeps the offset in [0, 1) to avoid precision problems as _Time.y grows
				o.uv.xy = TRANSFORM_TEX(v.texcoord, _MainTex) + frac(float2(_ScrollX, 0.0) * _Time.y);
				o.uv.zw = TRANSFORM_TEX(v.texcoord, _DetailTex) + frac(float2(_Scroll2X, 0.0) * _Time.y);
				
				return o;
			}
			
			fixed4 frag (v2f i) : SV_Target {
				fixed4 firstLayer = tex2D(_MainTex, i.uv.xy);
				fixed4 secondLayer = tex2D(_DetailTex, i.uv.zw);
				// Blend the two layers using the second layer's alpha channel (alpha = 1 shows the detail layer)
				fixed4 c = lerp(firstLayer, secondLayer, secondLayer.a);
				c.rgb *= _Multiplier;
				
				return c;
			}
			
			ENDCG
		}
	}
	FallBack "VertexLit"
}

1.3 Vertex animation

Rippling water flow

Shader "Unity Shaders Book/Chapter 11/Water" {
	Properties {
		_MainTex ("Main Tex", 2D) = "white" {}
		_Color ("Color Tint", Color) = (1, 1, 1, 1)
		_Magnitude ("Distortion Magnitude", Float) = 1
 		_Frequency ("Distortion Frequency", Float) = 1
 		_InvWaveLength ("Distortion Inverse Wave Length", Float) = 10
 		_Speed ("Speed", Float) = 0.5
	}
	SubShader {
		// Need to disable batching because of the vertex animation:
		// batching merges meshes into one, destroying the per-object model space
		// that this model-space vertex animation relies on
		Tags {"Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent" "DisableBatching"="True"}
		
		Pass {
			Tags { "LightMode"="ForwardBase" }
			
			ZWrite Off
			Blend SrcAlpha OneMinusSrcAlpha
			// Turn off culling so both sides of the water surface are rendered
			Cull Off
			
			CGPROGRAM  
			#pragma vertex vert 
			#pragma fragment frag
			
			#include "UnityCG.cginc" 
			
			sampler2D _MainTex;
			float4 _MainTex_ST;
			fixed4 _Color;
			float _Magnitude;
			float _Frequency;
			float _InvWaveLength;
			float _Speed;
			
			struct a2v {
				float4 vertex : POSITION;
				float4 texcoord : TEXCOORD0;
			};
			
			struct v2f {
				float4 pos : SV_POSITION;
				float2 uv : TEXCOORD0;
			};
			
			v2f vert(a2v v) {
				v2f o;
				// Compute the displacement; only the x direction is displaced
				float4 offset;
				offset.yzw = float3(0.0, 0.0, 0.0);
				// _Frequency scales time to control the wave frequency; _InvWaveLength
				// controls how the vertex position offsets the phase; _Magnitude scales the amplitude
				offset.x = sin(_Frequency * _Time.y + v.vertex.x * _InvWaveLength + v.vertex.y * _InvWaveLength + v.vertex.z * _InvWaveLength) * _Magnitude;
				o.pos = mul(UNITY_MATRIX_MVP, v.vertex + offset);
				
				o.uv = TRANSFORM_TEX(v.texcoord, _MainTex);
				o.uv +=  float2(0.0, _Time.y * _Speed);
				
				return o;
			}
			
			fixed4 frag(v2f i) : SV_Target {
				fixed4 c = tex2D(_MainTex, i.uv);
				c.rgb *= _Color.rgb;
				
				return c;
			} 
			
			ENDCG
		}
	}
	FallBack "Transparent/VertexLit"
}

Billboard
A billboard is a textured quad that is always rotated to face the camera.

We first obtain two vectors:

  • the surface normal of the quad (the direction towards the viewer)
  • an approximate up direction

The cross product of these two gives the right direction; with two orthogonal bases in hand, the third basis vector is easy to compute, yielding a full orthonormal basis.

			v2f vert (a2v v) {
				v2f o;
				// Compute the view direction in model space
				float3 center = float3(0, 0, 0);
				float3 viewer = mul(_World2Object, float4(_WorldSpaceCameraPos, 1));
				// Build the three basis vectors
				float3 normalDir = viewer - center;
				normalDir.y = normalDir.y * _VerticalBillboarding;
				normalDir = normalize(normalDir);
				// If the normal is nearly vertical, use the z axis as the rough up
				// direction to avoid a degenerate cross product
				float3 upDir = abs(normalDir.y) > 0.999 ? float3(0, 0, 1) : float3(0, 1, 0);
				float3 rightDir = normalize(cross(upDir, normalDir));
				upDir = normalize(cross(normalDir, rightDir));
				
				// Express the vertex's offset from the center in the new basis
				float3 centerOffs = v.vertex.xyz - center;
				float3 localPos = center + rightDir * centerOffs.x + upDir * centerOffs.y + normalDir * centerOffs.z;
              
				o.pos = mul(UNITY_MATRIX_MVP, float4(localPos, 1));
				o.uv = TRANSFORM_TEX(v.texcoord, _MainTex);

				return o;
			}
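
Note the role of _VerticalBillboarding: at 1 the quad rotates freely to face the camera exactly; at 0 the y component of the normal is zeroed, so the up direction stays vertical and the quad only rotates around the vertical axis.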

Precautions

  • Model-space vertex animation requires disabling batching, which increases draw calls and hurts performance
  • To cast correct shadows, you must write a custom ShadowCaster pass that applies the same vertex animation (a sketch follows)
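
A sketch of such a ShadowCaster pass for the water shader above, assuming the same displacement code as its main pass (the V2F_SHADOW_CASTER, TRANSFER_SHADOW_CASTER_NORMALOFFSET, and SHADOW_CASTER_FRAGMENT macros come from UnityCG.cginc):

		Pass {
			Tags { "LightMode" = "ShadowCaster" }
			
			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag
			#pragma multi_compile_shadowcaster
			#include "UnityCG.cginc"
			
			float _Magnitude;
			float _Frequency;
			float _InvWaveLength;
			
			struct v2f {
				V2F_SHADOW_CASTER;
			};
			
			v2f vert(appdata_base v) {
				v2f o;
				// Apply the same displacement as the main pass so the shadow
				// matches the animated geometry
				float4 offset;
				offset.yzw = float3(0.0, 0.0, 0.0);
				offset.x = sin(_Frequency * _Time.y + v.vertex.x * _InvWaveLength + v.vertex.y * _InvWaveLength + v.vertex.z * _InvWaveLength) * _Magnitude;
				v.vertex = v.vertex + offset;
				
				TRANSFER_SHADOW_CASTER_NORMALOFFSET(o)
				
				return o;
			}
			
			fixed4 frag(v2f i) : SV_Target {
				SHADOW_CASTER_FRAGMENT(i)
			}
			
			ENDCG
		}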

2. Screen post-processing

2.1 Basic post-processing script system

The basis for achieving screen post-processing is to obtain the rendered screen image, that is, to capture the screen.

Unity provides the OnRenderImage callback:

	void OnRenderImage(RenderTexture src, RenderTexture dest)

  • The first parameter is the source texture
  • The second parameter is the output texture

In this function, we use Graphics.Blit to process the render texture:

	public static void Blit(Texture src, RenderTexture dest, Material mat, int pass = -1)

  • src is the source texture
  • dest is the target texture; if it is null, the result is written directly to the screen
  • mat is the material to use; its shader performs the post-processing, and src is passed to its texture property named _MainTex
  • pass = -1 runs all of the shader's passes in turn; otherwise only the pass with the given index is run

By default, OnRenderImage is called after all opaque and transparent passes have executed, but if necessary you can add the [ImageEffectOpaque] attribute to the function so that it runs immediately after the opaque passes, before transparent objects are rendered.
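For example:

	[ImageEffectOpaque]
	void OnRenderImage(RenderTexture src, RenderTexture dest) {
		// this effect now runs after opaque geometry but before transparent objects
	}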

  • Attribute requirements for the script: it must live on a Camera, and we want it to run in the editor as well
[ExecuteInEditMode]
[RequireComponent (typeof(Camera))]
  • Check in advance that the platform supports the required features
	protected void CheckResources() {
		bool isSupported = CheckSupport();
		
		if (isSupported == false) {
			NotSupported();
		}
	}

	protected bool CheckSupport() {
		if (SystemInfo.supportsImageEffects == false || SystemInfo.supportsRenderTextures == false) {
			Debug.LogWarning("This platform does not support image effects or render textures.");
			return false;
		}
		
		return true;
	}

	// Called when the platform doesn't support this effect
	protected void NotSupported() {
		enabled = false;
	}

	protected void Start() {
		CheckResources();
	}
  • A helper method that creates the material used to process the render texture from a given Shader
	protected Material CheckShaderAndCreateMaterial(Shader shader, Material material) {
		if (shader == null) {
			return null;
		}
		
		if (shader.isSupported && material && material.shader == shader)
			return material;
		
		if (!shader.isSupported) {
			return null;
		}
		else {
			material = new Material(shader);
			material.hideFlags = HideFlags.DontSave;
			if (material)
				return material;
			else 
				return null;
		}
	}

2.2 Primary screen post-processing

Adjusting the brightness, saturation, and contrast of the screen.
For screen effects the material is created on the fly, and the "model" is simply a full-screen quad.
The script first inherits from the base class above, then:

  • exposes a public Shader and lazily creates an accessible material from it
  • does the real effect processing in OnRenderImage: each time it is called, it checks whether the material is available; if not, the source image is shown unchanged, otherwise the work is handed to Graphics.Blit (see the sketch below)
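A minimal sketch of such a script, following the book's BrightnessSaturationAndContrast example (field and property names assumed to match the shader below):

	public class BrightnessSaturationAndContrast : PostEffectsBase {
		public Shader briSatConShader;
		private Material briSatConMaterial;
		public Material material {
			get {
				// Lazily create the material, reusing it while the shader is unchanged
				briSatConMaterial = CheckShaderAndCreateMaterial(briSatConShader, briSatConMaterial);
				return briSatConMaterial;
			}
		}

		[Range(0.0f, 3.0f)]
		public float brightness = 1.0f;
		[Range(0.0f, 3.0f)]
		public float saturation = 1.0f;
		[Range(0.0f, 3.0f)]
		public float contrast = 1.0f;

		void OnRenderImage(RenderTexture src, RenderTexture dest) {
			if (material != null) {
				// Pass the adjustable parameters to the shader
				material.SetFloat("_Brightness", brightness);
				material.SetFloat("_Saturation", saturation);
				material.SetFloat("_Contrast", contrast);
				Graphics.Blit(src, dest, material);
			} else {
				// Material unavailable: just show the source image
				Graphics.Blit(src, dest);
			}
		}
	}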

In the shader, we first need a _MainTex property to receive the input of Graphics.Blit, and then declare the three properties (_Brightness, _Saturation, _Contrast) that the script above passes in.

  • Screen post-processing is essentially drawing a full-screen overlay; it must not affect anything else, so depth testing and depth writing are disabled:
    SubShader { Pass { ZTest Always Cull Off ZWrite Off ... } }

  • The vertex shader just transforms the vertex to clip space and passes the uv through, as sketched below
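
A minimal sketch of that pass-through vertex shader, using the built-in appdata_img struct from UnityCG.cginc (position plus one texture coordinate):

			struct v2f {
				float4 pos : SV_POSITION;
				half2 uv : TEXCOORD0;
			};
			
			v2f vert(appdata_img v) {
				v2f o;
				o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
				o.uv = v.texcoord;
				return o;
			}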

The focus is on the fragment shader

			fixed4 frag(v2f i) : SV_Target {
				fixed4 renderTex = tex2D(_MainTex, i.uv);  
				  
				// Adjust the overall brightness
				fixed3 finalColor = renderTex.rgb * _Brightness;
				
				// Luminance of the pixel; lerping from the grayscale value applies
				// saturation (_Saturation = 0 yields a grayscale image)
				fixed luminance = 0.2125 * renderTex.r + 0.7154 * renderTex.g + 0.0721 * renderTex.b;
				fixed3 luminanceColor = fixed3(luminance, luminance, luminance);
				finalColor = lerp(luminanceColor, finalColor, _Saturation);
				
				// Apply contrast by lerping from middle gray (_Contrast = 0 yields flat gray)
				fixed3 avgColor = fixed3(0.5, 0.5, 0.5);
				finalColor = lerp(avgColor, finalColor, _Contrast);
				
				return fixed4(finalColor, renderTex.a);  
			}  

Edge detection
Convolution: slide a kernel over the image and take the weighted sum of each pixel's neighborhood.
Commonly used edge detection operators include Roberts, Prewitt, and Sobel; this example uses the Sobel operator.
The script side is basically the same as in the previous section. Here are the points to pay attention to:

  • Properties provided: _EdgeOnly, _EdgeColor, and _BackgroundColor (in addition to _MainTex and its _MainTex_TexelSize)
  • The vertex shader computes the texture coordinates needed for edge detection, storing an array of nine coordinates (the 3 × 3 neighborhood) in the output struct; moving this work out of the fragment shader saves computation, and since interpolation is linear it does not change the sampling result
			struct v2f {
				float4 pos : SV_POSITION;
				half2 uv[9] : TEXCOORD0;
			};
			  
			v2f vert(appdata_img v) {
				v2f o;
				o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
				
				half2 uv = v.texcoord;
				// _MainTex_TexelSize is the size of one texel, so these offsets
				// address the 3x3 neighborhood of the current pixel exactly
				o.uv[0] = uv + _MainTex_TexelSize.xy * half2(-1, -1);
				o.uv[1] = uv + _MainTex_TexelSize.xy * half2(0, -1);
				o.uv[2] = uv + _MainTex_TexelSize.xy * half2(1, -1);
				o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1, 0);
				o.uv[4] = uv + _MainTex_TexelSize.xy * half2(0, 0);
				o.uv[5] = uv + _MainTex_TexelSize.xy * half2(1, 0);
				o.uv[6] = uv + _MainTex_TexelSize.xy * half2(-1, 1);
				o.uv[7] = uv + _MainTex_TexelSize.xy * half2(0, 1);
				o.uv[8] = uv + _MainTex_TexelSize.xy * half2(1, 1);
						 
				return o;
			}
  • Fragment shader
			// Luminance of a color
			fixed luminance(fixed4 color) {
				return 0.2125 * color.r + 0.7154 * color.g + 0.0721 * color.b; 
			}
			
			// Compute the gradient (edge) value of the current pixel
			half Sobel(v2f i) {
				const half Gx[9] = {-1,  0,  1,
									-2,  0,  2,
									-1,  0,  1};
				const half Gy[9] = {-1, -2, -1,
									 0,  0,  0,
									 1,  2,  1};		
				
				half texColor;
				// Gradients in the horizontal and vertical directions
				half edgeX = 0;
				half edgeY = 0;
				for (int it = 0; it < 9; it++) {
					texColor = luminance(tex2D(_MainTex, i.uv[it]));
					edgeX += texColor * Gx[it];
					edgeY += texColor * Gy[it];
				}
				// Using abs() instead of the exact sqrt(edgeX^2 + edgeY^2) is a cheap
				// approximation; a smaller edge value means a stronger edge
				half edge = 1 - abs(edgeX) - abs(edgeY);
				
				return edge;
			}
			
			fixed4 fragSobel(v2f i) : SV_Target {
				half edge = Sobel(i);
				
				// _EdgeOnly = 0 draws edges over the original image;
				// _EdgeOnly = 1 draws edges over _BackgroundColor only
				fixed4 withEdgeColor = lerp(_EdgeColor, tex2D(_MainTex, i.uv[4]), edge);
				fixed4 onlyEdgeColor = lerp(_EdgeColor, _BackgroundColor, edge);
				return lerp(withEdgeColor, onlyEdgeColor, _EdgeOnly);
 			}
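
On the script side, OnRenderImage simply feeds the three parameters to the material; a minimal sketch, with the public field names (edgesOnly, edgeColor, backgroundColor) assumed:

	void OnRenderImage(RenderTexture src, RenderTexture dest) {
		if (material != null) {
			material.SetFloat("_EdgeOnly", edgesOnly);
			material.SetColor("_EdgeColor", edgeColor);
			material.SetColor("_BackgroundColor", backgroundColor);
			Graphics.Blit(src, dest, material);
		} else {
			Graphics.Blit(src, dest);
		}
	}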

Gaussian blur
The Gaussian filter uses a kernel whose weights follow the two-dimensional Gaussian function G(x, y) = (1 / (2πσ²)) · e^(−(x² + y²) / (2σ²)), normalized so that all weights sum to 1.
To simplify the computation, this 2D Gaussian is separable: it can be split into two one-dimensional filters applied to the image in succession. For a 5 × 5 kernel this cuts the cost per pixel from 25 samples to 5 + 5 = 10, and by symmetry each 1D kernel only needs three distinct weights.
Next we use two passes (one vertical, one horizontal) to filter the image.

  • The initial implementation of OnRenderImage calls the two passes in turn, using a temporary buffer to hold the result of the first pass as input to the second.
  • An improved implementation first downsamples the image to reduce the number of pixels to process.
  • The final implementation below additionally supports a configurable number of blur iterations:
	void OnRenderImage (RenderTexture src, RenderTexture dest) {
		if (material != null) {
			int rtW = src.width / downSample;
			int rtH = src.height / downSample;

			RenderTexture buffer0 = RenderTexture.GetTemporary(rtW, rtH, 0);
			buffer0.filterMode = FilterMode.Bilinear;
			// Downscale src into buffer0
			Graphics.Blit(src, buffer0);
			// Iterate; buffer0 always holds the latest result
			for (int i = 0; i < iterations; i++) {
				material.SetFloat("_BlurSize", 1.0f + i * blurSpread);

				RenderTexture buffer1 = RenderTexture.GetTemporary(rtW, rtH, 0);

				// Render the vertical pass
				Graphics.Blit(buffer0, buffer1, material, 0);

				RenderTexture.ReleaseTemporary(buffer0);
				buffer0 = buffer1;
				buffer1 = RenderTexture.GetTemporary(rtW, rtH, 0);

				// Render the horizontal pass
				Graphics.Blit(buffer0, buffer1, material, 1);

				RenderTexture.ReleaseTemporary(buffer0);
				buffer0 = buffer1;
			}

			Graphics.Blit(buffer0, dest);
			RenderTexture.ReleaseTemporary(buffer0);
		} else {
			Graphics.Blit(src, dest);
		}
	}
  • Back to the shader: each pass uses a 5 × 1 one-dimensional kernel, so five texture coordinates are computed per vertex. This is the horizontal vertex shader:
		v2f vertBlurHorizontal(appdata_img v) {
			v2f o;
			o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
			
			half2 uv = v.texcoord;
			
			o.uv[0] = uv;
			o.uv[1] = uv + float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
			o.uv[2] = uv - float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
			o.uv[3] = uv + float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
			o.uv[4] = uv - float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
					 
			return o;
		}

The vertical version is analogous, offsetting along y instead of x.

  • The two passes share the same fragment shader:
		fixed4 fragBlur(v2f i) : SV_Target {
			float weight[3] = {0.4026, 0.2442, 0.0545};
			
			fixed3 sum = tex2D(_MainTex, i.uv[0]).rgb * weight[0];
			
			for (int it = 1; it < 3; it++) {
				sum += tex2D(_MainTex, i.uv[it*2-1]).rgb * weight[it];
				sum += tex2D(_MainTex, i.uv[it*2]).rgb * weight[it];
			}
			
			return fixed4(sum, 1.0);
		}
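
Note that the three weights form a normalized 1D Gaussian: 0.4026 + 2 × (0.2442 + 0.0545) = 1.0, so the blur preserves overall brightness.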

The principle of Bloom
Bloom extracts the brighter areas of the image according to a luminance threshold into a separate texture, blurs that texture with a Gaussian blur, and then blends it with the original image.

  • OnRenderImage is basically the same as in the previous section; the differences are commented below
	void OnRenderImage (RenderTexture src, RenderTexture dest) {
		if (material != null) {
			material.SetFloat("_LuminanceThreshold", luminanceThreshold);

			int rtW = src.width / downSample;
			int rtH = src.height / downSample;
			
			RenderTexture buffer0 = RenderTexture.GetTemporary(rtW, rtH, 0);
			buffer0.filterMode = FilterMode.Bilinear;
			// Pass 0 extracts the brighter regions of the image
			Graphics.Blit(src, buffer0, material, 0);
			
			for (int i = 0; i < iterations; i++) {
				material.SetFloat("_BlurSize", 1.0f + i * blurSpread);
				
				RenderTexture buffer1 = RenderTexture.GetTemporary(rtW, rtH, 0);
				
				// Render the vertical pass
				Graphics.Blit(buffer0, buffer1, material, 1);
				
				RenderTexture.ReleaseTemporary(buffer0);
				buffer0 = buffer1;
				buffer1 = RenderTexture.GetTemporary(rtW, rtH, 0);
				
				// Render the horizontal pass
				Graphics.Blit(buffer0, buffer1, material, 2);
				
				RenderTexture.ReleaseTemporary(buffer0);
				buffer0 = buffer1;
			}
			// Final blend: pass 3 combines the blurred bright regions with the source
			material.SetTexture ("_Bloom", buffer0);  
			Graphics.Blit (src, dest, material, 3);  

			RenderTexture.ReleaseTemporary(buffer0);
		} else {
			Graphics.Blit(src, dest);
		}
	}
  • Define vertex and fragment shaders that extract brighter regions
		struct v2f {
			float4 pos : SV_POSITION; 
			half2 uv : TEXCOORD0;
		};	
		
		v2f vertExtractBright(appdata_img v) {
			v2f o;
			
			o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
			
			o.uv = v.texcoord;
					 
			return o;
		}
		
		fixed luminance(fixed4 color) {
			return 0.2125 * color.r + 0.7154 * color.g + 0.0721 * color.b; 
		}
		
		fixed4 fragExtractBright(v2f i) : SV_Target {
			fixed4 c = tex2D(_MainTex, i.uv);
			// Subtract the threshold and clamp the result to [0, 1];
			// pixels below the threshold become black
			fixed val = clamp(luminance(c) - _LuminanceThreshold, 0.0, 1.0);
			
			return c * val;
		}
  • The vertex and fragment shaders that define the final blending operation
		struct v2fBloom {
			float4 pos : SV_POSITION; 
			half4 uv : TEXCOORD0;
		};
		
		v2fBloom vertBloom(appdata_img v) {
			v2fBloom o;
			
			o.pos = mul (UNITY_MATRIX_MVP, v.vertex);
			o.uv.xy = v.texcoord;		
			o.uv.zw = v.texcoord;
			// Handle platform differences: on DirectX-like platforms the texture
			// origin is at the top, so flip the _Bloom coordinate when needed
			#if UNITY_UV_STARTS_AT_TOP			
			if (_MainTex_TexelSize.y < 0.0)
				o.uv.w = 1.0 - o.uv.w;
			#endif
				        	
			return o; 
		}
		
		fixed4 fragBloom(v2fBloom i) : SV_Target {
			return tex2D(_MainTex, i.uv.xy) + tex2D(_Bloom, i.uv.zw);
		} 

Motion Blur
In this example we save the previous rendering result and keep blending the current frame on top of it, producing a visual trailing effect.

  • Define a RenderTexture variable to hold the accumulated result, and destroy it when the script is disabled so that accumulation restarts the next time the effect runs (see the sketch below)
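A minimal sketch of that declaration and cleanup, matching the accumulationTexture used below:

	private RenderTexture accumulationTexture;

	void OnDisable() {
		// Destroying the texture forces OnRenderImage to recreate and
		// re-initialize it the next time the effect is enabled
		DestroyImmediate(accumulationTexture);
	}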
  • OnRenderImage function
	void OnRenderImage (RenderTexture src, RenderTexture dest) {
		if (material != null) {
			// Create the accumulation texture; if it does not exist or no longer
			// matches the screen resolution, recreate it
			if (accumulationTexture == null || accumulationTexture.width != src.width || accumulationTexture.height != src.height) {
				DestroyImmediate(accumulationTexture);
				accumulationTexture = new RenderTexture(src.width, src.height, 0);
				// Hide it so it does not appear in the Hierarchy and is not saved
				accumulationTexture.hideFlags = HideFlags.HideAndDontSave;
				// Initialize it with the current frame
				Graphics.Blit(src, accumulationTexture);
			}

			// A render-texture "restore" happens when we render into a texture that
			// was not cleared or destroyed beforehand. accumulationTexture must NOT
			// be cleared (it holds the blended history), so declare the restore as expected
			accumulationTexture.MarkRestoreExpected();

			material.SetFloat("_BlurAmount", 1.0f - blurAmount);

			Graphics.Blit (src, accumulationTexture, material);
			Graphics.Blit (accumulationTexture, dest);
		} else {
			Graphics.Blit(src, dest);
		}
	}
  • The shader is relatively simple; the vertex shader is the usual pass-through.
  • There are two fragment shaders: the RGB one outputs the sampled color with _BlurAmount as its alpha, so alpha blending mixes the new frame over the accumulated history; the A one returns the sample directly. This keeps the render texture's alpha channel intact so it is not affected by _BlurAmount.
  • We therefore define two Passes: one updates the RGB channels of the render texture, the other updates the A channel (see the sketch below).
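
A sketch of those two fragment shaders, following the book's Motion Blur shader; the RGB pass is assumed to use Blend SrcAlpha OneMinusSrcAlpha with ColorMask RGB, while the A pass uses ColorMask A without blending:

			fixed4 fragRGB(v2f i) : SV_Target {
				// _BlurAmount in alpha controls how strongly this frame
				// overwrites the accumulated history via alpha blending
				return fixed4(tex2D(_MainTex, i.uv).rgb, _BlurAmount);
			}
			
			half4 fragA(v2f i) : SV_Target {
				// Return the sample unchanged to preserve the alpha channel
				return tex2D(_MainTex, i.uv);
			}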


Source: blog.csdn.net/woshi_wst/article/details/128818337