Implementing an Afterimage Effect in Unity with Post-Processing
Hello everyone, I am Zhao.
Here we continue looking at ways to create afterimages in Unity. I previously covered the BakeMesh approach; this article introduces an approach based on screen post-processing.
1. Principle
The previous BakeMesh method actually generated many mesh models in the scene. If it is done with post-processing, there is no such process.
Although afterimages are visible in the Game view, there is in fact only a single character mesh in the scene.
In fact, the method of post-processing to do afterimage is very simple. First copy a sub-camera that is the same as the main camera, then this camera only looks at the character layer, and finally, set a RenderTexture for this camera as the targetTexture.
In this way, we can get a RenderTexture with only characters while the main camera renders the complete picture.
Then we maintain a queue that holds the RenderTextures of the rendered characters in the past few frames. As for how many frames need to be saved and how long to save a frame, it depends on your own needs.
After getting this RenderTexture queue, the rest is very simple. Pass this queue into the post-processing material.
At this time, these RenderTextures are actually as follows:
The post-processing Shader is very simple. It is to combine these Textures in sequence with different transparency:
In this way, the afterimage effect is made. If you want to modify the color of the afterimage, you can directly multiply the texture of the afterimage by a color during post-processing.
2. Advantages and Disadvantages
1. Advantages
Compared with the BakeMesh method, this post-processing approach does not need to render many extra character meshes; it only requires one additional camera that renders all characters needing afterimages one more time. We can add an optimization: when a character needs an afterimage, move it to a dedicated Layer so the afterimage camera renders it; when no characters need afterimages, that camera sees nothing at all.
Then, even if there are a lot of characters in the scene at the same time, at most, it is enough to render each character once more, which is very friendly to the performance consumption of rendering. Just sacrifice some memory, copy and save this RenderTexture in memory. I don't think it's that big of a waste.
If you want to build other effects on top of this, it is also easy. For example, to blur the afterimages, add Bloom, or apply color correction, you only need to process the saved textures — many effects can be achieved this way.
2. Disadvantages
Since multiple textures are saved as the basis for compositing afterimages, how many textures are appropriate to save is a problem. If you save less, the afterimage effect will not be very obvious, and if you save more, the memory usage will be more.
3. Code
Since it is a demo, it is written in a simpler way without optimization. Let’s see the principle.
1. C#
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
/// <summary>
/// Screen-space afterimage effect controller. A secondary camera (<see cref="subCam"/>)
/// renders only the character layer into its targetTexture; every <see cref="spaceTime"/>
/// frames, if <see cref="role"/> has moved, a snapshot of that texture is pushed into a
/// short history which the post-processing material blends over the main image in
/// OnRenderImage.
/// </summary>
public class MoveImageEffectCtrl : MonoBehaviour
{
    // Number of history frames blended by the shader (_Tex1.._Tex5).
    private const int MaxHistory = 5;

    // Oldest-first snapshots of the sub-camera output (pooled RenderTextures).
    private List<Texture> rtList;

    // Secondary camera that renders only the character layer into a targetTexture.
    public Camera subCam;

    // True while `role` moved since the last sample; drives the shader's _isMove switch.
    public bool isMove = false;

    // Fully transparent placeholder bound to _TexN slots that have no snapshot yet.
    private Texture2D blackTex;

    // Post-processing material (Shader "Unlit/MoveEffectCom").
    public Material mat;

    // Take a movement sample every N frames; must be > 0 to have any effect.
    public int spaceTime = 10;

    // Frame counter used to pace the sampling.
    private int countTime = 0;

    // Position of `role` at the previous sample, used to detect movement.
    private Vector3 oldPos;

    // Object whose movement triggers afterimage capture.
    public GameObject role;

    void Start()
    {
        CreateBlackTexture();
        if (role != null)
        {
            // Fix: seed with the current position so a stationary character away
            // from the origin does not register a false "move" on the first sample.
            oldPos = role.transform.position;
        }
    }

    void Update()
    {
        countTime++;
        // Fix: guard spaceTime <= 0 — the bare modulo would throw DivideByZeroException
        // if the field were set to 0 in the Inspector.
        if (spaceTime > 0 && countTime % spaceTime == 0)
        {
            CheckMove();
        }
    }

    // Samples the role's position; on movement, snapshots the sub-camera output.
    private void CheckMove()
    {
        if (role == null || subCam == null || subCam.targetTexture == null)
        {
            return; // not configured yet — nothing to capture
        }
        Vector3 pos = role.transform.position;
        if ((pos - oldPos).sqrMagnitude > 0f)
        {
            isMove = true;
            RenderTexture camTarget = subCam.targetTexture;
            // The camera keeps overwriting its targetTexture every frame, so we
            // must copy it into a pooled temporary to keep a history.
            RenderTexture rt = RenderTexture.GetTemporary(camTarget.width, camTarget.height);
            CopyRender(camTarget, rt);
            AddToRTList(rt);
            oldPos = pos;
        }
        else
        {
            isMove = false;
        }
    }

    // GPU copy of the sub-camera output into the history snapshot.
    private void CopyRender(RenderTexture source, RenderTexture destination)
    {
        Graphics.Blit(source, destination);
    }

    // Builds the fully transparent texture used for yet-unfilled _TexN slots.
    private void CreateBlackTexture()
    {
        blackTex = new Texture2D(128, 128);
        // SetPixels with a prefilled array instead of 128*128 SetPixel calls.
        Color[] clear = new Color[128 * 128];
        for (int i = 0; i < clear.Length; i++)
        {
            clear[i] = Color.clear;
        }
        blackTex.SetPixels(clear);
        blackTex.Apply();
    }

    // Appends a snapshot and evicts the oldest entries beyond MaxHistory,
    // returning pooled RenderTextures instead of leaking them.
    private void AddToRTList(Texture rt)
    {
        if (rtList == null)
        {
            rtList = new List<Texture>();
        }
        rtList.Add(rt);
        while (rtList.Count > MaxHistory)
        {
            Texture tex = rtList[0];
            rtList.RemoveAt(0);
            if (tex is RenderTexture evicted)
            {
                RenderTexture.ReleaseTemporary(evicted);
            }
        }
    }

    // Pushes the movement flag and the history textures into the material.
    private void SetTexToMat()
    {
        if (isMove == false)
        {
            mat.SetFloat("_isMove", 0);
        }
        else
        {
            mat.SetFloat("_isMove", 1);
            for (int i = 0; i < MaxHistory; i++)
            {
                // Shader property names are 1-based: _Tex1 .. _Tex5.
                string key = "_Tex" + (i + 1);
                mat.SetTexture(key, GetTexById(i));
            }
        }
    }

    // Returns the id-th snapshot, or the transparent placeholder if not filled yet.
    private Texture GetTexById(int id)
    {
        if (rtList == null || rtList.Count <= id)
        {
            return blackTex;
        }
        return rtList[id];
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        SetTexToMat();
        if (isMove)
        {
            Graphics.Blit(source, destination, mat);
        }
        else
        {
            // No movement: plain pass-through, skip the afterimage material.
            Graphics.Blit(source, destination);
        }
    }

    // Fix: the original leaked the pooled temporaries still in the history and
    // the allocated placeholder texture when the component was destroyed.
    private void OnDestroy()
    {
        if (rtList != null)
        {
            foreach (Texture tex in rtList)
            {
                if (tex is RenderTexture rt)
                {
                    RenderTexture.ReleaseTemporary(rt);
                }
            }
            rtList.Clear();
        }
        if (blackTex != null)
        {
            Destroy(blackTex);
        }
    }
}
2. Shader
// Screen-space afterimage composite: adds up to five tinted, progressively
// fainter snapshots (_Tex1 strongest .. _Tex5 faintest) over the main image.
Shader "Unlit/MoveEffectCom"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _Tex1("Tex1",2D) = "black"{}
        _Tex2("Tex2",2D) = "black"{}
        _Tex3("Tex3",2D) = "black"{}
        _Tex4("Tex4",2D) = "black"{}
        _Tex5("Tex5",2D) = "black"{}
        _isMove("isMove",Float) = 0
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100
        // Standard render state for a full-screen blit: never depth-test,
        // never write depth, never cull the full-screen quad.
        Cull Off ZWrite Off ZTest Always
        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;     // main camera image (set by Graphics.Blit)
            float4 _MainTex_ST;
            sampler2D _Tex1;        // newest-to-oldest afterimage snapshots
            sampler2D _Tex2;
            sampler2D _Tex3;
            sampler2D _Tex4;
            sampler2D _Tex5;
            float _isMove;          // 1 while the character moves, else 0

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                // Fix: removed UNITY_TRANSFER_FOG — v2f declares no fog field
                // (so the macro would not compile with fog variants enabled),
                // and fog is meaningless for a screen-space blit anyway.
                return o;
            }

            half4 frag (v2f i) : SV_Target
            {
                half4 col = tex2D(_MainTex, i.uv);
                if (_isMove > 0)
                {
                    half4 addTex1 = tex2D(_Tex1, i.uv);
                    half4 addTex2 = tex2D(_Tex2, i.uv);
                    half4 addTex3 = tex2D(_Tex3, i.uv);
                    half4 addTex4 = tex2D(_Tex4, i.uv);
                    half4 addTex5 = tex2D(_Tex5, i.uv);
                    // Alpha-weighted sum of the snapshots, masked by (1 - col.a)
                    // so it only shows where the main image is transparent,
                    // and tinted red by float3(1,0,0).
                    half3 rgb = col.rgb + saturate(addTex1.rgb*addTex1.a*0.6f + addTex2.rgb*addTex2.a*0.5f + addTex3.rgb*addTex3.a*0.3f + addTex4.rgb*addTex4.a*0.2f + addTex5.rgb*addTex5.a*0.1f)*(1-col.a)*float3(1,0,0);
                    rgb = saturate(rgb);
                    col = half4(rgb, col.a);
                }
                return col;
            }
            ENDCG
        }
    }
}