A scan-light effect in the Unity URP pipeline

Implementing a scan-light (sweeping light) effect under the URP pipeline.

The basic idea is still to reconstruct the world-space position from depth (hereafter world_pos), test whether its xyz components fall inside a given range, and drive the scan along one of the x/y/z coordinate axes.
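In symbols: world_pos = V⁻¹ · P⁻¹ · (ndc · z_eye), where ndc = (uv * 2 - 1, raw_depth, 1), z_eye = LinearEyeDepth(raw_depth), V is the view matrix and P the projection matrix. Multiplying by z_eye restores the homogeneous w that the perspective divide removed, which is what makes the inverse-projection step valid.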

Here we add an extension so the scan can sweep along an arbitrary direction:

pass in a rotation (and translation) matrix, transform world_pos with it, and then do the AABB test in that transformed space. This yields a scan that is not locked to the x/y/z axes.
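For intuition, the matrix passed to the shader is a world-to-box-space transform. A minimal sketch of how such a matrix can be built (boxPosition and boxRotation are hypothetical names for the box pose; the values actually used in this post are hard-coded in the render pass further down):

// hypothetical pose of the scan box in world space
Vector3 boxPosition = new Vector3(400, 0, 380);
Quaternion boxRotation = Quaternion.Euler(0, -45, 0);

// world -> box space is the inverse of the box's local-to-world transform:
// (Translate(p) * Rotate(q))^-1 == Rotate(q^-1) * Translate(-p)
Matrix4x4 worldToBox = Matrix4x4.Rotate(Quaternion.Inverse(boxRotation))
                     * Matrix4x4.Translate(-boxPosition);

// equivalently, if the box were an actual Transform in the scene:
// Matrix4x4 worldToBox = someBoxTransform.worldToLocalMatrix;

Once positions are in this space, the test reduces to a plain AABB check against _scan_box_min / _scan_box_max.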

The effect at runtime: the scan can sweep along any direction.

The code follows.

Shader:

Shader "lsc/screen_scan_shader"
{
    Properties
    {
        _MainTex("Texture", 2D) = "white" {}
        _scan_brush("Texture", 2D) = "white" {}
    }
    SubShader
    {
        // No culling or depth
        Cull Off ZWrite Off ZTest Always

        Pass
        {
            HLSLPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"

            float4x4 _mtx_view_inv;
            float4x4 _mtx_proj_inv;
            TEXTURE2D_X_FLOAT(_CameraDepthTexture);
            SAMPLER(sampler_CameraDepthTexture);


            // scan-light parameters
            float4 _scan_color;
            float4 _scan_box_min;
            float4 _scan_box_max;
            float4x4 _scan_box_world_mtx;

            TEXTURE2D_X_FLOAT(_scan_brush);
            SAMPLER(sampler_LinearClamp);

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
                float4 screen_space : TEXCOORD1;
            };

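            // Reconstructs the world-space position from a raw depth value.
            // Key fact: for a perspective projection, clip.w equals the eye-space
            // depth, so clip = ndc * z_eye; applying P^-1 then V^-1 undoes the
            // projection and the view transform.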
            float4 cal_world_pos_by_dep(float ndc_dep, float2 screen_space, out float4 view_pos)
            {
                // convert the raw (non-linear) depth to a linear eye-space depth
                float linearDepthZ = LinearEyeDepth(ndc_dep, _ZBufferParams);
                // screen-space uv -> NDC xy
                float4 ndc_pos;
                ndc_pos.xy = screen_space * 2.0 - 1.0;
                ndc_pos.zw = float2(ndc_dep, 1);
                // re-apply the homogeneous factor: clip = ndc * w, and w is the eye depth here
                ndc_pos = ndc_pos * linearDepthZ;
                // back to view space, then world space
                view_pos = mul(_mtx_proj_inv, ndc_pos);
                float4 world_pos = mul(_mtx_view_inv, float4(view_pos.xyz, 1));

                return world_pos;
            }

            int is_in_box(float3 pos)
            {
                // TODO: make this test configurable; it is a plain AABB check in box space
                if (pos.x <= _scan_box_max.x &&
                    pos.y <= _scan_box_max.y &&
                    pos.z <= _scan_box_max.z &&
                    pos.x >= _scan_box_min.x &&
                    pos.y >= _scan_box_min.y &&
                    pos.z >= _scan_box_min.z
                    )
                    return 1;

                return 0;
            }

            v2f vert (appdata v)
            {
                v2f o;

                o.vertex = TransformObjectToHClip(v.vertex.xyz);
                o.uv = v.uv;

                // compute the screen-space position (homogeneous factor still included)
                o.screen_space = ComputeScreenPos(o.vertex);

                return o;
            }



            sampler2D _MainTex;

            float4 frag (v2f i) : SV_Target
            {
                float4 col = tex2D(_MainTex, i.uv);

                // remove the homogeneous factor from the interpolated screen position
                float2 screen_space = i.screen_space.xy / i.screen_space.w;
                // fetch the raw (non-linear) depth
                float org_depth = SAMPLE_TEXTURE2D_X(_CameraDepthTexture, sampler_CameraDepthTexture, screen_space).x;
                // reconstruct the world-space position
                float4 view_pos;
                float4 world_pos = cal_world_pos_by_dep(org_depth, screen_space, view_pos);

                // transform into the box's rotated/translated space
                float4 box_space_pos = mul(_scan_box_world_mtx, float4(world_pos.xyz, 1));
                int in_box = is_in_box(box_space_pos.xyz);

                float2 brush_uv = float2(
                    abs(box_space_pos.x - _scan_box_min.x) / (_scan_box_max.x - _scan_box_min.x),
                    abs(box_space_pos.z - _scan_box_min.z) / (_scan_box_max.z - _scan_box_min.z)
                    );

                // sample the falloff from the brush texture (a simple gradient computed
                // from the xz coordinates would work just as well)
                float scan_brush = SAMPLE_TEXTURE2D_X(_scan_brush, sampler_LinearClamp, brush_uv).x;
                float scan_scale = scan_brush * brush_uv.x * brush_uv.y;

                // additively tint pixels inside the box
                col.xyz = _scan_color.xyz * in_box * scan_scale + col.xyz;

                return col;
            }

            ENDHLSL
        }
    }
}
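The brush is just a falloff texture. If none is at hand, one can be generated on the C# side and assigned to the material; a minimal sketch (MakeScanBrush is a hypothetical helper, not part of the original code):

// hypothetical helper: builds a horizontal falloff ramp for _scan_brush
Texture2D MakeScanBrush(int size = 256)
{
    var tex = new Texture2D(size, size, TextureFormat.RFloat, false);
    tex.wrapMode = TextureWrapMode.Clamp; // matches sampler_LinearClamp in the shader
    for (int y = 0; y < size; y++)
    {
        for (int x = 0; x < size; x++)
        {
            float v = (float)x / (size - 1); // 0 at one edge, 1 at the other
            tex.SetPixel(x, y, new Color(v, 0, 0, 1)); // only the red channel is read
        }
    }
    tex.Apply();
    return tex;
}

// usage: screen_scan_material.SetTexture("_scan_brush", MakeScanBrush());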

Render feature / render pass C# code:

using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;

public class ScreenScanRenderPassFeature : ScriptableRendererFeature
{
    class CustomRenderPass : ScriptableRenderPass
    {
        public Material screen_scan_material = null;
        public RenderTargetIdentifier render_target_color;
        public RenderTargetHandle temp_render_target;

        // This method is called before executing the render pass.
        // It can be used to configure render targets and their clear state. Also to create temporary render target textures.
        // When empty this render pass will render to the active camera render target.
        // You should never call CommandBuffer.SetRenderTarget. Instead call <c>ConfigureTarget</c> and <c>ConfigureClear</c>.
        // The render pipeline will ensure target setup and clearing happens in a performant manner.
        public override void OnCameraSetup(CommandBuffer cmd, ref RenderingData renderingData)
        {
        }

        // Here you can implement the rendering logic.
        // Use <c>ScriptableRenderContext</c> to issue drawing commands or execute command buffers
        // https://docs.unity3d.com/ScriptReference/Rendering.ScriptableRenderContext.html
        // You don't have to call ScriptableRenderContext.submit, the render pipeline will call it at specific points in the pipeline.
        public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
        {

            if (!screen_scan_material)
                return;

            {
                Camera cam = renderingData.cameraData.camera;
                var mtx_view_inv = cam.worldToCameraMatrix.inverse;
                var mtx_proj_inv = cam.projectionMatrix.inverse;

                screen_scan_material.SetMatrix("_mtx_view_inv", mtx_view_inv);
                screen_scan_material.SetMatrix("_mtx_proj_inv", mtx_proj_inv);
            }

            // scan box
            {
                Vector4 box_min = new Vector4(-15, -1000, -50, 0);
                Vector4 box_max = new Vector4(15, 1000, 50, 0);
                screen_scan_material.SetVector("_scan_box_min", box_min);
                screen_scan_material.SetVector("_scan_box_max", box_max);
                screen_scan_material.SetVector("_scan_color", new Vector4(179.0f/255.0f, 224.0f / 255.0f, 230.0f / 255.0f, 1.0f));

                // world -> box-space matrix: translate the box centre to the origin,
                // then rotate; animating the angle sweeps the scan direction over time
                Matrix4x4 box_mtx = Matrix4x4.Rotate(Quaternion.Euler(0, 45 - 30 * Time.time, 0)) * Matrix4x4.Translate(new Vector3(-400, 0, -380));

                screen_scan_material.SetMatrix("_scan_box_world_mtx", box_mtx);
            }

            const string CommandBufferTag = "screen scan Pass";
            var cmd = CommandBufferPool.Get(CommandBufferTag);

            RenderTextureDescriptor opaqueDesc = renderingData.cameraData.cameraTargetDescriptor;
            opaqueDesc.depthBufferBits = 0;
            cmd.GetTemporaryRT(temp_render_target.id, opaqueDesc);

            // run the scan material over the camera colour target into the temporary buffer
            cmd.Blit(render_target_color, temp_render_target.Identifier(), screen_scan_material);
            // then copy the result back into the camera target
            cmd.Blit(temp_render_target.Identifier(), render_target_color);
            // record the temporary RT release into the buffer before executing it
            cmd.ReleaseTemporaryRT(temp_render_target.id);

            // execute the command buffer, then return it to the pool
            context.ExecuteCommandBuffer(cmd);
            CommandBufferPool.Release(cmd);
        }

        // Cleanup any allocated resources that were created during the execution of this render pass.
        public override void OnCameraCleanup(CommandBuffer cmd)
        {
        }
    }

    CustomRenderPass m_ScriptablePass;
    public Material screen_scan_material = null;

    /// <inheritdoc/>
    public override void Create()
    {
        m_ScriptablePass = new CustomRenderPass();
        // the temporary RT handle needs a unique shader property name before use
        // ("_screen_scan_temp_rt" is an arbitrary choice)
        m_ScriptablePass.temp_render_target.Init("_screen_scan_temp_rt");

        // Configures where the render pass should be injected.
        m_ScriptablePass.renderPassEvent = RenderPassEvent.AfterRenderingOpaques;
    }

    // Here you can inject one or multiple render passes in the renderer.
    // This method is called when setting up the renderer once per-camera.
    public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
    {
        m_ScriptablePass.render_target_color = renderer.cameraColorTarget;
        m_ScriptablePass.screen_scan_material = screen_scan_material;

        renderer.EnqueuePass(m_ScriptablePass);
    }
}
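To use the effect: add ScreenScanRenderPassFeature to the active URP Renderer asset, assign a material built from lsc/screen_scan_shader to its screen_scan_material slot, and make sure the Depth Texture option is enabled on the URP asset (or overridden on the camera); otherwise _CameraDepthTexture is not populated and the reconstruction has nothing to sample.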



Reposted from blog.csdn.net/lsccsl/article/details/118091355