Date: 2023/2/5 11:08:40
Author: 木人不说话
# Depth Textures and Normal Textures #
Grabbing the screen image directly as a render texture is convenient, but any processing of a render texture obtained this way can only be based on the screen's color information. If we could also obtain the screen's depth and normal information, we could do far more complex and precise processing of the screen image. Fortunately, Unity lets us obtain the screen's depth texture and normal texture.
The values in a depth texture lie in [0,1]. So how is the depth texture produced? First we need to know what a depth texture actually stores. The stored depth d is the z component, in normalized device coordinates (NDC), of the fragment corresponding to the screen pixel, i.e. the screen-space depth z_ndc. Note that NDC components lie in [-1,1], while d lies in [0,1], so d and z_ndc are related by:

d = 0.5 * z_ndc + 0.5

Note that Unity cannot obtain a depth texture under all circumstances. Normally the depth texture can be taken from the depth buffer, or rendered in a separate pass. When the depth buffer cannot be accessed directly, Unity uses shader replacement: it selects objects whose RenderType is Opaque, then checks that their render queue is no greater than 2500 (the range used for opaque objects); only objects meeting both conditions are rendered into the depth and normal textures. So for the depth and normal textures to be generated correctly, an object's render type and render queue must both be set properly.
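For reference, here is how this stored depth d relates to view-space depth, assuming an OpenGL-style perspective projection with near plane $n$ and far plane $f$ (other graphics APIs use different NDC conventions, which Unity hides behind helper macros):

$$z_{ndc} = \frac{f+n}{f-n} - \frac{2fn}{(f-n)\,z_{eye}}, \qquad d = 0.5\,z_{ndc} + 0.5 = \frac{f\,(z_{eye}-n)}{(f-n)\,z_{eye}}$$

Checking the endpoints: $z_{eye}=n$ gives $d=0$ and $z_{eye}=f$ gives $d=1$. Note that d is nonlinear in $z_{eye}$; inverting gives $z_{eye} = \frac{nf}{f - d\,(f-n)}$, which is exactly the conversion the LinearEyeDepth helper discussed below performs.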
Unity can generate either a depth texture alone or a combined depth+normal texture. When only the depth texture is requested, Unity takes it directly from the depth buffer where possible, or falls back to shader replacement and renders each object's shadow-casting pass — the pass whose LightMode is set to ShadowCaster — to obtain the depth texture. If an object's shader has no such pass, the object cannot be written into the depth texture.
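As a minimal sketch (a hypothetical shader illustrating the conditions above, not code from these notes), the pieces that matter for showing up in the depth texture are the RenderType tag, the render queue, and a ShadowCaster pass, which is often inherited through FallBack:

Shader "Hypothetical/DepthTextureFriendly"
{
SubShader
{
// Shader replacement only picks up objects whose RenderType is Opaque
// and whose render queue is within the opaque range (no greater than 2500).
Tags { "RenderType"="Opaque" "Queue"="Geometry" }
Pass
{
// ... ordinary rendering ...
}
}
// The built-in Diffuse shader contains a LightMode = ShadowCaster pass,
// so falling back to it lets objects using this shader be rendered
// into _CameraDepthTexture.
FallBack "Diffuse"
}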
If a depth+normal texture is generated, Unity creates a texture with the same resolution as the screen and 32 bits of precision per pixel. The **view-space** normal is written into the R and G channels, and the depth into the B and A channels. In forward rendering Unity does not create a normal buffer by default, so it renders the entire scene once more in a separate pass to obtain the normal texture.
To obtain the depth texture or the depth+normal texture in Unity, first set the camera's mode in a script:
camera.depthTextureMode = DepthTextureMode.Depth;
camera.depthTextureMode = DepthTextureMode.DepthNormals;

Once this is set, the corresponding texture can be accessed in a shader through the _CameraDepthTexture variable (or _CameraDepthNormalsTexture for the depth+normal texture). In general the texture can then simply be sampled with tex2D, but because of platform differences Unity provides the unified macro SAMPLE_DEPTH_TEXTURE for sampling the depth texture, for example:
float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv);

Note that the depth read from the depth texture is the screen-space depth normalized to [0,1]; to get view-space depth it must be converted. Unity's LinearEyeDepth converts a depth-texture sample to view-space depth, while Linear01Depth does the same but normalizes the result to the [0,1] range.
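A minimal fragment-shader sketch of the two conversions (variable names are illustrative):

// Sample the raw, nonlinear depth stored in the depth texture.
float rawDepth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv);
// View-space depth, i.e. distance along the camera's forward axis.
float eyeDepth = LinearEyeDepth(rawDepth);
// The same depth remapped linearly to [0,1] between the near and far clip planes.
float depth01 = Linear01Depth(rawDepth);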
To sample the depth+normal texture, use tex2D on _CameraDepthNormalsTexture. The sampled result cannot be used directly, however; it must be decoded first, using the DecodeDepthNormal function, defined as follows:
inline void DecodeDepthNormal(float4 enc, out float depth, out float3 normal)
{
depth = DecodeFloatRG(enc.zw);
normal = DecodeViewNormalStereo(enc);
}

We can inspect the depth and normal textures a camera generates in the frame debugger. The depth shown there is the nonlinear-space depth value. When viewing the depth texture the picture may look almost entirely black or white: if the scene's objects are all very close to or very far from the camera, the depth values cluster near 0 or near 1, making the texture nearly all black or all white.
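As a usage sketch for DecodeDepthNormal (with illustrative variable names), decoding in a fragment shader could look like this:

half4 encoded = tex2D(_CameraDepthNormalsTexture, i.uv);
float depth01;      // linear depth in [0,1]
float3 viewNormal;  // view-space normal
DecodeDepthNormal(encoded, depth01, viewNormal);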
## Motion Blur ##
This example uses the depth texture to compute a velocity for each pixel, then uses that velocity to decide the direction and size of the blur. The basic approach: compute the world-space position of each pixel in the depth texture; use the previous frame's projection * view matrix to find where that point sat in screen space in the previous frame; take the screen-space position difference as the pixel's velocity; and blur along that velocity. The key step is computing the pixel's world-space position. The pixel's stored depth d relates to the screen-space (NDC) depth as before:

d = 0.5 * z_ndc + 0.5

Screen-space coordinates are obtained from clip-space coordinates by the homogeneous divide, and clip-space coordinates are obtained by transforming view-space coordinates. For a point P:

P_ndc = (P_ndc_x, P_ndc_y, P_ndc_z), z_ndc = P_ndc_z
P_ndc = P_clip / P_clip_w
P_clip = M_f * P_view   // M_f is the projection matrix
P_view = M_v * P_world  // M_v is the view matrix

Inverting the chain gives:

tmp = (M_f * M_v)^-1 * P_ndc // a homogeneous point, before the perspective divide
P_world = tmp / tmp_w        // after the divide, this is P's position in world space

The projection and view matrices can be read directly in a script from camera.projectionMatrix and camera.worldToCameraMatrix.
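Putting the pieces together, with H built from the pixel's uv coordinates and its sampled depth d, the compact form below is exactly what the fragment shader implements:

$$H = (2u-1,\ 2v-1,\ 2d-1,\ 1), \qquad W = (M_f M_v)^{-1} H, \qquad P_{world} = W / W_w$$

The full code is as follows.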
Script:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class MotionBlurWithDepthTexture : PostEffectsBase
{
public Shader motionBlurShader;
private Material motionBlurMaterial = null;
public Material material
{
get
{
motionBlurMaterial = CheckShaderAndCreateMaterial(motionBlurShader, motionBlurMaterial);
return motionBlurMaterial;
}
}
[Range(0.0f, 1.0f)]
public float blurSize = 0.5f;
private Camera myCamera;
public Camera camera
{
get
{
if(myCamera == null)
{
myCamera = GetComponent<Camera>();
}
return myCamera;
}
}
private Matrix4x4 previousViewProjectionMatrix;
void OnEnable()
{
camera.depthTextureMode |= DepthTextureMode.Depth;
}
void OnRenderImage(RenderTexture src,RenderTexture dest)
{
if(material != null)
{
material.SetFloat("_BlurSize", blurSize);
material.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);
Matrix4x4 currentViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
Matrix4x4 currentViewProjectionInverseMatrix = currentViewProjectionMatrix.inverse;
material.SetMatrix("_CurrentViewProjectionInverseMatrix", currentViewProjectionInverseMatrix);
previousViewProjectionMatrix = currentViewProjectionMatrix;
Graphics.Blit(src, dest, material);
}
else
{
Graphics.Blit(src, dest);
}
}
}

Shader:
Shader "Shader/Chapter13/MotionBlurWithDepthTexture"
{
Properties
{
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_BlurSize("BlurSize", Float) = 1.0
}
SubShader
{
CGINCLUDE
#include &#34;UnityCG.cginc&#34;
sampler2D _MainTex;
half4 _MainTex_TexelSize;
sampler2D _CameraDepthTexture;
float4x4 _CurrentViewProjectionInverseMatrix;
float4x4 _PreviousViewProjectionMatrix;
half _BlurSize;
struct v2f{
float4 pos:SV_POSITION;
half2 uv:TEXCOORD0;
half2 uv_depth:TEXCOORD1;
};
v2f vert(appdata_img v){
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord;
o.uv_depth = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP
if(_MainTex_TexelSize.y<0){
o.uv_depth.y = 1 - o.uv_depth.y;
}
#endif
return o;
}
fixed4 frag(v2f i):SV_Target{
float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv_depth); // equivalent to tex2D(_CameraDepthTexture, i.uv_depth).r
float4 H = float4(i.uv.x*2-1, i.uv.y*2-1, d*2-1, 1); // this pixel's NDC position
float4 D = mul(_CurrentViewProjectionInverseMatrix, H);
float4 worldPos = D / D.w; // world-space position after the perspective divide
float4 currentPos = H;
float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos); // where this point was last frame
previousPos /= previousPos.w;
float2 velocity = (currentPos.xy - previousPos.xy) / 2.0; // screen-space velocity
float2 uv = i.uv;
float4 color = tex2D(_MainTex,uv);
uv += velocity*_BlurSize;
for(int it=1;it<3;it++,uv+=velocity*_BlurSize){
float4 currentColor = tex2D(_MainTex,uv);
color += currentColor;
}
color /= 3;
return fixed4(color.rgb, 1.0);
}
ENDCG
Pass{
ZTest Always
Cull Off
ZWrite Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
ENDCG
}
}
FallBack Off
}

The result:
(Image: motion blur with the depth texture)
## Global Fog ##
The key to implementing global fog is reconstructing, from the depth texture, the world-space position of the fragment behind each pixel. The motion blur effect above already did this, but because it performs a matrix multiplication per pixel it affects performance. Here is another method.
To compute each fragment's world-space position, we only need its offset from the camera; adding the camera's world-space position then gives the fragment's world-space position:

P_world = P_camera + P_offset

where P_offset is the vector from the camera to the fragment, expressed in world space. If we know the direction from the camera to the pixel, then by similar triangles we can compute the fragment's distance from its linear depth, and therefore P_offset. From the camera we know the near and far clip plane distances, the FOV, and the aspect ratio, so we can compute the vectors from the camera to the four corners of the near clip plane. The ray for any pixel can then be interpolated from those four corner rays, yielding the fragment's world-space position.
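A short sketch of the similar-triangles argument behind the script below (TL denotes the vector from the camera to the top-left corner of the near clip plane):

$$scale = \frac{|TL|}{near}, \qquad Ray_{TL} = \frac{TL}{|TL|} \cdot scale$$

Each corner ray is normalized and then rescaled by this same factor, so its component along the camera's forward axis is exactly 1. For a pixel whose interpolated ray is Ray and whose linear view-space depth is depth, the reconstruction is then simply:

$$P_{world} = P_{camera} + depth \cdot Ray$$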
The script code is as follows:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class FogWithDepthTexture : PostEffectsBase
{
public Shader fogShader;
private Material fogMaterial = null;
public Material material
{
get
{
fogMaterial = CheckShaderAndCreateMaterial(fogShader, fogMaterial);
return fogMaterial;
}
}
private Camera myCamera;
public Camera camera
{
get
{
if (myCamera == null)
{
myCamera = GetComponent<Camera>();
}
return myCamera;
}
}
private Transform myCameraTransform;
public Transform cameraTransform
{
get
{
if (myCameraTransform == null)
{
myCameraTransform = camera.transform;
}
return myCameraTransform;
}
}
[Range(0.0f, 3.0f)]
public float fogDensity = 1.0f;
public Color fogColor = Color.white;
public float fogStart = 0.0f;
public float fogEnd = 2.0f;
void OnEnable()
{
camera.depthTextureMode |= DepthTextureMode.Depth;
}
void OnRenderImage(RenderTexture src,RenderTexture dest)
{
if(material != null)
{
Matrix4x4 frustumCorners = Matrix4x4.identity;
float fov = camera.fieldOfView;
float near = camera.nearClipPlane;
float far = camera.farClipPlane;
float aspect = camera.aspect;
float halfHeight = near * Mathf.Tan(fov * 0.5f * Mathf.Deg2Rad);
Vector3 toRight = cameraTransform.right * halfHeight * aspect;
Vector3 toTop = cameraTransform.up * halfHeight;
Vector3 topLeft = cameraTransform.forward * near + toTop - toRight;
float scale = topLeft.magnitude / near;
topLeft.Normalize();
topLeft *= scale;
Vector3 topRight = cameraTransform.forward * near + toRight + toTop;
topRight.Normalize();
topRight *= scale;
Vector3 bottomLeft = cameraTransform.forward * near - toRight - toTop;
bottomLeft.Normalize();
bottomLeft *= scale;
Vector3 bottomRight = cameraTransform.forward * near + toRight - toTop;
bottomRight.Normalize();
bottomRight *= scale;
frustumCorners.SetRow(0, bottomLeft);
frustumCorners.SetRow(1, bottomRight);
frustumCorners.SetRow(2, topRight);
frustumCorners.SetRow(3, topLeft);
material.SetMatrix("_FrustumCornersRay", frustumCorners);
material.SetMatrix("_ViewProjectionInverseMatrix", (camera.projectionMatrix * camera.worldToCameraMatrix).inverse);
material.SetFloat("_FogDensity", fogDensity);
material.SetColor("_FogColor", fogColor);
material.SetFloat("_FogStart", fogStart);
material.SetFloat("_FogEnd", fogEnd);
Graphics.Blit(src, dest, material);
}
else
{
Graphics.Blit(src, dest);
}
}
}

The shader code is as follows:
Shader "Shader/Chapter13/FogWithDepthTexture"
{
Properties
{
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_FogDensity("Fog Density", Float) = 1.0
_FogColor("Fog Color", Color) = (1.0,1.0,1.0,1.0)
_FogStart("Fog Start", Float) = 0.0
_FogEnd("Fog End", Float) = 1.0
}
SubShader
{
CGINCLUDE
#include &#34;UnityCG.cginc&#34;
float4x4 _FrustumCornersRay;
sampler2D _MainTex;
half4 _MainTex_TexelSize;
sampler2D _CameraDepthTexture;
float _FogDensity;
float4 _FogColor;
float _FogStart;
float _FogEnd;
struct v2f{
float4 pos:SV_POSITION;
half2 uv:TEXCOORD0;
half2 uv_depth:TEXCOORD1;
float4 interpolatedRay:TEXCOORD2;
};
v2f vert(appdata_img v){
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord;
o.uv_depth = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP
if(_MainTex_TexelSize.y<0){
o.uv_depth.y = 1 - o.uv_depth.y;
}
#endif
int index = 0;
if(v.texcoord.x < 0.5 && v.texcoord.y <0.5){
index = 0;
}else if(v.texcoord.x > 0.5 && v.texcoord.y <0.5){
index = 1;
}else if(v.texcoord.x > 0.5 && v.texcoord.y >0.5){
index = 2;
}else{
index = 3;
}
#if UNITY_UV_STARTS_AT_TOP
if(_MainTex_TexelSize.y<0){
index = 3 - index;
}
#endif
o.interpolatedRay = _FrustumCornersRay[index];
return o;
}
fixed4 frag(v2f i):SV_Target{
float linearDepth = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv_depth));
float3 worldPos = _WorldSpaceCameraPos + linearDepth * i.interpolatedRay.xyz; // reconstruct the world-space position from the interpolated corner ray
float fogDensity = (_FogEnd - worldPos.y) / (_FogEnd - _FogStart); // height-based fog factor
fogDensity = saturate(fogDensity * _FogDensity);
fixed4 finalColor = tex2D(_MainTex, i.uv);
finalColor.rgb = lerp(finalColor.rgb,_FogColor.rgb,fogDensity);
return finalColor;
}
ENDCG
Pass{
ZTest Always
Cull Off
ZWrite Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
ENDCG
}
}
FallBack Off
}

The result:
(Image: global fog with the depth texture)
## Edge Detection ##
The Sobel-based edge detection in the previous chapter produced many edges we did not want, because it operates on color, which is affected by lighting and textures. Using the depth and normal textures gives much better results, since that information is unaffected by lighting and texturing. This time we use the Roberts operator.
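For reference (these are the standard kernels; the notes do not spell them out), the Roberts cross operator takes differences along the two diagonals of a 2x2 block:

$$G_x = \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \qquad G_y = \begin{bmatrix} 0 & 1 \\ -1 & 0 \end{bmatrix}$$

The shader below follows the same idea: it compares the two diagonal sample pairs and treats a large depth or normal difference in either pair as an edge.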
The script code is as follows:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
//myself
public class EdgeDetectNormalsAndDepth : PostEffectsBase
{
public Shader edgeDetectShader;
private Material edgeDetectMaterial = null;
public Material material
{
get
{
edgeDetectMaterial = CheckShaderAndCreateMaterial(edgeDetectShader, edgeDetectMaterial);
return edgeDetectMaterial;
}
}
[Range(0.0f, 1.0f)]
public float edgesOnly = 0.0f;
public Color edgeColor = Color.black;
public Color backgroundColor = Color.white;
public float sampleDistance = 1.0f;
public float sensitivityDepth = 1.0f;
public float sensitivityNormals = 1.0f;
void OnEnable()
{
GetComponent<Camera>().depthTextureMode |= DepthTextureMode.DepthNormals;
}
[ImageEffectOpaque]
void OnRenderImage(RenderTexture src, RenderTexture dest)
{
if (material != null)
{
material.SetFloat("_EdgeOnly", edgesOnly);
material.SetColor("_EdgeColor", edgeColor);
material.SetColor("_BackgroundColor", backgroundColor);
material.SetFloat("_SampleDistance", sampleDistance);
material.SetVector("_Sensitivity", new Vector4(sensitivityNormals, sensitivityDepth, 0.0f, 0.0f));
Graphics.Blit(src, dest, material);
}
else
{
Graphics.Blit(src, dest);
}
}
}
The shader code is as follows:
Shader "Shader/Chapter13/EdgeDetectNormalAndDepth"
{
Properties
{
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_EdgeOnly("EdgeOnly", Float) = 1.0
_EdgeColor("Edge Color", Color) = (1,1,1,1)
_BackgroundColor("Background Color", Color) = (1,1,1,1)
_SampleDistance("Sample Distance", Float) = 1.0
_Sensitivity("Sensitivity", Vector) = (1, 1, 1, 1)
}
SubShader
{
CGINCLUDE
#include &#34;UnityCG.cginc&#34;
sampler2D _MainTex;
half4 _MainTex_TexelSize;
sampler2D _CameraDepthNormalsTexture;
float _EdgeOnly;
float4 _EdgeColor;
float4 _BackgroundColor;
float _SampleDistance;
float4 _Sensitivity;
struct v2f{
float4 pos:SV_POSITION;
half2 uv[5]:TEXCOORD0;
};
v2f vert(appdata_img v){
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv[0] = v.texcoord;
float2 uv = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP
if(_MainTex_TexelSize.y<0){
uv.y = 1 - uv.y;
}
#endif
o.uv[1] = uv + _MainTex_TexelSize.xy * float2(1,1) * _SampleDistance;
o.uv[2] = uv + _MainTex_TexelSize.xy * float2(-1,-1) * _SampleDistance;
o.uv[3] = uv + _MainTex_TexelSize.xy * float2(-1,1) * _SampleDistance;
o.uv[4] = uv + _MainTex_TexelSize.xy * float2(1,-1) * _SampleDistance;
return o;
}
float CheckSame(float4 center,float4 sample){
float2 centerNormal = center.xy; // compare the encoded normals directly; decoding is unnecessary here
float centerDepth = DecodeFloatRG(center.zw);
float2 sampleNormal = sample.xy;
float sampleDepth = DecodeFloatRG(sample.zw);
float2 diffNormal = abs(centerNormal - sampleNormal) * _Sensitivity.x;
int isSameNormal = (diffNormal.x + diffNormal.y) < 0.1;
float diffDepth = abs(centerDepth - sampleDepth) * _Sensitivity.y;
int isSameDepth = diffDepth < 0.1 * centerDepth;
return isSameNormal * isSameDepth ? 1.0:0.0;
}
fixed4 fragRobertsCrossDepthAndNormal(v2f i) : SV_Target {
half4 sample1 = tex2D(_CameraDepthNormalsTexture, i.uv[1]);
half4 sample2 = tex2D(_CameraDepthNormalsTexture, i.uv[2]);
half4 sample3 = tex2D(_CameraDepthNormalsTexture, i.uv[3]);
half4 sample4 = tex2D(_CameraDepthNormalsTexture, i.uv[4]);
half edge = 1.0;
edge *= CheckSame(sample1, sample2);
edge *= CheckSame(sample3, sample4);
fixed4 withEdgeColor = lerp(_EdgeColor, tex2D(_MainTex, i.uv[0]), edge);
fixed4 onlyEdgeColor = lerp(_EdgeColor, _BackgroundColor, edge);
return lerp(withEdgeColor, onlyEdgeColor, _EdgeOnly);
}
ENDCG
Pass{
ZTest Always
Cull Off
ZWrite Off
CGPROGRAM
#pragma vertex vert
#pragma fragment fragRobertsCrossDepthAndNormal
ENDCG
}
}
FallBack Off
}

The result:
(Image: edge detection with the depth+normal texture)
# Questions #
1. It suddenly occurred to me that a Pass is normally attached to some model, so its vertex shader can obtain vertex positions and normals from that model. But a shader attached to the camera processes the depth texture, the normal texture, and the screen image — so where does the vertex data in its vertex shader come from? Does Unity create a screen-sized quad? If so, at which step is it created? If not, where does the data come from?