Hi,
could someone please explain to me when and why I have to divide my transformed coordinates by w, e.g. in the pixel shader?
Here is a typical NVIDIA example. The comments and questions are my own.
// Vertex-shader output / pixel-shader input.
// ScreenP feeds the rasterizer; P and N are interpolated and consumed
// per-pixel for shadowing and lighting.
struct VS_OUTPUT
{
float4 ScreenP : SV_POSITION; // clip-space position; the hardware performs the perspective divide on this one for rasterization
float4 P : TEXCOORD0; // world-space position (w stays 1 as long as the world matrix is affine)
float3 N : NORMAL0; // world-space normal (not normalized after interpolation)
};
// Vertex Shader
// Derives an axis-aligned face normal from a face index, offsets the
// position along it, and outputs world-space position/normal plus the
// clip-space position. (P and face_idx come from the code elided below.)
VS_OUTPUT main( uint id : SV_VERTEXID )
{
VS_OUTPUT output;
... some Code ..
float3 N; // Normal
// face_idx % 3 selects the axis (0 -> X, 1 -> Y, 2 -> Z) ...
N.x = ((face_idx % 3) == 0) ? 1 : 0;
N.y = ((face_idx % 3) == 1) ? 1 : 0;
N.z = ((face_idx % 3) == 2) ? 1 : 0;
// ... and face_idx / 3 selects the sign: faces 0..2 are +X/+Y/+Z, faces 3..5 are negated.
N *= ((face_idx / 3) == 0) ? 1 : -1;
P += N; // push the position out along the face normal (object space)
output.P = mul(c_mObject, float4(P, 1)); // to world space; w = 1 marks a POINT, so the matrix's translation applies
output.ScreenP = mul(c_mViewProj, output.P); // to clip space for the rasterizer; w is generally != 1 after a projection
output.N = mul(c_mObject, float4(N, 0)).xyz; // w = 0 marks a DIRECTION, so translation is ignored
// NOTE(review): transforming the normal by the world matrix itself is only
// correct if c_mObject contains no non-uniform scale; otherwise the
// inverse-transpose of the world matrix is needed — verify.
return output;
}
// Per-camera constants. packoffset pins each member to an explicit
// 16-byte constant register: the 4x4 matrix occupies c0..c3, the eye
// position c4.xyz, and the two clip-plane scalars share c5 (x and y).
cbuffer CameraCB : register( b0 )
{
column_major float4x4 c_mViewProj : packoffset(c0); // combined view * projection matrix
float3 c_vEyePos : packoffset(c4); // camera position, world space
float c_fZNear : packoffset(c5); // near-plane distance (lands in c5.x)
float c_fZFar : packoffset(c5.y); // far-plane distance (packed next to it in c5.y)
};
// pixelShader
// Shades one pixel: reconstructs the world position, does a PCF shadow-map
// lookup through the light's view-projection, then evaluates a spotlight
// with distance/cone attenuation and extinction along the light path.
// (tShadowmap, sampShadowmap and the c_* light constants are declared
// outside this excerpt.)
float4 main(VS_OUTPUT input) : SV_Target0
{
// ANSWER to the question below: division by w (the "perspective divide")
// is what turns a homogeneous/clip-space coordinate into a real 3D point.
// Here input.P is a WORLD position whose w left the vertex shader as 1,
// and perspective-correct interpolation keeps it 1 per pixel — so this
// particular divide is effectively a no-op, written defensively so the
// same code also works if P ever carries a projective coordinate.
float3 P = input.P.xyz / input.P.w;
// Yes: re-normalize, because linear interpolation of unit vectors across
// the triangle yields a shorter-than-unit vector between vertices.
float3 N = normalize(input.N);
float3 Kd = c_vObjectColor; // diffuse albedo — NOTE(review): unused in the visible code below; verify against the full shader
const float SHADOW_BIAS = -0.001f; // depth bias against shadow acne
// P (w = 1, a point) is taken into the LIGHT's clip space. A projection
// matrix puts the view-space depth into w, so here w really is != 1 ...
float4 shadow_clip = mul(c_mLightViewProj, float4(P,1));
// ... and THIS divide is the essential one: clip space -> normalized
// device coordinates (x,y,z in [-1,1]); only after it can xy address a
// texture and z be compared as a depth.
shadow_clip = shadow_clip / shadow_clip.w;
uint hemisphereID = (shadow_clip.z > 0) ? 0 : 1; // presumably selects a dual-hemisphere shadow map face — unused in this excerpt, verify
float2 shadow_tc = float2(0.5f, -0.5f)*shadow_clip.xy + 0.5f; // NDC [-1,1] -> texture space [0,1], with the usual y flip
float receiver_depth = shadow_clip.z+SHADOW_BIAS; // biased depth of this pixel as seen from the light
float total_light = 0;
const int SHADOW_KERNEL = 2; // 5x5 percentage-closer filter
[unroll]
for (int ox=-SHADOW_KERNEL; ox<=SHADOW_KERNEL; ++ox)
{
[unroll]
for (int oy=-SHADOW_KERNEL; oy<=SHADOW_KERNEL; ++oy)
{
// Hardware depth compare per tap; int2 offset shifts the tap by whole texels.
total_light += tShadowmap.SampleCmpLevelZero(sampShadowmap, shadow_tc, receiver_depth, int2(ox, oy)).x;
}
}
// Average the taps -> fraction of the kernel that is lit, in [0,1].
float shadow_term = total_light / ((2*SHADOW_KERNEL+1) * (2*SHADOW_KERNEL+1));
float3 output = float3(0,0,0);
float3 L = -c_vLightDirection; // direction TOWARD the light (c_vLightDirection points away from it)
// Spotlight)
{
// P is a real world-space point here (already divided by w above);
// c_vLightPos is a plain float3 constant, so no divide applies to it.
float light_to_world = length(P - c_vLightPos);
float3 W = (c_vLightPos - P)/light_to_world; // normalized direction from the pixel toward the light
// Classic 1/(a + b*d + c*d^2) falloff plus a constant term in .w.
float distance_attenuation = 1.0f/(c_vLightAttenuationFactors.x + c_vLightAttenuationFactors.y*light_to_world
+ c_vLightAttenuationFactors.z*light_to_world*light_to_world) + c_vLightAttenuationFactors.w;
const float ANGLE_EPSILON = 0.00001f;
// NOTE(review): a spotlight cone test usually compares the light axis
// against the light-to-pixel direction; this uses dot(N, L) instead —
// confirm against the original NVIDIA sample.
float angle_factor = saturate((dot(N, L)-c_fLightFalloffCosTheta)/(1-c_fLightFalloffCosTheta));
float spot_attenuation = (angle_factor > ANGLE_EPSILON) ? pow(angle_factor, c_fLightFalloffPower) : 0.0f;
float3 attenuation = distance_attenuation*spot_attenuation*shadow_term*dot(N, W); // dot(N, W) is the Lambert diffuse term
float3 ambient = 0.00001f*saturate(0.5f*(dot(N, L)+1.0f)); // tiny hemispherical ambient floor
// exp(-sigma * distance): extinction (fog/participating medium) along the light path.
output += c_vLightColor*max(attenuation, ambient) * exp(-c_vSigmaExtinction*light_to_world);
}
return float4(output, 1);
}