2014-04-06 42 views
0

我正在實作級聯陰影貼圖（CSM），我懷疑問題出在我用來選擇適當陰影貼圖的「拆分比較」方式上。目前陰影大致能正確顯示，但在某些情況、某些視角下會失效。問題：如何正確確定級聯陰影貼圖的拆分範圍？

目前照明着色器級看起來像這樣:

"#version 420

// Light-space depth bias subtracted before the shadow-map compare to reduce acne.
const float DEPTH_BIAS = 0.00005;

// Per-light uniform data for the deferred directional-light pass (std140 layout).
layout(std140) uniform UnifDirLight
{
    mat4 mVPMatrix[4];     // light view-projection matrix for each of the 4 cascades
    mat4 mCamViewMatrix;   // main camera view matrix (world -> camera space)
    vec4 mSplitDistance;   // camera-space depth of each cascade's far plane (set on the CPU side)
    vec4 mLightColor;
    vec4 mLightDir;
    vec4 mGamma;
    vec2 mScreenSize;      // framebuffer size in pixels, used to derive G-buffer texcoords
} UnifDirLightPass;

// G-buffer inputs from the geometry pass.
layout (binding = 2) uniform sampler2D unifPositionTexture;
layout (binding = 3) uniform sampler2D unifNormalTexture;
layout (binding = 4) uniform sampler2D unifDiffuseTexture;
// Cascaded shadow maps: array layer = cascade index; the shadow sampler performs the depth compare.
layout (binding = 6) uniform sampler2DArrayShadow unifShadowTexture;

out vec4 fragColor;

void main()
{
    // Full-screen pass: derive the G-buffer texcoord from the fragment position.
    vec2 texcoord = gl_FragCoord.xy/UnifDirLightPass.mScreenSize;

    vec3 worldPos = texture(unifPositionTexture, texcoord).xyz;
    vec3 normal = normalize(texture(unifNormalTexture, texcoord).xyz);
    vec3 diffuse = texture(unifDiffuseTexture, texcoord).xyz;

    // Camera-space position of the fragment; its z drives the cascade choice.
    // NOTE(review): in OpenGL camera space, z is negative in front of the
    // camera, so the '>' tests below only select the correct cascade if
    // mSplitDistance stores camera-space z with the same sign — verify on the
    // CPU side; a sign mismatch would explain failures at certain angles.
    vec4 camPos = UnifDirLightPass.mCamViewMatrix * vec4(worldPos, 1.0);  // legit way of determining the split?

    // Pick the nearest cascade whose far-plane depth the fragment is in front of.
    int index = 3;
    if (camPos .z > UnifDirLightPass.mSplitDistance.x)
     index = 0;
    else if (camPos .z > UnifDirLightPass.mSplitDistance.y)
     index = 1;
    else if (camPos .z > UnifDirLightPass.mSplitDistance.z)
     index = 2;

    // sampler2DArrayShadow expects vec4(s, t, layer, compareDepth): the array
    // layer lives in .z and the reference depth in .w, hence the swap below.
    // NOTE(review): no perspective divide or [0,1] scale-bias is applied here —
    // this assumes mVPMatrix is orthographic (w == 1) and already contains the
    // 0.5 texture-space bias matrix; confirm, since a wrong coordinate range
    // here also breaks cascades only from some viewpoints.
    vec4 projCoords = UnifDirLightPass.mVPMatrix[index] * vec4(worldPos, 1.0);
    projCoords.w = projCoords.z - DEPTH_BIAS;
    projCoords.z = float(index);
    float visibilty = texture(unifShadowTexture, projCoords);

    // Lambertian term; presumably mLightDir points from the surface toward the
    // light — TODO confirm the direction convention.
    float angleNormal = clamp(dot(normal, UnifDirLightPass.mLightDir.xyz), 0, 1);

    fragColor = vec4(diffuse, 1.0) * visibilty * angleNormal * UnifDirLightPass.mLightColor;
}

其中「mSplitDistance」的每個分量，是對應分割視錐體遠平面中心點經主攝像機視圖矩陣變換後得到的 z 值：

// Compute the world-space far-plane center of this cascade slice (output via
// camFarDistCenter) while building the slice's frustum corners.
Vec4 camFarDistCenter; 
CameraFrustrum cameraFrustrum = CalculateCameraFrustrum(nearDistArr[cascadeIndex], farDistArr[cascadeIndex], lighting.mCameraPosition, lighting.mCameraDirection, camFarDistCenter); 

..... 

// Transform the far-plane center into camera (view) space; its z becomes the
// split depth tested in the shader. NOTE(review): in OpenGL view space z is
// negative in front of the camera — the shader's '>' comparisons depend on
// this sign convention being consistent; verify.
camFarDistCenter = lighting.mCameraViewMatrix * camFarDistCenter; 
splitDistances[cascadeIndex] = camFarDistCenter.z; 

下面是爲每個分割建立攝像機視錐體的方法（如果有興趣的話）；我相信這是一個很常見的演算法：

// Builds the 8 world-space corner points of the camera-frustum slice between
// minDist and maxDist along the view direction, and outputs the world-space
// center of the slice's far plane (later transformed to camera space to form
// the cascade split distance).
//
// minDist/maxDist  near/far distance of this cascade slice along the view dir
// cameraPosition   world-space eye position
// cameraDirection  world-space view direction (normalized internally)
// camFarZ          [out] world-space far-plane center of the slice (w = 1)
// fovDegrees       vertical field of view; defaults to the previously
//                  hard-coded 70 degrees (backward compatible)
// aspectRatio      width/height; defaults to the previously hard-coded
//                  1920/1080 (backward compatible)
//
// Corner layout: [0..3] near plane, [4..7] far plane.
CameraFrustrum CalculateCameraFrustrum(const float minDist, const float maxDist, const Vec3& cameraPosition, const Vec3& cameraDirection, Vec4& camFarZ,
                                       const float fovDegrees = 70.0f, const float aspectRatio = 1920.0f / 1080.0f)
{
    // Value-initialize; every corner is assigned below (the old 8-corner brace
    // initializer was dead code, immediately overwritten).
    CameraFrustrum ret = {};

    const Vec3 forwardVec = glm::normalize(cameraDirection);
    // World up is +Z here (cross with (0,0,1)); note this degenerates when
    // looking straight up or down.
    const Vec3 rightVec = glm::normalize(glm::cross(forwardVec, Vec3(0.0f, 0.0f, 1.0f)));
    const Vec3 upVec  = glm::normalize(glm::cross(rightVec, forwardVec));

    const Vec3 nearCenter = cameraPosition + forwardVec * minDist;
    const Vec3 farCenter = cameraPosition + forwardVec * maxDist;

    camFarZ = Vec4(farCenter, 1.0);

    // Half-extents of the near/far planes of a symmetric perspective frustum.
    const float tanHalfFov = tan(glm::radians(fovDegrees) * 0.5f);
    const float nearHalfHeight = tanHalfFov * minDist;
    const float nearHalfWidth = nearHalfHeight * aspectRatio;
    const float farHalfHeight = tanHalfFov * maxDist;
    const float farHalfWidth = farHalfHeight * aspectRatio;

    ret[0] = Vec4(nearCenter - (upVec * nearHalfHeight) - (rightVec * nearHalfWidth), 1.0);
    ret[1] = Vec4(nearCenter + (upVec * nearHalfHeight) - (rightVec * nearHalfWidth), 1.0);
    ret[2] = Vec4(nearCenter + (upVec * nearHalfHeight) + (rightVec * nearHalfWidth), 1.0);
    ret[3] = Vec4(nearCenter - (upVec * nearHalfHeight) + (rightVec * nearHalfWidth), 1.0);

    ret[4] = Vec4(farCenter - upVec * farHalfHeight - rightVec * farHalfWidth, 1.0);
    ret[5] = Vec4(farCenter + upVec * farHalfHeight - rightVec * farHalfWidth, 1.0);
    ret[6] = Vec4(farCenter + upVec * farHalfHeight + rightVec * farHalfWidth, 1.0);
    ret[7] = Vec4(farCenter - upVec * farHalfHeight + rightVec * farHalfWidth, 1.0);

    return ret;
}

像我這樣在相機空間進行分割比較，聽起來合理嗎？還是這可能正是問題所在？

+0

downvote的任何理由?任何缺少的信息? – KaiserJohaan

回答

0
// Fits an orthographic "crop" matrix around the given world-space boundary
// points as seen from the light, and folds it into projectionMatrix so the
// shadow map tightly covers those points.
//
// projectionMatrix  [in/out] light projection; on return includes the crop
// viewMatrix        light point-of-view view matrix
// camera            eye offset applied before the view matrix
// zNear/zFar        light-space near/far planes (no longer used to seed the
//                   min/max fit — see note below; kept for interface compat)
// point             4 world-space boundary points of the region to cover
void CalculateCameraFrustrum(glm::mat4& projectionMatrix,
                             glm::mat4  viewMatrix,   // viewMatrix = light POV
                             glm::vec3  camera,       // camera = eye position + eye direction
                             float      zNear,
                             float      zFar,
                             glm::vec4  point[4])     // point[4] = shadow map boundaries
{                                                     // (original was missing this '{' and the comma after 'camera')
    const glm::mat4 shadMvp = projectionMatrix * (viewMatrix * glm::translate(camera));

    // Bounds of the points in NDC. Seed with huge sentinels: the original
    // seeded them with zNear/zFar, which are light-space depths, not NDC x/y
    // values, and could clamp the fitted region incorrectly.
    float maxX = -1e30f, minX = 1e30f;
    float maxY = -1e30f, minY = 1e30f;

    for (int i = 0; i < 4; ++i)
    {
        // Must be vec4 (original declared vec3 yet read .w): keep w for the
        // perspective divide (w == 1 for an orthographic light projection).
        const glm::vec4 transf = shadMvp * point[i];

        const float x = transf.x / transf.w;
        const float y = transf.y / transf.w;

        if (x > maxX) maxX = x;
        if (x < minX) minX = x;
        if (y > maxY) maxY = y;
        if (y < minY) minY = y;
    }

    const float scaleX  = 2.0f / (maxX - minX);
    const float scaleY  = 2.0f / (maxY - minY);
    const float offsetX = -0.5f * (maxX + minX) * scaleX;
    const float offsetY = -0.5f * (maxY + minY) * scaleY;

    // Crop matrix remapping [minX,maxX] x [minY,maxY] to [-1,1]^2.
    glm::mat4 crop(1.0f); // identity
    crop[0][0] = scaleX;
    crop[1][1] = scaleY;
    // glm matrices are column-major (m[column][row]): translation belongs in
    // column 3. The original wrote m[0][3]/m[1][3], which corrupts the output
    // w row instead of translating x/y.
    crop[3][0] = offsetX;
    crop[3][1] = offsetY;

    // Apply the crop AFTER the projection — the bounds above were measured in
    // post-projection space, so the combined matrix is crop * projection
    // (the original 'projectionMatrix *= shadMvp' multiplied on the wrong side).
    projectionMatrix = crop * projectionMatrix;
} // No need to calculate view frustum splitting,
  // only the boundaries of the shadow maps levels are needed (glm::ortho(...)).