I’m implementing a deferred shader and am running into issues obtaining a 3D view space position from the depth buffer and screen space position. If I attach a depth renderbuffer for depth testing and write the depth values to a colour attachment, everything works as expected, like so:
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, w, h);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depthBuffer);
...
glGenTextures(1, &texDepth);
glBindTexture(GL_TEXTURE_2D, texDepth);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_FLOAT32_ATI, w, h, 0, GL_RGB, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT2_EXT, GL_TEXTURE_2D, texDepth, 0);
GLenum buffers[] = { GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT, GL_COLOR_ATTACHMENT2_EXT};
glDrawBuffers(3, buffers);
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// G Buffer fragment shader
gl_FragData[0] = vec4(oDiffuse, 1.0);
gl_FragData[1] = vec4(0.5f * (normalize(oNormal) + 1.0f) , 1.0);
gl_FragData[2] = vec4(oDepth.x / oDepth.y, 1.0, 1.0, 1.0);
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Lighting fragment shader
float Depth = texture2D(iTexDepth, oTexCoords).r;
vec4 Position = vec4(oTexCoords.x * 2.0f - 1.0f, (oTexCoords.y * 2.0f - 1.0f), Depth, 1.0);
Position = mtxInvProj * Position;
Position /= Position.w;
However, when I try and use GL_DEPTH_ATTACHMENT_EXT as the attachment point (see listing below), I get incorrect results. Lighting changes with camera position, triangles facing away from the light source are lit and so on. When I display just the depth buffer, data is being written but it seems to be much more “bunched together” than when using GL_COLOR_ATTACHMENT2_EXT as the attachment point. For example, if I move the camera towards the mesh with the latter, the mesh depth values “pop” into view much more gradually than the former, so i figured that when I’m reconstructing the view space vector for a given fragment, the incorrect result is throwing off my point lighting. Any ideas?
glGenTextures(1, &texDepth);
glBindTexture(GL_TEXTURE_2D, texDepth);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, w, h, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri (GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_LUMINANCE);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, texDepth, 0);
GLenum buffers[] = { GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT};
glDrawBuffers(2, buffers);
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// G Buffer fragment shader
gl_FragData[0] = vec4(oDiffuse, 1.0);
gl_FragData[1] = vec4(0.5f * (normalize(oNormal) + 1.0f) , 1.0);
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Lighting fragment shader
float Depth = texture2D(iTexDepth, oTexCoords).r;
vec4 Position = vec4(oTexCoords.x * 2.0f - 1.0f, (oTexCoords.y * 2.0f - 1.0f), Depth, 1.0);
Position = mtxInvProj * Position;
Position /= Position.w;
I’m using ATI hardware on Win32 platform. Thanks!