Hi!
I would like to use image load/store in my shaders, like this:
depthGenFragShader
:
#version 460
#extension GL_ARB_bindless_texture : enable

in vec4 frontColor;
in vec2 fTexCoords;
in flat uint texIndex;
in flat uint layer;

// Bindless texture handles stored in a UBO (GL_ARB_bindless_texture).
layout(std140, binding = 0) uniform ALL_TEXTURES {
    sampler2D textures[200];
};

// BUG FIX: 'coherent' is required when one invocation must observe image
// writes made by another invocation through the same image; without it,
// writes can sit in incoherent caches and imageLoad() returns stale data.
layout(binding = 0, rgba32f) uniform coherent image2D depthBuffer;

layout(location = 0) out vec4 fColor;

void main() {
    // BUG FIX: texture2D() was removed from core GLSL (4.60); use the
    // generic texture() overload.  texIndex == 0 means "untextured".
    vec4 texel = (texIndex != 0) ? frontColor * texture(textures[texIndex - 1u], fTexCoords.xy)
                                 : frontColor;
    float z = gl_FragCoord.z;
    float l = layer;
    // NOTE(review): the commented block below is a read-modify-write race:
    // two fragments covering the same pixel may both imageLoad() the old
    // value before either imageStore()s.  Plain image load/store gives NO
    // ordering or mutual-exclusion guarantee between fragment invocations,
    // which is why the depth values are wrong once the depth test is off.
    // To make it correct, wrap the load/store in
    // beginInvocationInterlockARB()/endInvocationInterlockARB()
    // (GL_ARB_fragment_shader_interlock) or use imageAtomic* operations.
    // Parentheses added: '&&' binds tighter than '||' in GLSL.
    //vec4 depth = imageLoad(depthBuffer, ivec2(gl_FragCoord.xy));
    /*if (l > depth.y || (l == depth.y && z > depth.z)) {
        fColor = vec4(0, l, z, texel.a);
        imageStore(depthBuffer, ivec2(gl_FragCoord.xy), vec4(0, l, z, texel.a));
    } else {*/
        fColor = vec4(0, 0, z, texel.a);
    //}
}
But when I use the image to store and load the depth information, the computed normals are wrong in my other shader:
perPixLightingFragmentShader
:
#version 460

in vec4 frontColor;
in vec2 fTexCoords;
in flat uint layer;

const vec2 size = vec2(2.0, 0.0);
const ivec3 off = ivec3(-1, 0, 1);

uniform sampler2D depthTexture;
uniform sampler2D lightMap;
uniform sampler2D specularTexture;
uniform sampler2D bumpMap;
uniform sampler2D alphaMap;
uniform vec3 resolution;
uniform vec4 lightColor;
uniform vec4 lightPos;

layout(location = 0) out vec4 fColor;

void main() {
    vec2 position = gl_FragCoord.xy / resolution.xy;
    vec2 invPosition = vec2(position.x, 1.0 - position.y);
    // BUG FIX: texture2D() was removed from core GLSL (4.60); use texture().
    vec4 depth = texture(depthTexture, position);
    vec4 invDepth = texture(depthTexture, invPosition); // kept: fetched but currently unused
    vec4 alpha = texture(alphaMap, position);
    // Central differences on the stored depth to reconstruct a screen-space normal.
    float s01 = textureOffset(depthTexture, position, off.xy).z;
    float s21 = textureOffset(depthTexture, position, off.zy).z;
    float s10 = textureOffset(depthTexture, position, off.yx).z;
    float s12 = textureOffset(depthTexture, position, off.yz).z;
    vec3 va = normalize(vec3(size.xy, s21 - s01));
    vec3 vb = normalize(vec3(size.yx, s12 - s10));
    // BUG FIX: the cross product of two unit vectors is NOT unit length
    // (|a x b| = sin(theta)); normalize so the lighting dot products below
    // yield true cosines.
    vec3 normal = normalize(cross(va, vb));
    vec4 bump = texture(bumpMap, position);
    vec4 specularInfos = texture(specularTexture, position);
    // Rescale z into window-space depth range; x/y stay in pixels.
    vec3 sLightPos = vec3(lightPos.x, lightPos.y, -lightPos.z * (gl_DepthRange.far - gl_DepthRange.near));
    float radius = lightPos.w;
    vec3 pixPos = vec3(gl_FragCoord.x, gl_FragCoord.y, -depth.z * (gl_DepthRange.far - gl_DepthRange.near));
    vec4 lightMapColor = texture(lightMap, position);
    vec3 viewPos = vec3(resolution.x * 0.5, resolution.y * 0.5, 0.0);
    float z = gl_FragCoord.z;
    vec3 vertexToLight = sLightPos - pixPos;
    // Perturb the reconstructed normal with the bump map when one is bound
    // (an all-zero texel means "no bump").  va/vb are exactly the tangent and
    // binormal the original recomputed, so reuse them.
    if (bump.x != 0.0 || bump.y != 0.0 || bump.z != 0.0) {
        vec3 tmpNormal = normal;
        normal.x = dot(bump.xyz, va);
        normal.y = dot(bump.xyz, vb);
        normal.z = dot(bump.xyz, tmpNormal);
    }
    // Light only the topmost (layer, depth) surface; otherwise fall back to
    // the pre-rendered light map.  Parentheses make the precedence explicit.
    if (layer > depth.y || (layer == depth.y && z >= depth.z)) {
        vec4 specularColor = vec4(0.0);
        float attenuation = 1.0 - length(vertexToLight) / radius;
        vec3 pixToView = pixPos - viewPos;
        float normalLength = dot(normal, vertexToLight);
        vec3 lightReflect = vertexToLight + 2.0 * (normal * normalLength - vertexToLight);
        float m = specularInfos.r; // specular intensity
        float p = specularInfos.g; // specular power (shininess)
        // BUG FIX: pow(x, y) is undefined in GLSL for x < 0 — clamp the
        // cosine to 0 before raising it to the shininess power.
        float specularFactor = max(dot(normalize(pixToView), normalize(lightReflect)), 0.0);
        specularFactor = pow(specularFactor, p);
        if (specularFactor > 0.0) {
            specularColor = vec4(lightColor.rgb, 1.0) * m * specularFactor;
        }
        // BUG FIX: '&&' binds tighter than '||', so the original condition
        // parsed as x!=0 || y!=0 || (z!=0 && vtl.z>0).  Parenthesized to the
        // clearly intended "normal is non-zero AND the light faces the pixel".
        if ((normal.x != 0.0 || normal.y != 0.0 || normal.z != 0.0) && vertexToLight.z > 0.0) {
            vec3 dirToLight = normalize(vertexToLight);
            float nDotL = dot(dirToLight, normal);
            attenuation *= nDotL;
        }
        fColor = vec4(lightColor.xyz, 1.0) * max(0.0, attenuation) + specularColor * (1.0 - alpha.a);
    } else {
        fColor = lightMapColor;
    }
}
Why do I not get the same rendering when I use the image as when I do not use it?
I tried to change the coordinate system with layout(origin_upper_left) on gl_FragCoord,
but the rendering is still not correct.
It is specifically when I disable the depth test and use the image that the depth values become incorrect.
Thanks.