Normal vectors are being transformed weirdly

For some reason, my normals are getting transformed in a weird way. Here’s what it looks like when I output the normals as colors (scaling the -1 to 1 range to the 0 to 1 range of course):

Vertex shader:

#version 330 core

layout(location=0) in vec3 a_Position;
layout(location=1) in vec3 a_Normal;
layout(location=2) in vec4 a_Tangent;
layout(location=4) in vec2 a_UV;
layout(location=6) in ivec4 a_Joints;
layout(location=7) in vec4 a_Weights;

layout(std140) uniform u_VertexShaderData
{
	mat4 u_Model;
	mat4 u_View;
	mat4 u_Projection;
	mat4 u_NormalMtx;
	mat4 u_ShadowBias;
	mat4 u_Bones[28];
	mat4 u_NormalBones[28];
};

out vec4 o_Position;
out vec4 o_Normal;
out vec4 o_Tangent;
out vec4 o_Bitangent;
out vec2 o_UV;
out vec4 o_ShadowCoord;

void main()
{
	vec3 v_Position = a_Position;
	vec3 v_Normal = a_Normal;
	vec3 v_Tangent = a_Tangent.xyz;
	vec3 v_Bitangent = cross(a_Normal,a_Tangent.xyz);
	vec4 w_Position = vec4(v_Position,1.0);
	vec4 w_Normal = vec4(v_Normal,0.0);
	vec4 w_Tangent = vec4(v_Tangent,0.0);
	vec4 w_Bitangent = vec4(v_Bitangent,0.0);
	for(int i=0; i<4; i++)
	{
		mat4 m_Bone = u_Bones[a_Joints[i]];
		mat4 m_NormalBone = u_NormalBones[a_Joints[i]];
		v_Position += (m_Bone*w_Position).xyz*a_Weights[i];
		v_Normal += (m_NormalBone*w_Normal).xyz*a_Weights[i];
		v_Tangent += (m_NormalBone*w_Tangent).xyz*a_Weights[i];
		v_Bitangent += (m_NormalBone*w_Bitangent).xyz*a_Weights[i];
	}
	mat4 m_Position = u_Projection*u_View*u_Model;
	o_Normal = normalize(u_NormalMtx*vec4(v_Normal,0.0));
	o_Tangent = normalize(u_NormalMtx*vec4(v_Tangent,0.0));
	o_Tangent.w = a_Tangent.w;
	o_Bitangent = normalize(u_NormalMtx*vec4(v_Bitangent,0.0));
	o_UV = a_UV;
	gl_Position = m_Position*vec4(v_Position,1.0);
	o_ShadowCoord = u_ShadowBias*gl_Position;
}

Fragment shader:

#version 330 core

in vec4 o_Position;
in vec4 o_Normal;
in vec4 o_Tangent;
in vec4 o_Bitangent;
in vec2 o_UV;
in vec4 o_ShadowCoord;

const float u_EnvSize = 9.0;
const float u_IrrLod = 6.0;

uniform vec4 u_LightDirection;
uniform sampler2DShadow u_ShadowMap;
uniform samplerCube u_Radiance;
uniform sampler2D u_Albedo;
uniform sampler2D u_Metalness;
uniform sampler2D u_NormalMap;
uniform sampler2D u_Roughness;
uniform sampler2D u_Emission;

out vec4 o_FragColor;

vec3 sampleHDR(vec4 c)
{
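	// RGBE-style decode (assuming that's the intended encoding):
	// alpha holds a biased exponent, so the result is rgb * 2^(a*255 - 128).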
	return vec3(c.rgb*exp2((c.a*255.0)-128.0));
}

float sampleShadow()
{
	return 1.0;
	//float bias = 0.005*tan(acos(dot(o_Normal,normalize(-u_LightDirection))));
	//bias = clamp(bias,0.0,0.01);
	//return 0.5+0.5*step(o_ShadowCoord.z-bias/o_ShadowCoord.w,texture(u_ShadowMap,o_ShadowCoord.xyz));
}

void main()
{
	vec4 v_Albedo = texture(u_Albedo,o_UV);
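	// Unpack the tangent-space normal from [0,1] to [-1,1] and
	// reconstruct z from the unit-length constraint below.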
	vec4 v_NormalMap = (texture(u_NormalMap,o_UV)*2.0)-vec4(1.0);
	v_NormalMap.b = sqrt(1.0-dot(v_NormalMap.rg,v_NormalMap.rg));
	vec4 v_Normal = (vec4(o_Tangent.xyz,0.0)*v_NormalMap.r);
	v_Normal += (o_Bitangent*v_NormalMap.g);
	v_Normal = normalize(v_Normal+(o_Normal*v_NormalMap.b))*o_Tangent.w;
	vec4 v_Reflect = reflect(v_Normal,vec4(0.0,0.0,-1.0,0.0));
	float f_Roughness = u_EnvSize*texture(u_Roughness,o_UV).r;
	float f_Metalness = texture(u_Metalness,o_UV).r;
	vec3 v_Emission = texture(u_Emission,o_UV).rgb;
	float f_Emission = (0.3*v_Emission.r)+(0.6*v_Emission.g)+(0.1*v_Emission.b);
	
	vec3 v_Specular = vec3(1.0);
	//vec3 v_Specular = vec3(max(0.0,pow(dot(v_Reflect,vec4(0.577,0.577,0.577,0.0)),4.0+(44.0*f_Roughness))));
	//vec3 v_Specular = sampleHDR(textureLod(u_Radiance,v_Reflect.xyz,f_Roughness));
	vec3 v_Diffuse = vec3(0.5);
	//vec3 v_Diffuse = vec3(0.5*max(0.0,dot(v_Normal,vec4(0.577,0.577,0.577,0.0))));
	//vec3 v_Diffuse = sampleHDR(textureLod(u_Radiance,v_Normal.xyz,u_IrrLod));
	float f_Shadow = sampleShadow();
	float f_Fresnel = 0.04+0.96*pow(max(0.0,1.0-dot(v_Normal,vec4(0.0,0.0,-1.0,0.0))),5.0);
	vec3 v_Metal0 = v_Albedo.rgb*v_Diffuse;
	vec3 v_Metal1 = v_Albedo.rgb*v_Specular;
	
	vec3 resultC = (f_Shadow*mix(mix(v_Metal0,v_Metal1,f_Metalness),v_Specular,f_Fresnel))+v_Emission;
	float resultA = mix(v_Albedo.a,1.0,f_Fresnel)+f_Emission;
	o_FragColor = vec4((o_Normal.xyz+vec3(1.0))/2.0,1.0); // debug output: normals as colors; resultC/resultA unused for now
}

u_NormalMtx is calculated like so:

vsd.u_NormalMtx = glm::transpose(glm::inverse(vsd.u_View*vsd.u_Model));

where vsd is the struct used to pass the interface block to the shader.

EDIT

Here’s how the view and projection matrices are set up:

vsd.u_View = glm::lookAt(glm::vec3(0.0f,50.0f,50.0f),glm::vec3(0.0f),glm::vec3(0.0f,1.0f,0.0f));
vsd.u_Projection = glm::perspectiveFov(2.0f,640.0f,480.0f,0.1f,1000.0f);

And here’s the model position:

objBuffer[0].Position = glm::vec3(0.0f,0.0f,2.0f);
objBuffer[0].Rotation = glm::angleAxis(0.785398f,glm::vec3(0.0f,1.0f,0.0f));
objBuffer[0].Scale = glm::vec3(50.0f);

Finally, here’s how the model matrix is calculated from the position (this is inside a loop, hence the i):

vsd.u_Model = glm::translate(glm::mat4(),objBuffer[i].Position);
vsd.u_Model *= glm::toMat4(objBuffer[i].Rotation);
vsd.u_Model *= glm::scale(glm::mat4(),objBuffer[i].Scale);

EDIT 2

Here’s the vsd struct in full:

typedef struct {
	glm::mat4 u_Model;
	glm::mat4 u_View;
	glm::mat4 u_Projection;
	glm::mat4 u_NormalMtx;
	glm::mat4 u_ShadowBias;
	glm::mat4 u_Bones[28];
	glm::mat4 u_NormalBones[28];
} VertexShaderData;
VertexShaderData vsd;

For starters, I would bypass the skinning transforms and blending in your vertex shader and just pass your skin mesh vertex normal directly through to the fragment shader, so you can verify that at least it looks decent.

That is, at the end of your vertex shader, add this:

o_Normal = vec4(a_Normal, 0.0);

Removed the skinning stuff, still got the same exact result:

#version 330 core

layout(location=0) in vec3 a_Position;
layout(location=1) in vec3 a_Normal;
layout(location=2) in vec4 a_Tangent;
layout(location=4) in vec2 a_UV;
layout(location=6) in ivec4 a_Joints;
layout(location=7) in vec4 a_Weights;

layout(std140) uniform u_VertexShaderData
{
	mat4 u_Model;
	mat4 u_View;
	mat4 u_Projection;
	mat4 u_NormalMtx;
	mat4 u_ShadowBias;
	mat4 u_Bones[28];
	mat4 u_NormalBones[28];
};

out vec4 o_Position;
out vec4 o_Normal;
out vec4 o_Tangent;
out vec4 o_Bitangent;
out vec2 o_UV;
out vec4 o_ShadowCoord;

void main()
{
	mat4 m_Position = u_Projection*u_View*u_Model;
	o_Normal = normalize(u_NormalMtx*vec4(a_Normal,0.0));
	o_Tangent = a_Tangent;
	o_Bitangent = normalize(u_NormalMtx*vec4(cross(a_Normal,a_Tangent.xyz),0.0));
	o_UV = a_UV;
	gl_Position = m_Position*vec4(a_Position,1.0);
	o_ShadowCoord = u_ShadowBias*gl_Position;
}

Since all the weights and joints are zero, and all the bone matrices are initialized to identity matrices, I expected as much.

You’ve still got u_NormalMtx in the mix. Bypass it too, so you can verify that the skin mesh normals are coming through properly.
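That is, something like this at the end of the vertex shader:

o_Normal = vec4(a_Normal,0.0); // raw object-space normal, no matrix involved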

If that works as expected, populate u_NormalMtx with an identity transform on the CPU and verify that you get the same image.

I get the same thing in both cases.

EDIT

After rendering the w component of the normal, it seems as though all the incorrect/gray areas have a w of 1, while the correct/colored areas have a w of 0. I think this is related: after printing the normal matrix, I found that its fourth column has some non-zero components.

Ok, good finding.

Yeah, in general you only need to pass in a 3x3 matrix to transform the normals. And if your MODELVIEW transform doesn’t contain any non-uniform scales or shears, you don’t even need to pass in that. Just use the upper-left 3x3 of the MODELVIEW matrix. This benefits you in that you no longer need to pass in u_NormalMtx nor u_NormalBones[...]. (If you do use uniform scales though, be sure to renormalize your normals after transforming.)
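If you go that route, the shader side would look something like this (a sketch, assuming a rotation/translation-only MODELVIEW plus at most uniform scale):

mat3 m_Normal = mat3(u_View*u_Model);               // upper-left 3x3 of MODELVIEW
o_Normal = vec4(normalize(m_Normal*a_Normal),0.0);  // renormalize in case of uniform scale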

If you’re just computing the full 4x4 inverse transpose of MODELVIEW for the normal matrices, those non-zero entries are expected: the translation part of the transform ends up in the fourth row/column of the inverse transpose, so that’s fine. For now, you can try just forcing the normal’s w to 0 in the shader (or only transforming the vec3 normal by the upper-left 3x3) to get rid of the normal.w issue.
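Both are small changes to your simplified shader; untested sketches:

// Option 1: keep the 4x4 multiply, then throw w away and renormalize.
o_Normal = u_NormalMtx*vec4(a_Normal,0.0);
o_Normal = vec4(normalize(o_Normal.xyz),0.0);

// Option 2: transform by the upper-left 3x3 only, so the translation
// terms in the fourth row/column never touch the normal at all.
o_Normal = vec4(normalize(mat3(u_NormalMtx)*a_Normal),0.0);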

I’d also take a look at how you’re populating the vertex normals to ensure that they’re being passed into the shader as you expect. You should be able to doctor those up on the CPU side and see the corresponding color output when you render your normals as colors.

As for rendering your normals as colors, consider sending x/y/z to the r/g/b color components. That makes it easy to see if you’re really getting out what you expect to be getting. For instance:

    vec3 tmp    = o_Normal.xyz * 0.5 + 0.5;
    o_FragColor = vec4( tmp, 1.0 );

I fixed the issue by simply wrapping my normal matrices in a mat3, like so (mat3() takes just the upper-left 3x3 of the mat4, so the translation terms can no longer leak into w):

o_Normal = normalize(vec4(mat3(u_NormalMtx)*v_Normal,0.0));
o_Tangent = normalize(vec4(mat3(u_NormalMtx)*v_Tangent,0.0));
o_Bitangent = normalize(vec4(mat3(u_NormalMtx)*v_Bitangent,0.0));

In general though, I’m not sure if I’ll use scaling in my animations or not. Since it’s useful for the squash-and-stretch technique, I’ll probably end up specifying the bone normal matrices just in case.
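If I do end up scaling bones, the same mat3 wrapping should work inside the skinning loop too; something like this (untested sketch against my original shader):

for(int i=0; i<4; i++)
{
	mat3 m_NormalBone = mat3(u_NormalBones[a_Joints[i]]); // upper-left 3x3 only, so w stays 0
	v_Normal += (m_NormalBone*a_Normal)*a_Weights[i];
	v_Tangent += (m_NormalBone*a_Tangent.xyz)*a_Weights[i];
	v_Bitangent += (m_NormalBone*cross(a_Normal,a_Tangent.xyz))*a_Weights[i];
}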
