I have been trying to implement a ray tracer in my fragment shader, and I need to clear up some confusion first:
i) I have looked through some example ray tracers, and I find that most of them use fragCoord to cast the ray. I have been wondering: why don't they use the eye and vertex coordinates to cast the ray instead? Please correct me if I am wrong here: the camera/eye is at (0,0,0) in eye coordinates, and if we multiply a vertex by the modelview matrix, myvertex = ModelviewMatrix * vertex, we get that vertex in eye coordinates, stored in myvertex. Is it appropriate to cast a ray with the ray origin at (0,0,0) and the ray direction being the position of myvertex in normalized form?
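For comparison, here is a rough sketch of the gl_FragCoord setup I keep seeing in those examples, just to show my understanding of it; the resolution and fovy uniforms are placeholders I invented for this sketch, not part of my real code below:

// Sketch of the gl_FragCoord approach: eye space, camera at the origin, looking down -z.
// `resolution` and `fovy` are hypothetical uniforms, only for this illustration.
uniform vec2 resolution; // window size, e.g. (640.0, 480.0)
uniform float fovy;      // vertical field of view in degrees, e.g. 45.0
void main() {
vec2 ndc = (gl_FragCoord.xy / resolution) * 2.0 - 1.0; // pixel -> [-1, 1]
float aspect  = resolution.x / resolution.y;
float tanHalf = tan(radians(fovy) * 0.5);
vec3 rayOri = vec3(0.0); // eye at the origin in eye space
vec3 rayDir = normalize(vec3(ndc.x * aspect * tanHalf, ndc.y * tanHalf, -1.0)); // through this pixel on the image plane
gl_FragColor = vec4(rayDir * 0.5 + 0.5, 1.0); // visualize the direction for debugging
}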
I am only trying to do very basic stuff here: drawing 2 spheres with different ambient materials, no lighting calculation involved yet. I just want my shader to correctly map the right ambient color onto each sphere.
size 640 480 // window size
camera 0 -4 4 0 0 0 0 1 1 45 //eye, center, up, fovy
pushTransform // first sphere should look grey
ambient .7 .7 .7
sphere 0 0 0 1 // x y z radius
popTransform
pushTransform // 2nd sphere, purple
translate 2 0 0
ambient .1 .7 .7
sphere 0 0 0 1
popTransform
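(For context, the camera line above is what the lookAt uniform in my fragment shader is built from. As a sketch of my understanding, the view matrix from eye/center/up would be constructed the way gluLookAt does it; makeLookAt here is a hypothetical helper, my real matrix is uploaded from the host application:)

// Hypothetical helper: view matrix from the camera parameters, gluLookAt convention.
mat4 makeLookAt(vec3 eye, vec3 center, vec3 up) {
vec3 f = normalize(center - eye); // forward
vec3 s = normalize(cross(f, up)); // right
vec3 u = cross(s, f);             // orthonormal up
// mat4 takes columns (column-major), so the rotation rows s, u, -f appear transposed here
return mat4(vec4(s.x, u.x, -f.x, 0.0),
            vec4(s.y, u.y, -f.y, 0.0),
            vec4(s.z, u.z, -f.z, 0.0),
            vec4(-dot(s, eye), -dot(u, eye), dot(f, eye), 1.0));
}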
//vertex shader
attribute vec3 vertices; // per-vertex position from the application
uniform mat4 pm, mv;     // projection and modelview matrices
varying vec4 myvertex;   // eye-space vertex position, passed to the fragment shader
void main() {
gl_Position = pm * mv * vec4(vertices, 1.0);
myvertex = mv * vec4(vertices, 1.0); // vertex in eye coordinates
}
//Fragment shader
varying vec4 myvertex; // eye-space position interpolated from the vertex shader
//data for raytracer//
const int numObj = 2;
uniform vec4 ambData[numObj];
uniform vec4 diffData[numObj];
uniform vec4 specData[numObj];
uniform vec4 emiData[numObj];
uniform mat4 transfData[numObj];
uniform float shnData[numObj];
uniform int typeData[numObj];
uniform float sizeData[numObj];
uniform mat4 lookAt;
uniform int maxDepth;
//////////////////////
bool circleIntersect(in vec3 cen, in float r, in vec3 ori, in vec3 dir, inout float t ){
// ray-sphere intersection; assumes dir is normalized
vec3 RC = ori - cen;
float DdRC = dot(dir, RC);
float disc = r*r - dot(RC,RC) + DdRC*DdRC; // discriminant of the quadratic
if( disc > 0.0 ) // 2 roots
{
float sq = sqrt(disc);
float sqtP = -DdRC + sq; // far root
float sqtN = -DdRC - sq; // near root
if (sqtP <= 0.0 && sqtN <= 0.0){ // sphere is entirely behind the ray
return false;
}
// take the nearest root that lies in front of the ray origin
if (sqtN > 0.0){
t = sqtN;
}
else{
t = sqtP;
}
return true;
}
return false;
}
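(For reference, the two roots above come from substituting the ray $p(t) = o + t\,d$, with $d$ normalized, into the sphere equation $\lVert p - c \rVert^2 = r^2$, which gives

$$t = -\,d\cdot(o-c)\ \pm\ \sqrt{(d\cdot(o-c))^2 - \lVert o-c\rVert^2 + r^2},$$

so DdRC above is $d\cdot(o-c)$ and the expression under the square root is the discriminant tested against 0.)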
vec4 intersection(in vec3 rayO, in vec3 rayD){
vec4 retClr = vec4(0.0);
float tMin = t_inf; //t_inf is a constant = 100000.0
float t = tMin;
vec3 norm;
int closestIdx = -1; // -1 means nothing has been hit yet
for (int i = 0; i < numObj; i++){
if (typeData[i] == 2){ // sphere
vec4 c = lookAt * transfData[i] * vec4(0.0, 0.0, 0.0, 1.0); // center in eye space
if (circleIntersect(c.xyz, 6.0, rayO, rayD, t)){ // radius hardcoded here rather than read from sizeData[i]
if (t < tMin){
closestIdx = i;
tMin = t;
}
}
}
else if (typeData[i] == 4){ // cube; cubeIntersect is defined elsewhere in the shader (not shown)
if (cubeIntersect(transfData[i], rayO, rayD, t, norm) ){
if (t < tMin){
closestIdx = i;
tMin = t;
}
}
}
}
if (closestIdx >= 0){ // shade only if something was actually hit
retClr = vec4(0.0, 0.0, 0.0, 1.0) + ambData[closestIdx];
}
return retClr; // black where no object was hit
}
void main (void)
{
vec4 eye = vec4(0.0, 0.0, 0.0, 1.0); // the camera sits at the origin in eye space //lookAt * vec4(0.0, -4.0, 4.0, 1.0);
vec3 rayOri = eye.xyz / eye.w;
vec3 rayDir = normalize(myvertex.xyz - rayOri); // from the eye through this fragment's eye-space position
gl_FragColor = intersection(rayOri, rayDir); // gl_FragColor has no defined initial value, so assign rather than accumulate
}
The result I get is two circles drawn on the screen, but sadly both of them are purple… I rotated the camera to look at the two spheres from different directions, but both look completely purple, with no other color. I strongly suspect my ray setup is totally wrong.