Projection matrix doesn't work

My program renders two squares side by side. I am trying to add a projection matrix to the mix, but it won’t work: as soon as I multiply the position vector by it in the vertex shader, nothing is rendered.

#include <iostream>
#include "glad/glad.h"
#include "GLFW/glfw3.h"
#include "glm/glm.hpp"
#include "glm/ext.hpp"
#include "glm/gtc/matrix_transform.hpp"
#include "SOIL.h"

#define out std::cout
#define end std::endl

const GLchar *vertexShaderSource = R"(
#version 330 core

layout (location = 0) in vec3 pos;
layout (location = 1) in vec2 coord;

out vec2 TexCoord;

uniform float aspect_ratio;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

void main() 
{
    gl_Position = projection * vec4(pos.x, pos.y, pos.z, 1.0);
    TexCoord = coord;
}
)";

const GLchar *fragmentShaderSource = R"(
#version 330 core

in vec2 TexCoord;
out vec4 Color;

uniform sampler2D ourTexture;

void main() 
{
    //Color = vec4(1.0, 0.5, 0.2, 1.0);
    Color = texture(ourTexture, TexCoord);
}
)";

void framebuffer_size_callback(GLFWwindow*, GLint, GLint);
int render(GLFWwindow*, GLuint, GLuint);
void buffer(GLuint&, GLuint&, GLuint&, float [], unsigned int []);
void shader(GLuint, GLuint);
void processInput(GLFWwindow*);

GLuint shaderProgram;

/* Flow
main -> render -> shader -> processInput
*/

int main()
{
    glfwInit();
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_RESIZABLE, GL_TRUE);

    GLFWwindow *window = glfwCreateWindow(1280, 720, "Voxel Game", NULL, NULL);

    if (!window)
    {
        out << "Failed to create GLFW window" << end;
        glfwTerminate();
        return -1;
    }

    else
    {
        glfwMakeContextCurrent(window);
        glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
    }

    gladLoadGL();

    if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
    {
        out << "Failed to initialize GLAD" << end;
        return -1;
    }

    const GLFWvidmode* mode = glfwGetVideoMode(glfwGetPrimaryMonitor());
    GLint window_width = mode->width;
    GLint window_height = mode->height;

    render(window, window_width, window_height);

    return 0;
}

int render(GLFWwindow *window, GLuint window_width, GLuint window_height)
{
    float vertices[] = 
    {
        // positions      // texture coords
        0.2f, 0.2f, 0.0f, 1.0f, 1.0f,   // top right
        0.2f, -0.2f, 0.0f, 1.0f, 0.0f,  // bottom right
        -0.2f, -0.2f, 0.0f, 0.0f, 0.0f, // bottom left
        -0.2f, 0.2f, 0.0f, 0.0f, 1.0f   // top left 
    };

    float vertices2[] =
    {
        // positions      // texture coords
        0.6f, 0.2f, 0.0f, 1.0f, 1.0f,    // top right
        0.6f, -0.2f, 0.0f, 1.0f, 0.0f,   // bottom right
        0.2f, -0.2f, 0.0f, 0.0f, 0.0f,   // bottom left
        0.2f, 0.2f, 0.0f, 0.0f, 1.0f     // top left 
    };

    unsigned int indices[] = 
    {
       0, 1, 3, // first triangle
       1, 2, 3  // second triangle
    };

    // Vertex Array, Vertex Buffer and Element Buffer
    GLuint VAO[2], VBO[2], EBO[2];
    glGenVertexArrays(2, VAO);
    glGenBuffers(2, VBO);
    glGenBuffers(2, EBO);

    glBindVertexArray(VAO[0]);
    glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO[0]);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);

    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)0);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)));
    glEnableVertexAttribArray(1);

    glBindVertexArray(VAO[1]);
    glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices2), vertices2, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO[1]);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);

    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)0);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)));
    glEnableVertexAttribArray(1);

    shader(window_width, window_height);

    // Load Image
    int width, height, channels;
    unsigned char *image = SOIL_load_image("grass.png", &width, &height, &channels, SOIL_LOAD_RGB);

    // Texture
    unsigned int texture;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);

    // Settings
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);

    // Texture Image
    if (image) 
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
        glGenerateMipmap(GL_TEXTURE_2D);
    }

    else
    {
        out << "Failed to load texture" << end;
    }

    SOIL_free_image_data(image);

    // Render Loop
    while (!glfwWindowShouldClose(window))
    {
        processInput(window);

        glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT);

        //glBindTexture(GL_TEXTURE_2D, texture);
        //glUseProgram(shaderProgram);

        glBindVertexArray(VAO[0]);
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); // 6 = number of indices (3 per triangle)

        glBindVertexArray(VAO[1]);
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); 

        glfwSwapBuffers(window);
        glfwPollEvents();
    }

    glDeleteVertexArrays(2, VAO);
    glDeleteBuffers(2, VBO);

    glfwTerminate();
    return 0;
}

void shader(GLuint window_width, GLuint window_height)
{
    GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);
    glCompileShader(vertexShader);

    GLuint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
    glCompileShader(fragmentShader);

    shaderProgram = glCreateProgram();
    glAttachShader(shaderProgram, vertexShader);
    glAttachShader(shaderProgram, fragmentShader);
    glLinkProgram(shaderProgram);
    glUseProgram(shaderProgram);

    GLuint location = glGetUniformLocation(shaderProgram, "aspect_ratio");
    glUniform1f(location, window_width / (float)window_height);
    
    glm::mat4 model = glm::mat4(1.0f); // make sure to initialize matrix to identity matrix first
    glm::mat4 view = glm::mat4(1.0f);
    glm::mat4 projection = glm::perspective(glm::radians(180.0f), window_width / (float)window_height, 0.1f, 100.0f);

    // retrieve the matrix uniform locations
    GLuint modelLoc = glGetUniformLocation(shaderProgram, "model");
    GLuint viewLoc = glGetUniformLocation(shaderProgram, "view");
    GLuint projectionLoc = glGetUniformLocation(shaderProgram, "projection");

    // pass them to the shaders (3 different ways)
    glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model));
    glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view));
    glUniformMatrix4fv(projectionLoc, 1, GL_FALSE, glm::value_ptr(projection));

    glDeleteShader(vertexShader);
    glDeleteShader(fragmentShader);
}

void processInput(GLFWwindow* window)
{
    if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) glfwSetWindowShouldClose(window, true);
}

void framebuffer_size_callback(GLFWwindow* window, GLint width, GLint height)
{
    glViewport(0, 0, width, height);
}

This probably has something to do with it. A Y field-of-view angle of 180 degrees is invalid.

You’re defining a wedge of space anchored at a point (the eyepoint). Wider FOV angles widen that wedge. When you get to 180 degrees, the wedge degenerates.

You can see this in the math: the perspective matrix scales X and Y by the reciprocal of tan(fovy / 2). When fovyInDegrees reaches 180, it basically computes tan( π / 2 ) (aka tan( 90 deg )), which is undefined (infinite), so the resulting matrix degenerates.
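
For reference, this is roughly what glm::perspective builds (a paraphrased sketch assuming the default right-handed, -1..1 clip-depth configuration; perspectiveSketch is just an illustrative name, not a glm function):

#include <cmath>
#include "glm/glm.hpp"

// Sketch of the X/Y scaling inside a perspective matrix (paraphrased, not the
// exact glm source). Note the division by tan(fovy / 2).
glm::mat4 perspectiveSketch(float fovy, float aspect, float zNear, float zFar)
{
    float tanHalfFovy = std::tan(fovy / 2.0f);       // fovy is in radians

    glm::mat4 result(0.0f);
    result[0][0] = 1.0f / (aspect * tanHalfFovy);    // X scale
    result[1][1] = 1.0f / tanHalfFovy;               // Y scale
    result[2][2] = -(zFar + zNear) / (zFar - zNear); // depth mapping
    result[2][3] = -1.0f;                            // perspective divide by -z
    result[3][2] = -(2.0f * zFar * zNear) / (zFar - zNear);
    return result;
}

// With fovy = radians(180), fovy / 2 is pi / 2, tan(pi / 2) blows up, and the
// X/Y scale factors collapse towards zero, so the frustum degenerates.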

Also, when quoting source code, please enclose it with lines containing 3 back-ticks on them (```). It makes it much easier to read. For example:

int main()
{
    mainloop();
    return 0;
}

Thanks for the reply, but I had already tried it with smaller angles (30, 45, 60), to no avail.

Your vertex positions all have Z=0; with the model-view matrix being an identity matrix, the triangles are outside of the view frustum (in eye space, Z=0 lies in front of the near plane at Z=-0.1, so everything is clipped).

The projection matrix transforms points from eye-space to clip-space. In the shader, you’re only multiplying your input position (pos) by the projection matrix. Therefore, pos must be in eye-space.

Eye-space looks down the -Z axis. The near clip plane is Z = -nearval and the far clip plane is Z = -farval; in your case, that means Z = -0.1f and Z = -100.0f.

Therefore, as a test, just hack your vertex shader to override pos.z with a Z value between -0.1 and -100. For instance:

    gl_Position = projection * vec4(pos.x, pos.y, -10, 1.0);

Ah, thank you, I understand. I suspected the Z value might be involved, but forgot that it has to be negative. It works now, appreciate the help :grinning:

Okay, new problem. I have now tried to add a model matrix and a view matrix like so:

glm::mat4 model = glm::mat4(1.0f); // make sure to initialize matrix to identity matrix first
glm::mat4 view = glm::mat4(1.0f);

model = glm::rotate(model, glm::radians(-45.0f), glm::vec3(1.0f, 0.0f, 0.0f));
view = glm::translate(view, glm::vec3(0.0f, 0.0f, 0.0f));

const GLchar *vertexShaderSource = R"(
#version 330 core

layout (location = 0) in vec3 pos;
layout (location = 1) in vec2 coord;

out vec2 TexCoord;

uniform float aspect_ratio;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

void main() 
{
    gl_Position = projection * view * model * vec4(pos.x, pos.y, -1.0f, 1.0);
    TexCoord = coord;
}
)";

The issue is that the rotation isn’t working correctly. The quad doesn’t rotate around the X-axis; it gets distorted and moved elsewhere instead. It should stay in place and look as though it’s tilted towards the floor. I have tried with only the projection and rotation matrices to isolate the problem: the rotation matrix is the culprit, the other two work fine.

Now that you’re using modeling and viewing transforms (and feeding object-space positions into your shader), you probably want to get rid of this hard-coded Z of -1.0 and use pos.z. That’s from back when you were feeding eye-space positions into your shader.

Tried that, it’s still not right.

void main() 
{
    gl_Position = projection * view * model * vec4(pos, 1.0);
    TexCoord = coord;
}
)";

Originally these are green squares in the center of the screen; the rotation makes them move down and distort instead.

That’s what you would expect for rotation about the X axis.

No, it should stay centered and not change position. If it’s rotating away from the view about the X-axis, it should shorten (becoming a flat line at 90 degrees), not lengthen. Something isn’t right here.

That’s what the tutorial’s rotation looks like: same place, but tilted away from the view. That’s what I want.

Does the quad lie in the Z=0 plane? Rotations are about the origin of the local coordinate system.

In order for the quad to be visible with Z=0, the view transformation will need to have a translation in the negative Z direction.
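
As a concrete sketch of that (the quad’s vertices centred on the origin in the Z=0 plane; the -3.0f distance is just an arbitrary example, not a value from your code):

// Model matrix: rotate the quad in place about its own centre (the origin).
glm::mat4 model = glm::rotate(glm::mat4(1.0f),
                              glm::radians(-45.0f),
                              glm::vec3(1.0f, 0.0f, 0.0f));

// View matrix: push the scene into the visible -Z range of the frustum.
glm::mat4 view = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -3.0f));

// The vertex shader then combines them as: projection * view * model * vec4(pos, 1.0)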

Yeah, my Z range is from -0.1 to -100, and I changed the Z of the triangle vertices to -0.2. I don’t see how this is relevant, though, as I am rotating around the X-axis.

Nobody knows? The projection and translation matrices work fine; it’s only the rotation that does not.

The following matrix:

glm::mat4 model_view_projection = glm::rotate(glm::mat4(1.0f), glm::radians(45.0f), glm::vec3(1.0f, 0.0f, 0.0f));

Outputs this:

1 0 0 0
0 0.707107 -0.707107 0
0 0.707107 0.707107 0
0 0 0 1

Doesn’t seem right. Is this a glm bug?

The matrix is fine; a 45° rotation about the X axis.

I’m not entirely sure what the problem is, but I think you’re expecting it to rotate about a line through the centre of the quad, while the plane of the quad is offset from the origin.

The quad should have all 4 vertices with Z=0, and any translation needs to come before the rotation when you build the matrix (i.e. glm::translate first, then glm::rotate, so the composed matrix is T·R and the rotation acts on the vertices while they are still centred on the origin).

It might help if you make the rotation angle vary with time (e.g. use glfwGetTime(), since you’re using GLFW, to get the current time in seconds) so that you can visualise the rotation.
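
For example, a minimal sketch of what that could look like inside the render loop, assuming the shader program is bound there and the modelLoc uniform location (currently local to your shader() function) is made accessible:

// Recompute the model matrix every frame so the rotation angle follows the
// elapsed time. glfwGetTime() returns seconds since GLFW was initialised.
float angle = (float)glfwGetTime() * glm::radians(50.0f); // 50 degrees per second (arbitrary)

glm::mat4 model = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(1.0f, 0.0f, 0.0f));

glUseProgram(shaderProgram);
glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model));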

This is where I am confused. My projection matrix defines the Z bounds from -0.1f to -100.0f, so I can’t have any vertices at zero. Eventually I will have lots of textures at lots of depths, and they will all have to rotate in place. I currently have no idea how to do this.

I’m currently not translating, but I do have the translation matrix come before the rotation one, so I have the order right.

GameGuru, … use glm::lookAt() for the view matrix and apply whatever transformations you like in the model matrix.
The view matrix is an abstraction of the camera.
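
For example, a minimal sketch of that split (the eye position and the tilt angle below are arbitrary illustration values):

// View matrix: where the camera sits and what it looks at.
glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),   // eye position
                             glm::vec3(0.0f, 0.0f, 0.0f),   // target
                             glm::vec3(0.0f, 1.0f, 0.0f));  // up direction

// Model matrix: per-object transformations, e.g. tilting the quad in place.
glm::mat4 model = glm::rotate(glm::mat4(1.0f),
                              glm::radians(-45.0f),
                              glm::vec3(1.0f, 0.0f, 0.0f));

// The shader combines them as: projection * view * model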

Got it working like so:

glm::mat4 projection = glm::perspective(glm::radians(120.0f), window_width / (float)window_height, 0.1f, 100.0f);

glm::mat4 view = glm::lookAt
(
    glm::vec3(0, 0, 1),
    glm::vec3(0, 0, 0),
    glm::vec3(0, 1, 0)
);

glm::mat4 model_view = glm::rotate(view, glm::radians(-55.0f), glm::vec3(1.0f, 0.0f, 0.0f));

glm::mat4 model_view_projection = projection * model_view;

I replaced glm::translate with glm::lookAt. My understanding is that my camera is now at Z = 1, while the bounds are still -0.1 to -100 (so it’s as if the camera is on the outside of the screen, pointed at it). It’s looking right at the origin, where my vertices are centered, and is thus able to see everything.

My question is: will I be able to do all the translations simply using this new view matrix, or will I still have to bother with glm::translate?

Also, why didn’t y’all tell me about this function? :stuck_out_tongue_winking_eye:

Any combination of rotate/translate transformations can be converted to a lookAt transformation. And vice versa (although the reverse has some redundancy; ultimately you only need 6 scalar parameters to uniquely specify such a transformation). Which is preferable depends upon what information you start with. If you have the lookAt parameters (eye position, target position, up direction) immediately available, then lookAt makes it straightforward to calculate the view part of the model-view transformation. You would typically still use rotate/translate for the model part.

In practice, lookAt is most useful if the camera is attached to some form of vehicle whose position is obtained by integrating its velocity, and the desired orientation is “whatever it needs to be to end up looking directly at some other point”. If you want more direct control of the camera (e.g. a “VR” style model where the user decides where to look), you’d normally generate the view transformation via translate/rotate.
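
To make the comparison concrete, here is a sketch of the two ways of building a view matrix (viewFromLookAt and viewFromTranslateRotate are made-up names for illustration, and the numbers are arbitrary):

#include "glm/glm.hpp"
#include "glm/gtc/matrix_transform.hpp"

// lookAt style: you already know the eye position and the point to look at.
glm::mat4 viewFromLookAt()
{
    return glm::lookAt(glm::vec3(0.0f, 2.0f, 5.0f),   // eye
                       glm::vec3(0.0f, 0.0f, 0.0f),   // target
                       glm::vec3(0.0f, 1.0f, 0.0f));  // up
}

// translate/rotate style (typical for a player-controlled camera): keep a
// position plus yaw/pitch angles, and build the view matrix as the inverse of
// the camera's pose: rotate by the negated angles, then translate by the
// negated position.
glm::mat4 viewFromTranslateRotate(glm::vec3 cameraPos, float yaw, float pitch)
{
    glm::mat4 view = glm::rotate(glm::mat4(1.0f), -pitch, glm::vec3(1.0f, 0.0f, 0.0f));
    view = glm::rotate(view, -yaw, glm::vec3(0.0f, 1.0f, 0.0f));
    view = glm::translate(view, -cameraPos);
    return view;
}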

I see, this gives me a much better understanding of how this all works. My project will eventually be a playable video game with a moving player that can look and go anywhere. It looks like it would be better to use translate / rotate based on what you said. Thanks.