Do I need to bind and unbind my vertex buffer every draw call?

This follows on from a previous thread, but I want to ask a specific question here. For context: I am trying to write a simple OpenGL program that makes a line segment spin counterclockwise like a clock hand. In the following code, I have been told that the lines marked with !!! at the end are unnecessary in the draw loop, since the VAO already knows which VBO it's working with.

However, when I comment out the two lines marked with !!! in the draw loop, the line stops spinning and I am left with a still line segment. So my question is: do I really need to bind and unbind my VBO every draw call? Thank you for your help :)

Here is the full code:

#include <iostream>
#include <stdio.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

#include <string>
#include <cstring> // for strlen, used in AddShader
#include "util.h"
#include <thread>
#include <chrono>
#include <cmath>

// Setup: Linker->General, Linker->Input, add that .dll next to main

// Window dimensions
const GLint WIDTH = 1720, HEIGHT = 1600; // height was 1200

GLuint VAO, VBO, shader;

int numberOfVertices = 2; // number of vertices we want to draw
int index = 0;

// Vertex shader -> runs for each vertex and lets you manipulate it before the result is passed on to the fragment shader

// In the shaders you are writing GLSL
static const char* vShader = "                                \n\
#version 330                                                  \n\
layout (location = 0) in vec3 pos;                            \n\
                                                              \n\
void main()                                                   \n\
{                                                             \n\
	gl_Position = vec4(pos.x, pos.y, pos.z, 1.0);             \n\
}";

// Fragment shader
static const char* fShader = "                                \n\
#version 330                                                  \n\
out vec4 colour;                                              \n\
                                                              \n\
void main()                                                   \n\
{                                                             \n\
	colour = vec4(1.0, 1.0, 1.0, 1.0);                        \n\
}";

void CreateLines()
{

	GLfloat vertices[6];
	util::addToArray(vertices, index);


	glGenVertexArrays(1, &VAO); // Generates a vertex array object name, stored in VAO
	// Bind the vertex array so that any OpenGL calls that work with vertex arrays or vertex buffers now operate on this VAO
	glBindVertexArray(VAO);

	glGenBuffers(1, &VBO);
	glBindBuffer(GL_ARRAY_BUFFER, VBO);
	// GL_DYNAMIC_DRAW hints that the buffer contents will be updated repeatedly (the vertex data changes every frame)
	glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_DYNAMIC_DRAW);

	glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); // location of attribute, values at a time, type of those values, normalization, stride, offset
	glEnableVertexAttribArray(0);

	glBindBuffer(GL_ARRAY_BUFFER, 0); // Unbind
	glBindVertexArray(0); // Unbind

}



void AddShader(GLuint theProgram, const char* shaderCode, GLenum shaderType)
{
	GLuint theShader = glCreateShader(shaderType); // Creates an empty shader of that type and stores its ID in theShader

	const GLchar* theCode[1];
	theCode[0] = shaderCode;

	GLint codeLength[1];
	codeLength[0] = strlen(shaderCode);

	glShaderSource(theShader, 1, theCode, codeLength); // Attaches the source code to the shader object
	glCompileShader(theShader);

	GLint  result = 0;
	GLchar eLog[1024] = { 0 };

	glGetShaderiv(theShader, GL_COMPILE_STATUS, &result); // Gets the result of compiling the shader
	if (!result)
	{
		glGetShaderInfoLog(theShader, sizeof(eLog), NULL, eLog);
		printf("Error compiling the %d shader: '%s'\n", shaderType, eLog);
		return;
	}
	glAttachShader(theProgram, theShader);
}

// Creates the shader program, compiles and attaches the shaders, then links and validates the program
void CompileShaders()
{
	shader = glCreateProgram(); // Creates the program and gives shader the id so we can use shader to modify it

	if (!shader)
	{
		printf("Error creating shader\n");
		return;
	}

	AddShader(shader, vShader, GL_VERTEX_SHADER); // GL_VERTEX_SHADER is a built-in enum telling OpenGL what type of shader is being added
	AddShader(shader, fShader, GL_FRAGMENT_SHADER);

	GLint  result = 0;
	GLchar eLog[1024] = { 0 };

	glLinkProgram(shader); // Actually create the executables on the graphics card and make sure it's working
	glGetProgramiv(shader, GL_LINK_STATUS, &result); // Gets result of linking the shader
	if (!result)
	{
		glGetProgramInfoLog(shader, sizeof(eLog), NULL, eLog);
		printf("Error linking program: '%s'\n", eLog);
		return;
	}

	// Validate the program
	glValidateProgram(shader); // Makes sure the shader is valid in the current context that we're working in

	glGetProgramiv(shader, GL_VALIDATE_STATUS, &result); // Gets the result of validating the program
	if (!result)
	{
		glGetProgramInfoLog(shader, sizeof(eLog), NULL, eLog);
		printf("Error validating program: '%s'\n", eLog);
		return;
	}
}

int main()
{
	// Initialize GLFW
	if (!glfwInit())
	{
		printf("GLFW initialization failed!");
		glfwTerminate();
		std::cin.get();
		return 1;
	}

	// Setup GLFW window properties
	// OpenGL version
	glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
	glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
	// Core profile = no backwards compatibility
	glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
	// Allow forward compatibility
	glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);

	GLFWwindow* mainWindow = glfwCreateWindow(WIDTH, HEIGHT, "Playground", NULL, NULL);

	if (!mainWindow)
	{
		printf("GLFW window creation failed");
		glfwTerminate();
		std::cin.get();
		return 1;
	}

	// Get buffer size information
	// We want the dimensions of the framebuffer, the area in the middle of the window that holds all the OpenGL data as it is passed to the window
	int bufferWidth, bufferHeight;
	glfwGetFramebufferSize(mainWindow, &bufferWidth, &bufferHeight);

	// Set the context for GLEW to use
	// Let GLEW know that this OpenGL context is the one everything should be tied to, so everything that gets drawn goes to this window
	glfwMakeContextCurrent(mainWindow);
	
	// Allow modern extension features
	glewExperimental = GL_TRUE;

	if (glewInit() != GLEW_OK)
	{
		printf("GLEW initilisation failed!");
		glfwDestroyWindow(mainWindow);
		glfwTerminate();
		std::cin.get();
		return 1;
	}

	// Setup viewport size
	// There is a difference here between WIDTH and bufferWidth, should look into
	glViewport(0, 0, bufferWidth, bufferHeight);

	// CreateLines();
	// CompileShaders();

	int i = 0;
	CompileShaders();
	CreateLines();


	// Loop until window closed
	while (!glfwWindowShouldClose(mainWindow))
	{

		// Get and handle user input events
		glfwPollEvents(); // will check for any user events

		index++;

		// Clear the window
		glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
		glClear(GL_COLOR_BUFFER_BIT);

		glUseProgram(shader); // Takes the program ID and tells the graphics card to use the program with that ID
		// Everything below here will be using this shader
		glBindVertexArray(VAO);
		// Now using that VAO

		glBindBuffer(GL_ARRAY_BUFFER, VBO);// !!!

		// Recompute the vertex data for this frame and upload it into the buffer
		GLfloat vertices[6];
		util::addToArray(vertices, index);
		glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(vertices), vertices);


		glBindBuffer(GL_ARRAY_BUFFER, 0); // Unbind !!!

		glDrawArrays(GL_LINES, 0, numberOfVertices); // Mode, where to start in the array, the number of vertices to draw

		glBindVertexArray(0);
		glUseProgram(0); // Unbinds the shader program

		glfwSwapBuffers(mainWindow); // We are drawing to a hidden buffer that constantly gets swapped around

	}
	std::cin.get();
	return 0;
}

Also the util namespace:

#pragma once
#include <iostream>
#include <stdio.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <string>
#include <math.h>

namespace util
{

	// Fills theArray with two vertices: the first at the origin, the second on a
	// circle of radius 0.5 at an angle of `index` degrees, so the line sweeps
	// counterclockwise as index increases
	void addToArray(GLfloat theArray[], int& index)
	{
		theArray[0] = 0.0f;
		theArray[1] = 0.0f;
		theArray[2] = 0.0f;

		theArray[3] = 0.5f * cos(index * 3.1415926 / 180);
		theArray[4] = 0.5f * sin(index * 3.1415926 / 180);
		theArray[5] = 0.0f;
	}

}

If:

  1. you need to change the contents of the buffer object,
  2. the method you’re using to update the buffer contents requires the buffer to be bound,
  3. and the buffer object is not already bound to a buffer object binding point (aka target)

then you need to bind it to one before you do the update.
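Regarding #3, a quick way to see what is currently bound is to query GL_ARRAY_BUFFER_BINDING. A small illustrative sketch (purely a debugging aid, reusing the VBO global from your code; in real code you would usually just track this yourself):

GLint boundBuffer = 0;
glGetIntegerv(GL_ARRAY_BUFFER_BINDING, &boundBuffer); // id of the buffer currently bound to GL_ARRAY_BUFFER
if (static_cast<GLuint>(boundBuffer) != VBO)
    glBindBuffer(GL_ARRAY_BUFFER, VBO); // only bind if it is not already bound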

In your case, all this is true before your buffer update, so you need to bind it, as you are doing.

For what it’s worth, in your simple example you could trivially satisfy #3 by just not manually unbinding the buffer object from the GL_ARRAY_BUFFER bind target each time you reference it. Then it would already be bound. However in a more realistic program where multiple buffer objects are being bound, this is less likely to be an option.

And you could avoid #2 by updating the buffer object using Direct State Access routines (see the gl*NamedBuffer*() APIs here), or by updating the buffer object contents using Persistently Mapped Buffers.
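As a rough sketch of the DSA route, assuming an OpenGL 4.5 context (or the GL_ARB_direct_state_access extension) and the same VBO, VAO and shader globals as in your code, the per-frame update could become:

// glNamedBufferSubData addresses the buffer by its name (the VBO id),
// so nothing needs to be bound to GL_ARRAY_BUFFER for the upload itself
GLfloat vertices[6];
util::addToArray(vertices, index);
glNamedBufferSubData(VBO, 0, sizeof(vertices), vertices);

glUseProgram(shader);
glBindVertexArray(VAO); // the VAO still remembers which buffer feeds attribute 0
glDrawArrays(GL_LINES, 0, numberOfVertices);

And a minimal sketch of the persistently mapped route, assuming OpenGL 4.4+ (or GL_ARB_buffer_storage); synchronisation (e.g. fences) is omitted for brevity:

// Setup, in place of glBufferData in CreateLines():
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferStorage(GL_ARRAY_BUFFER, 6 * sizeof(GLfloat), nullptr,
                GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
GLfloat* mapped = static_cast<GLfloat*>(glMapBufferRange(
    GL_ARRAY_BUFFER, 0, 6 * sizeof(GLfloat),
    GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT));

// Per frame: write straight into the mapped pointer, no bind or upload call needed
util::addToArray(mapped, index);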


Thank you for your response; this has been very helpful.