Hello,
I have encountered the following issue:
I have been making a 2D engine. In it, there are generic Vertex Buffers (that is, an engine struct that holds VBO, EBO and VAO data). Relevant code:
VertexBufferID VertexBufferCreate() {
static VertexBufferID VertexBufferFreeID = 1;
VertexBuffer vb;
glGenVertexArrays(1, &vb.VAO);
glGenBuffers(1, &vb.VBO);
glGenBuffers(1, &vb.EBO);
glBindVertexArray(vb.VAO);
glBindBuffer(GL_ARRAY_BUFFER, vb.VBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vb.EBO);
glBindVertexArray(0);
XEDrawState* drawState = GetDrawState();
drawState->storage.vecVertexBuffer.push_back(vb);
VertexBufferIndex index = (VertexBufferIndex)drawState->storage.vecVertexBuffer.size() - 1;
VertexBufferID myID = VertexBufferFreeID++;
vb.ID = myID;
drawState->storage.mapVertexBufferIDIndex.insert({ myID, index });
return myID;
}
void VertexBufferDestroy(VertexBufferID ID) {
XEDrawState* drawState = GetDrawState();
VertexBufferIndex index = drawState->storage.mapVertexBufferIDIndex[ID];
VertexBuffer& vb = drawState->storage.vecVertexBuffer[index];
glDeleteBuffers(1, &vb.VAO);
glDeleteBuffers(1, &vb.VBO);
glDeleteBuffers(1, &vb.EBO);
VertexBufferID lastID = drawState->storage.vecVertexBuffer.back().ID;
VSETNPOP(drawState->storage.vecVertexBuffer, index);
drawState->storage.mapVertexBufferIDIndex[lastID] = index;
drawState->storage.mapVertexBufferIDIndex.erase(ID);
}
void VertexBufferDescribeInternal(VertexBufferID bufferID, const std::vector<int>& vec) {
XEDrawState* drawState = GetDrawState();
VertexBufferIndex index = drawState->storage.mapVertexBufferIDIndex[bufferID];
VertexBuffer& buf = drawState->storage.vecVertexBuffer[index];
buf.vecAttribute.clear();
for (int i = 0; i < vec.size(); i += 2) {
VERTEXATTRIBTYPE aType = (VERTEXATTRIBTYPE)vec[i];
int num = vec[i + 1];
buf.vecAttribute.push_back({ aType, num });
}
int stride = 0;
for (VertexAttribute& v : buf.vecAttribute) {
stride += v.num * GetVertexAttributeTypeSize(v.aType);
}
buf.stride = stride;
GLint activeLocation = 0;
uint64_t offset = 0;
glBindVertexArray(buf.VAO);
for (size_t i = 0; i < buf.vecAttribute.size(); ++i) {
VertexAttribute& va = buf.vecAttribute[i];
switch (va.aType) {
case VERTEXATTRIBTYPE::FLOAT:
glVertexAttribPointer(activeLocation, va.num, GL_FLOAT, GL_FALSE, stride, (GLvoid*)offset);
break;
case VERTEXATTRIBTYPE::INT:
glVertexAttribIPointer(activeLocation, va.num, GL_INT, stride, (GLvoid*)offset);
break;
case VERTEXATTRIBTYPE::UINT:
glVertexAttribIPointer(activeLocation, va.num, GL_UNSIGNED_INT, stride, (GLvoid*)offset);
break;
case VERTEXATTRIBTYPE::DOUBLE:
glVertexAttribLPointer(activeLocation, va.num, GL_DOUBLE, stride, (GLvoid*)offset);
break;
}
glEnableVertexArrayAttrib(buf.VAO, activeLocation);
++activeLocation;
offset += va.num * GetVertexAttributeTypeSize(va.aType);
}
glBindVertexArray(0);
}
My goal is for users to create vertex buffers, then define the format of the vertex buffer by calling VertexBufferDescribe(…) and then set the internal vertex/index buffer data by using the following two functions:
void VertexBufferSetVertexDataInternal(VertexBufferID ID, uint8_t* data, size_t size) {
XEDrawState* drawState = GetDrawState();
VertexBufferIndex index = drawState->storage.mapVertexBufferIDIndex[ID];
VertexBuffer& vb = drawState->storage.vecVertexBuffer[index];
glBindBuffer(GL_ARRAY_BUFFER, vb.VBO);
glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);
glBufferData(GL_ARRAY_BUFFER, size, data, GL_DYNAMIC_DRAW);
vb.vecVert.resize(size);
memcpy(&vb.vecVert[0], data, size);
}
void VertexBufferSetIndexDataInternal(VertexBufferID ID, uint8_t* data, size_t size) {
assert(sizeof(GLuint) == sizeof(uint32_t));
XEDrawState* drawState = GetDrawState();
VertexBufferIndex index = drawState->storage.mapVertexBufferIDIndex[ID];
VertexBuffer& vb = drawState->storage.vecVertexBuffer[index];
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vb.EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, size, NULL, GL_STATIC_DRAW);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);
vb.vecIndex.resize(size / sizeof(uint32_t));
memcpy(&vb.vecIndex[0], data, size);
}
Question 1 : Am I doing something unbelievably stupid here? It seems to be working so far.
My issue here is the following: While editing/rendering tilemaps, a tilemap’s Vertex Buffer may be frequently updated. When this happens, this code runs:
void Project::TilemapUpdateVertexBuffer(ResourceTileset* res, TILEMAP& tilemap, VertexBufferID* vbID) {
if (XEDRAW::EXPOSED::VertexBufferExists(*vbID)) {
XEDRAW::EXPOSED::VertexBufferDestroy(*vbID);
}
*vbID = XEDRAW::EXPOSED::VertexBufferCreate();
XEDRAW::VertexBufferDescribeInternal(*vbID, {
XEDRAW::VERTEXATTRIBTYPE::FLOAT, 3,
XEDRAW::VERTEXATTRIBTYPE::FLOAT, 4,
XEDRAW::VERTEXATTRIBTYPE::FLOAT, 2
});
size_t vertSize = 3 * sizeof(float) + 4 * sizeof(float) + 2 * sizeof(float);
size_t numQuads = tilemap.width * tilemap.height;
size_t numVerts = 4 * numQuads;
uint8_t* data = (uint8_t*)malloc(numVerts * vertSize);
XEBUFFER::BufferWriter bw(data);
assert(XEDRAW::EXPOSED::SurfaceExists(res->surfTileset));
const float invTilemapTexWidth = 1.0f / (float)XEDRAW::EXPOSED::SurfaceGetWidth(res->surfTileset);
const float invTilemapTexHeight = 1.0f / (float)XEDRAW::EXPOSED::SurfaceGetHeight(res->surfTileset);
for (int j = 0; j < tilemap.height; ++j) {
for (int i = 0; i < tilemap.width; ++i) {
//blah write stuff to data
}
}
XEDRAW::VertexBufferSetVertexDataInternal(*vbID, data, numVerts * vertSize);
free(data);
size_t offset = 0;
std::vector<uint32_t> vecIndex;
vecIndex.reserve(numQuads * 6);
for (size_t i = 0; i < numVerts; ++i) {
vecIndex.push_back(offset + 0);
vecIndex.push_back(offset + 1);
vecIndex.push_back(offset + 2);
vecIndex.push_back(offset + 0);
vecIndex.push_back(offset + 2);
vecIndex.push_back(offset + 3);
offset += 4;
}
XEDRAW::VertexBufferSetIndexDataInternal(*vbID, (uint8_t*)vecIndex.data(), numVerts * 6 * sizeof(uint32_t));
}
Essentially the existing Vertex Buffer is destroyed (and glDeleteBuffers is called for all associated buffers), then it is recreated using the functions above.
My issue is that whenever TilemapUpdateVertexBuffer() is called, there is a massive increase in RAM, it is like new memory is allocated to hold all that vertex data (which can be quite a lot, say, for a 512x512 map), but no other memory has been freed whatsoever.
Now I know that OpenGL implementations may reserve the right to delay freeing memory, however this leads to an eventual crash after scoring 6+ GB, most likely due to not enough memory space available.
Question 2: What am I doing wrong with this? Am I shaking something loose due to my existing setup? Banging my head here, would appreciate any help!!
EDIT: Using OpenGL 3.3+, GPU Nvidia Geforce RTX 2060
EDIT 2: Obviously, the memory spike occurs right after calling glBufferData, hence the title.