Texture tearing issue

I’ve been tearing my hair out over this issue. I have a very simple graphics renderer that I recently changed from issuing one draw call per mesh to batching everything up and issuing a single draw call once all the meshes have been submitted. Before the change there was no tearing problem; now there is, as shown in this video:

Texture tearing video

The program does in fact run at 60 fps, but the screen capture program couldn’t keep up with that rate. Also, the GUI elements are created using CEGUI, which uses its own renderer.

I have verified that double buffering and vsync are both enabled, and I have a frame limiter set at 60 fps. I’ve tried changing the limit to 120 and to 30, but the problem remains.
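
For reference, this is roughly how the double buffering and vsync are set up. It’s only a sketch — the real code lives in RenderEngine, and the window title, sizing, and helper name here are placeholders:

#include <SDL2/SDL.h>
#include <iostream>

SDL_Window* createWindow(int width, int height)
{
    SDL_Init(SDL_INIT_VIDEO);

    // Request a double-buffered framebuffer before the window is created
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);

    SDL_Window* window = SDL_CreateWindow("Game",
        SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
        width, height, SDL_WINDOW_OPENGL);
    SDL_GL_CreateContext(window);

    // Enable vsync; SDL_GL_SetSwapInterval returns -1 if it isn't supported
    if (SDL_GL_SetSwapInterval(1) != 0)
        std::cout << "Warning - vsync not supported\n";

    return window;
}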

For reference, my shaders:

Fragment shader:


#version 330

in DATA{
 vec4 position;
 vec2 texCoord;
 vec3 normal;
 vec4 color;
 float tid;
} vs_in;

out vec4 color;


//uniform sampler2D diffuse;
uniform sampler2D textures[32];

void main()
{
    color = vs_in.color;
    if(vs_in.tid > 0.0)
    { // Correct to use multiple textures - texture2D(mytexture, mycoord)
       //gl_FragColor = texture2D(diffuse, vs_in.texCoord) * vs_in.color;
       int tid = int(vs_in.tid + 0.5);
       color = texture(textures[tid], vs_in.texCoord) * vs_in.color;
    }
}

Vertex shader:


#version 330

layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
layout (location = 3) in vec4 color;
layout (location = 4) in float tid;


uniform mat4 transform;
uniform mat4 projection;
uniform mat4 view = mat4(1.0);
uniform mat4 model = mat4(1.0);

out DATA{
 vec4 position;
 vec2 texCoord;
 vec3 normal;
 vec4 color;
 float tid;
} vs_out;


void main()
{
	gl_Position = projection * transform * vec4(position, 1.0);
	vs_out.position = gl_Position;
	vs_out.texCoord = texCoord;
	vs_out.normal = normal;
	vs_out.color = color;
	vs_out.tid = tid;
}

Here is the code for my renderer.
BatchRenderEngine.h


#pragma once
#include "RenderEngine.h"

const unsigned int MAX_VERTS = 40000;
const unsigned int MAX_INDICES = 60000;
const unsigned int MAX_TEXTURES = 32;

class BatchRenderEngine :
	public RenderEngine
{
public:
	BatchRenderEngine(int width, int height);
	virtual ~BatchRenderEngine();
	virtual void render();
protected:
	virtual void init();
	virtual void enqueue(Renderable r, GLuint * indices);
	virtual void startBatch();
	virtual void flush();
	virtual void endBatch();

	Vertex * _vertices;
	GLuint * _indices;

	int _texIDs[MAX_TEXTURES];

	GLuint _indBuffer;

	float _textures[MAX_TEXTURES];

	unsigned int _currVerts;
	unsigned int _currInds;
	unsigned int _currTextures;

	GLuint _VBO, _VAO;

};

BatchRenderEngine.cpp


#include "BatchRenderEngine.h"


BatchRenderEngine::BatchRenderEngine(int width, int height) : RenderEngine(width, height)
{
	init();
}

void BatchRenderEngine::init()
{
	for (int i = 0; i < MAX_TEXTURES; i++)
		_texIDs[i] = i;

	_shader->Bind();
	_shader->setUniform1iv("textures", MAX_TEXTURES, _texIDs);

	glGenVertexArrays(1, &_VAO);
	glGenBuffers(1, &_VBO);

	glBindVertexArray(_VAO);
	glBindBuffer(GL_ARRAY_BUFFER, _VBO);
	glBufferData(GL_ARRAY_BUFFER, MAX_VERTS*sizeof(Vertex), NULL, GL_DYNAMIC_DRAW);

	glEnableVertexAttribArray(POSITION_VB);
	glEnableVertexAttribArray(TEXCOORD_VB);
	glEnableVertexAttribArray(NORMAL_VB);
	glEnableVertexAttribArray(COLOR_VB);
	glEnableVertexAttribArray(TID_VB);

	glVertexAttribPointer(POSITION_VB, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*)0);
	glVertexAttribPointer(TEXCOORD_VB, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*)(offsetof(Vertex, texCoord)));
	glVertexAttribPointer(NORMAL_VB, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*)(offsetof(Vertex, normal)));
	glVertexAttribPointer(COLOR_VB, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*)(offsetof(Vertex, color)));
	glVertexAttribPointer(TID_VB, 1, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*)(offsetof(Vertex, tid)));

	glGenBuffers(1, &_indBuffer);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indBuffer);
	glBufferData(GL_ELEMENT_ARRAY_BUFFER, MAX_INDICES * sizeof(GLuint), NULL, GL_STATIC_DRAW);

	glBindBuffer(GL_ARRAY_BUFFER, 0);

	startBatch();
}


BatchRenderEngine::~BatchRenderEngine()
{
	SDL_GL_DeleteContext(_glContext);
	SDL_DestroyWindow(_window);

	glDeleteBuffers(NUM_BUFFERS, &_VBO);
	glDeleteVertexArrays(1, &_VAO);
}

void BatchRenderEngine::startBatch()
{
	int i;
	_shader->Bind();
	_shader->Update(*_transform);

	glBindBuffer(GL_ARRAY_BUFFER, _VBO);


	// Error - second time this is mapped, _vertices comes back with NULL pointer
	_vertices = (Vertex*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
	if (_vertices == NULL)
		std::cout << "Error - failure to map _vertices
";

	glBindBuffer(GL_ARRAY_BUFFER, 0);

	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indBuffer);
	_indices = (GLuint *)glMapBuffer(GL_ELEMENT_ARRAY_BUFFER, GL_WRITE_ONLY);
	if (_indices == NULL)
		std::cout << "Error - failure to map _indices
";

	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

	// Reset indices
	for (int i = 0; i < MAX_INDICES; i++)
	{
		_indices[i] = 0;
	}

	// reset textures
	for (int i = 0; i < MAX_TEXTURES; i++)
	{
		_textures[i] = 0.0f;
	}

	// reset vertices
	for (i = 0; i<MAX_VERTS; i++)
	{
		_vertices[i].pos = glm::vec3(0.0f);
		_vertices[i].texCoord = glm::vec2(0.0f);
		_vertices[i].normal = glm::vec3(0.0f);
		_vertices[i].color = glm::vec4(0.0f);
		_vertices[i].tid = 0.0f;
	}

	_currVerts = 0;
	_currInds = 0;
	_currTextures = 1;

	return;
}

void BatchRenderEngine::enqueue(Renderable r, GLuint * indices)
{
	// Check in regard to drawString function - error?

	unsigned int vertNum;
	unsigned int indNum;
	unsigned int i;
	const GLuint tid = (GLuint)r.getTid();
	float ts = 0.0f;

	vertNum = r.getVertices().size();
	indNum = vertNum / 4 * 6;  // Assumes all input renderables are sprites - fix


	if (vertNum + _currVerts > MAX_VERTS || indNum + _currInds > MAX_INDICES || _currTextures >= MAX_TEXTURES) // we've reached our limit, draw everything and restart
	{
		endBatch();
		flush();
		startBatch();
	}

	// Error possibly here - incorrect textures
	if (tid > 0)
	{
		bool found = false;
		for (unsigned int i = 0; i < _currTextures; i++)
		{
			if (_textures[i] == tid)
			{
				ts = (float)i;
				found = true;
				break;
			}
		}

		if (!found)
		{
			if (_currTextures >= MAX_TEXTURES)
			{
				endBatch();
				flush();
				startBatch();
			}
			ts = _currTextures * 1.0f;
			_textures[_currTextures] = tid * 1.0f;
			_currTextures++;
		}
	}
	else
		ts = -1.0f;



	std::vector<Vertex *> verts = r.getVertices();

	if (indices == NULL)
	{
		indices = new GLuint[6];
		indices[0] = 0;
		indices[1] = 1;
		indices[2] = 2;
		indices[3] = 2;
		indices[4] = 3;
		indices[5] = 0;
		indNum = 6;
	}


	// Store all verts and attributes in arrays
	for (i = 0; i<vertNum; i++)
	{
		_vertices[_currVerts + i].pos = *(verts[i]->GetPos());
		_vertices[_currVerts + i].texCoord = *(verts[i]->GetTexCoord());
		_vertices[_currVerts + i].normal = *(verts[i]->GetNormal());
		_vertices[_currVerts + i].color = *(verts[i]->GetColor());
		_vertices[_currVerts + i].tid = ts;
	}

	// Store all indices in an array
	for (i = 0; i < indNum; i++)
	{
		_indices[_currInds + i] = indices[i] + _currVerts;
	}

	_currVerts += vertNum;
	_currInds += indNum;
}

void BatchRenderEngine::endBatch()
{
	glBindBuffer(GL_ARRAY_BUFFER, _VBO);
	if (!glUnmapBuffer(GL_ARRAY_BUFFER))
		std::cout << "Error - glUnmapBuffer _vertices failed
";
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indBuffer);
	if (!glUnmapBuffer(GL_ELEMENT_ARRAY_BUFFER))
		std::cout << "Error - glUnmapBuffer _indices failed
";


	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

	// Set up texture
	for (unsigned int i = 0; i < _currTextures; i++)
	{
		glActiveTexture(GL_TEXTURE0 + i);
		glBindTexture(GL_TEXTURE_2D, (GLuint)_textures[i]);
	}
}


void BatchRenderEngine::flush()
{
	glBindVertexArray(_VAO);
	glBindBuffer(GL_ARRAY_BUFFER, _VBO);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indBuffer);

	glFinish();

	glDrawElements(GL_TRIANGLES, _currInds, GL_UNSIGNED_INT, 0);

	glBindVertexArray(0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}

void BatchRenderEngine::render()
{
	endBatch();
	flush();

	glUseProgram(0);
	glBindTexture(GL_TEXTURE_2D, 0);
	glActiveTexture(GL_TEXTURE0);

	CEGUI::System::getSingleton().renderAllGUIContexts();

	glDisable(GL_SCISSOR_TEST); // Prevents flickering caused by CEGUI call

	swapWindowBuffer();

	clear(0.0f, 0.15f, 0.3f, 1.0f);

	startBatch();

}

Any thoughts?

My texture object:

Texture.h


// Source: https://github.com/BennyQBD/ModernOpenGLTutorial

#ifndef TEXTURE_H
#define TEXTURE_H

#include <string>
#include <GL/glew.h>

class Texture
{
public:
	Texture(std::string fileName, GLuint pixelFormat);
	Texture(unsigned char * data, int width, int height, GLuint pixelFormat);
	Texture(GLuint ID);

    void loadFromFile8(std::string fileName );
    void loadFromFile32(std::string fileName );
    void loadFromData8(unsigned char * data, int width, int height);
    void loadFromData32(unsigned char * data, int width, int height);
	void loadFromID(GLuint ID);
	void Bind(unsigned int unit);

	GLuint getTID() { if (this == NULL) return -1; else return m_texture; }

	virtual ~Texture();
protected:
private:
	Texture(const Texture& texture) {}
	void operator=(const Texture& texture) {}

	GLuint m_texture;
	GLuint m_pixelFormat;
};

#endif

Texture.cpp


// Source: https://github.com/BennyQBD/ModernOpenGLTutorial

#include "texture.h"
#include "stb_image.h"
#include <iostream>
#include <cassert>

Texture::Texture(std::string fileName, GLuint pixelFormat)
{
	m_texture = 0;

    if(pixelFormat == GL_ALPHA)
        loadFromFile8(fileName);
    else if(pixelFormat == GL_RGBA)
        loadFromFile32(fileName);
}

Texture::Texture(unsigned char * data, int width, int height, GLuint pixelFormat)
{
	m_texture = 0;

    if(pixelFormat == GL_ALPHA)
        loadFromData8(data, width, height);
    else if(pixelFormat == GL_RGBA)
        loadFromData32(data, width, height);
}

Texture::Texture(GLuint ID)
{
	m_texture = 0;

	loadFromID(ID);
}

Texture::~Texture()
{
	glDeleteTextures(1, &m_texture);
}

void Texture::loadFromFile8(std::string fileName)
{
	int width, height, numComponents;
    unsigned char* data = stbi_load((fileName).c_str(), &width, &height, &numComponents, 4);

	if (data == NULL)
	{
		std::cout << "Error - stbi_load failed.
";
		return;
	}

    loadFromData8(data, width, height);

    stbi_image_free(data);
}

void Texture::loadFromFile32(std::string fileName)
{
	int width, height, numComponents;
    unsigned char* data = stbi_load((fileName).c_str(), &width, &height, &numComponents, 4);

    loadFromData32(data, width, height);

    stbi_image_free(data);
}


void Texture::loadFromData8(unsigned char * data, int width, int height)
{
    if(data == NULL)
		std::cerr << "Unable to load texture! " << std::endl;

    glGenTextures(1, &m_texture); // Generates space for a texture, puts handle in m_texture
    glBindTexture(GL_TEXTURE_2D, m_texture);

    // Makes the texture wrap around the mesh that is drawn
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

    // Interpolation based on rotated mesh
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    // Copy data to GPU
    glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, width, height, 0, GL_ALPHA, GL_UNSIGNED_BYTE, data);
//    glTexImage2D()
}

void Texture::loadFromData32(unsigned char * data, int width, int height)
{
    if(data == NULL)
		std::cerr << "Unable to load texture! " << std::endl;

    glGenTextures(1, &m_texture); // Generates space for a texture, puts handle in m_texture
    glBindTexture(GL_TEXTURE_2D, m_texture);

    // Makes the texture wrap around the mesh that is drawn
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

    // Interpolation based on rotated mesh
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    // Copy data to GPU
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
//    glTexImage2D()
}

void Texture::loadFromID(GLuint ID)
{
	m_texture = ID;


}


void Texture::Bind(unsigned int unit)
{
    assert(unit >=0 && unit <= 31);

    glActiveTexture(GL_TEXTURE0 + unit);
	glBindTexture(GL_TEXTURE_2D, m_texture);

}

I haven’t picked through your code, but I don’t see tearing; I see corruption. Your “Welcome to” text seems to get corrupted differently as the video goes on.

Tearing is when you have one or more horizontal line artifacts across the screen, indicating that incomplete frames are being scanned out to the video output because the rendering is not synchronized with the vertical retrace (the scan-out clock).

Strange. Yeah, it’s definitely a texture thing - the plain box that displays the FPS has no texture and therefore has no issues.

Also, this has happened on only one computer. Another one works fine, and two others threw shader errors. I found something about some cards not supporting non-constant array indices. I changed my code, and one of the computers that threw errors no longer does. My fragment shader now looks like this:


#version 330

in DATA{
 vec4 position;
 vec2 texCoord;
 vec3 normal;
 vec4 color;
 float tid;
} vs_in;

out vec4 color;


//uniform sampler2D diffuse;
uniform sampler2D textures[32];

void main()
{
    color = vs_in.color;
    if(vs_in.tid > 0.0)
    { // Correct to use multiple textures - texture2D(mytexture, mycoord)
       //gl_FragColor = texture2D(diffuse, vs_in.texCoord) * vs_in.color;
       int tid = int(vs_in.tid + 0.5);

       // no variables?  Trying constant expressions
       if(tid == 0)
            color = texture(textures[0], vs_in.texCoord) * vs_in.color;
       if(tid == 1)
            color = texture(textures[1], vs_in.texCoord) * vs_in.color;
       if(tid == 2)
            color = texture(textures[2], vs_in.texCoord) * vs_in.color;
       if(tid == 3)
            color = texture(textures[3], vs_in.texCoord) * vs_in.color;
       if(tid == 4)
            color = texture(textures[4], vs_in.texCoord) * vs_in.color;
       if(tid == 5)
            color = texture(textures[5], vs_in.texCoord) * vs_in.color;
       if(tid == 6)
            color = texture(textures[6], vs_in.texCoord) * vs_in.color;
       if(tid == 7)
            color = texture(textures[7], vs_in.texCoord) * vs_in.color;
       if(tid == 8)
            color = texture(textures[8], vs_in.texCoord) * vs_in.color;
       if(tid == 9)
            color = texture(textures[9], vs_in.texCoord) * vs_in.color;
       if(tid == 10)
            color = texture(textures[10], vs_in.texCoord) * vs_in.color;
       if(tid == 11)
            color = texture(textures[11], vs_in.texCoord) * vs_in.color;
       if(tid == 12)
            color = texture(textures[12], vs_in.texCoord) * vs_in.color;

    }
}

It’s ugly as hell, but it seems to work. I can’t check on the original computer with the corruption until tonight.

It’s a bit more complicated than that. Opaque types in GLSL are different from non-opaque types; there are special rules they have to follow. In particular, arrays of opaque types are handled specially, depending on the OpenGL version.

In OpenGL 3.3 and below, arrays of opaque types can exist, but the array indices must be integral constant expressions. In OpenGL 4.0+, arrays of opaque types can be indexed by dynamically uniform expressions.

Input variables are neither constant nor dynamically uniform. Thus, any expression that uses them is not dynamically uniform either. So there is no version of OpenGL in which indexing a sampler array with them has defined results; you get undefined behavior.

Granted, on some cards this will work; “undefined behavior” allows for the possibility of “does what I think it should”. But you cannot rely on that behavior. Even on cards where it seems to work, you can’t be sure it’ll work in the next rendering pass. Or under a full moon. Or whatever.
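
To make the rules concrete, here is a small hypothetical fragment shader (the selected uniform is purely illustrative, not something from your code):

#version 400

in vec2 texCoord;
in float tid;                  // varying input: NOT dynamically uniform
uniform int selected;          // uniform: dynamically uniform
uniform sampler2D textures[32];

out vec4 color;

void main()
{
    color = texture(textures[3], texCoord);         // constant index: fine in GLSL 3.30 and up
    color = texture(textures[selected], texCoord);  // dynamically uniform index: fine in GLSL 4.00+
    color = texture(textures[int(tid)], texCoord);  // varying index: undefined behavior in every version
}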

Oh, and your solution:

[QUOTE=Shajenko;1279076]I changed my code and one of the computers that threw errors no longer does. My fragment shader now looks like this:


[snip - shader quoted in full above]

It’s ugly as hell, but it seems to work. I can’t check on the original computer with the corruption until tonight.[/QUOTE]

You’ve avoided the dynamically uniform indexing problem, but you’ve run straight into another one. Your texture function calls are now in what is known as non-uniform control flow. This means that different shader invocations can take different paths, with some calling that function and others not.

Well, texture needs automatically computed gradients to do mipmapping and filtering, and non-uniform control flow [i]breaks[/i] automatic gradients. So you’ll have to compute the gradients yourself outside of the conditionals and then use them with textureGrad inside the conditionals.
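
Roughly like this - a sketch based on your shader above, untested:

#version 330

in DATA{
 vec4 position;
 vec2 texCoord;
 vec3 normal;
 vec4 color;
 float tid;
} vs_in;

out vec4 color;

uniform sampler2D textures[32];

void main()
{
    color = vs_in.color;

    // Gradients must be computed in uniform control flow, i.e. before any branching
    vec2 dx = dFdx(vs_in.texCoord);
    vec2 dy = dFdy(vs_in.texCoord);

    if(vs_in.tid > 0.0)
    {
       int tid = int(vs_in.tid + 0.5);

       if(tid == 0)
            color = textureGrad(textures[0], vs_in.texCoord, dx, dy) * vs_in.color;
       if(tid == 1)
            color = textureGrad(textures[1], vs_in.texCoord, dx, dy) * vs_in.color;
       // ... and so on for the remaining texture slots
    }
}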

Interesting…

Thanks to both of you. The texture corruption issue is gone, and I’ll work on the gradient computation issue as well.