Small textures coloured incorrectly

Hello everyone,

I just recently started learning OpenGL by following this tutorial
http://www.opengl-tutorial.org/beginners-tutorials/tutorial-5-a-textured-cube/
and after altering the code to my likings I ran into behavior I cannot understand.

When I try to render a texture some pixels change their colour every time I run the code while others are always simply wrong.
Some examples:
[ATTACH=CONFIG]1820[/ATTACH]
gets rendered as
[ATTACH=CONFIG]1819[/ATTACH]
and
[ATTACH=CONFIG]1821[/ATTACH]
gets rendered as
[ATTACH=CONFIG]1822[/ATTACH]
(The images were scaled up to be visible, but every coloured area is actually a single texel)
Note that the blue colour as background is intended but apart from the two at the top all texels have the wrong colour.
The colour of the bottom-right texel actually changes every time I run my code.
Everything works however when the resolution of the texture reaches 4 texels in width.
It seems to be independent from the height of the texture.

I also tried using one triangle instead of two, but there was no change (except that it only showed half of the corrupted texture).

I simplified the code as much as possible and ended up with this:
(I know it’s long but that’s the entire code needed to create this behavior and I wanted it to be possible for others to try)


#include <stdio.h>
#include <stdlib.h>

#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>

#include "src/common/config.hpp"
#include "src/common/shader.hpp"
#include "src/common/texture.hpp"

int main()
{
	//Initialise GLFW
	if( !glfwInit() )
	{
		fprintf( stderr, "Failed to initialize GLFW
" );
		getchar();
		return -1;
	}

	glfwWindowHint(GLFW_SAMPLES, 4); //Antialiasing 4x
	glfwWindowHint(GLFW_RESIZABLE,GL_TRUE);
	//OpenGL Version: 3.3
	glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
	glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
	glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); //OSX Compatability
	glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); //no old OpenGL

	//Open a window and create its OpenGL context
	GLFWwindow* window = glfwCreateWindow( 768, 512, PROGRAM_NAME, NULL, NULL);
	if( window == NULL ){
		fprintf( stderr, "Failed to open GLFW window. Intel GPUs are not always OpenGL 3.3 compatible.
" );
		getchar();
		glfwTerminate();
		return -1;
	}
	glfwMakeContextCurrent(window);

	//Initialize GLEW
	glewExperimental = true; // Needed for core profile
	if (glewInit() != GLEW_OK) {
		fprintf(stderr, "Failed to initialize GLEW
");
		getchar();
		glfwTerminate();
		return -1;
	}

	//Set background
	glClearColor(0.0f, 0.5f, 1.0f, 0.0f);
	
	printf("Vendor: %s, Version: %s, Renderer: %s
", glGetString(GL_VENDOR), glGetString(GL_VERSION), glGetString(GL_RENDERER));
	// --- Drawing Start ---
	
	//Enable depth test
	glEnable(GL_DEPTH_TEST);
	//Accept fragment if it closer to the camera than the former one
	glDepthFunc(GL_LESS); 

	GLuint VertexArrayID;
	glGenVertexArrays(1, &VertexArrayID);
	glBindVertexArray(VertexArrayID);

	GLuint programID = LoadShaders( "../src/shader/vertex_shader.glsl", "../src/shader/fragment_shader.glsl" );

	bool linear_interpolation = false;
	bool bgr = false;
	int width = 2, height = 2;
	unsigned char data[width*height*3] = {0xff, 0x00, 0xff,  0x00, 0x00, 0x00,  0x00, 0x00, 0x00,  0xff, 0x00, 0xff};
	printf("linint = %d
bgr = %d
width|height = %d|%d
data = %x %x %x | %x %x %x | %x %x %x | %x %x %x
", linear_interpolation, bgr, width, height, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11]);
	
	GLuint Texture;
	glGenTextures(1, &Texture);
	glBindTexture(GL_TEXTURE_2D, Texture);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, bgr? GL_BGR:GL_RGB, GL_UNSIGNED_BYTE, data);

	if(linear_interpolation){
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
		glGenerateMipmap(GL_TEXTURE_2D);
	}
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, linear_interpolation? GL_LINEAR:GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, linear_interpolation? GL_LINEAR_MIPMAP_LINEAR:GL_NEAREST);

	GLuint TextureID  = glGetUniformLocation(programID, "textureSampler");
	
	static const GLfloat g_vertex_buffer_data[] = 
	{
		 0.1f,  0.1f,
		 0.1f, -0.1f,
		-0.1f,  0.1f,
		 0.1f, -0.1f,
		-0.1f,  0.1f,
		-0.1f, -0.1f,
	};

	static const GLfloat g_uv_buffer_data[] = 
	{
		1.0f, 0.0f,
		1.0f, 1.0f,
		0.0f, 0.0f,
		1.0f, 1.0f,
		0.0f, 0.0f,
		0.0f, 1.0f,
	};
	
	GLuint vertexbuffer;
	glGenBuffers(1, &vertexbuffer);
	glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
	glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);

	GLuint uvbuffer;
	glGenBuffers(1, &uvbuffer);
	glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
	glBufferData(GL_ARRAY_BUFFER, sizeof(g_uv_buffer_data), g_uv_buffer_data, GL_STATIC_DRAW);

	do{
		glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
		glUseProgram(programID);
		
		// Bind our texture in Texture Unit 0
		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, Texture);
		glUniform1i(TextureID, 0);

		// 1rst attribute buffer : vertices
		glEnableVertexAttribArray(0);
		glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
		glVertexAttribPointer(
			0,                  //attribute. No particular reason for 0, but must match the layout in the shader.
			2,                  //size
			GL_FLOAT,           //type
			GL_FALSE,           //normalized?
			0,                  //stride
			(void*)0            //array buffer offset
		);

		// 2nd attribute buffer : UVs
		glEnableVertexAttribArray(1);
		glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
		glVertexAttribPointer(
			1,                  //attribute. No particular reason for 1, but must match the layout in the shader.
			2,                  //size : U+V => 2
			GL_FLOAT,           //type
			GL_FALSE,           //normalized?
			0,                  //stride
			(void*)0            //array buffer offset
		);

		glDrawArrays(GL_TRIANGLES, 0, 2*3); // 12 Triangles

		glDisableVertexAttribArray(0);
		glDisableVertexAttribArray(1);

		//Swap buffers (redraw)
		glfwSwapBuffers(window);
		glfwPollEvents();

	} //Check if the ESC key was pressed or the window was closed
	while( !glfwWindowShouldClose(window) );
	
	//Cleanup VertexBufferObject
	glDeleteBuffers(1, &vertexbuffer);
	glDeleteBuffers(1, &uvbuffer);
	glDeleteTextures(1, &Texture);
	glDeleteVertexArrays(1, &VertexArrayID);
	glDeleteProgram(programID);

	glfwTerminate();

	return 0;
}

My shaders are as simple as possible:
Fragment shader:


#version 330 core

// Interpolated texture coordinate from the vertex shader.
in  vec2 uv;
// Final fragment colour (RGB only; alpha is not written).
out vec3 colour;

// Texture bound to texture unit 0 by the application.
uniform sampler2D textureSampler;

void main()
{
	// Sample the texture at the interpolated UV and output its RGB.
	colour = texture(textureSampler, uv).rgb;
}

Vertex shader:


#version 330 core

// 2D position straight from the vertex buffer (attribute 0).
layout(location = 0) in vec2 vertexpos;
// Texture coordinate from the UV buffer (attribute 1).
layout(location = 1) in vec2 vertexuv;

// Passed through to the fragment shader (interpolated).
out vec2 uv;

void main()
{
	gl_Position = vec4(vertexpos,0,1); //z = 0, w = 1
	uv = vertexuv;
}

As mentioned in the guidelines:
OS: Linux, Debian Stretch
Graphics Card: AMD A10-5757M APU with Radeon™ HD Graphics
Output from glGetString(GL_VENDOR), glGetString(GL_VERSION), glGetString(GL_RENDERER):
Vendor: X.Org,
Version: 4.1 (Core Profile) Mesa 13.0.6
Renderer: Gallium 0.4 on AMD ARUBA (DRM 2.49.0 / 4.9.0-7-amd64, LLVM 3.9.1)

For now I circumvented the issue by upsampling the texture to 4 by 4, but I would really like to know why this is needed.

Because GL_UNPACK_ALIGNMENT defaults to 4.

Okay, so putting “glPixelStorei(GL_UNPACK_ALIGNMENT, 1);” at the beginning of my texture loading actually solved the issue, thank you for that hint.
Nevertheless I wonder why it is set to 4 in the first place.
Does it have any advantage? Is it more efficient?

It may be.

Note that it may also be more efficient to use GL_RGBA or GL_BGRA as the external format. Modern hardware typically uses 32-bpp internally for GL_RGB textures.