glMapBufferRange doesn't set the values of a compute shader array

Hi everyone, new poster here. I've been trying to implement tiled deferred shading and I've been stuck on a problem for some time now. I'm trying to store an array of point light structs that I want to initialize and send to a compute shader for further processing. I'm using a Shader Storage Buffer Object for this and glMapBufferRange to provide the values. As a check, the shader paints the screen red if the array size is 0 and yellow if it's larger than 0, and so far I can't get it to work.

Here’s the compute shader:


#version 430
#define MAX_WORK_GROUP_SIZE 16
#define SCREEN_WIDTH 1280.0f
#define SCREEN_HEIGHT 720.0f


uniform sampler2D positionMap;
uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;

layout(binding = 4, rgba32f) uniform writeonly image2D finalImage;
layout(binding = 5, rgba32f) uniform writeonly image2D otherImage;

struct PointLight
{
	vec3 position; //4,8,12
	vec3 color; // 16,20, 24
	float radius; //28
	float diffuseIntensity; //32
	float ambientIntensity; //36
	float Constant; //40
	float Linear; //44
	float Exp; //48
};

layout(std430, binding = 6) buffer BufferObject
{
	PointLight pointLights[];
};

shared uint minDepth;
shared uint maxDepth;

layout(local_size_x = MAX_WORK_GROUP_SIZE, local_size_y = MAX_WORK_GROUP_SIZE) in;

void main()
{

	if(gl_LocalInvocationIndex == 0){
        minDepth = 0xFFFFFFFF;
        maxDepth = 0;
    }
	ivec2 pixelPos = ivec2(gl_GlobalInvocationID.xy);
	vec2 uv = vec2(pixelPos.x / SCREEN_WIDTH, pixelPos.y / SCREEN_HEIGHT);

	
	float d = texture(depthMap,uv).z;
	
	uint depth = uint(d * 0xFFFFFFFF);

	//compares the content of minDepth to depth and writes the minimum value to minDepth
	atomicMin(minDepth, depth);
//	barrier();
	//compares the content of maxDepth to depth and writes the maximum value to the maxDepth
	atomicMax(maxDepth, depth);



	///Write a single texel into an image
/*	barrier();
	imageStore(finalImage, pixelPos, vec4(float(float(maxDepth) / float(0xFFFFFFFF))));
	
	barrier();
	imageStore(otherImage, pixelPos, vec4(float(float(minDepth) / float(0xFFFFFFFF))));
	*/
	
	PointLight p = pointLights[0];
	PointLight p2 = pointLights[1];
	if(pointLights.length() == 0)
	{
		barrier();
		imageStore(finalImage, pixelPos, vec4(1.0,0.0,0.0,1.0));
	
		barrier();
		imageStore(otherImage, pixelPos, vec4(1.0,0.0,0.0,1.0));
	}
	if(pointLights.length() > 0)
	{
		barrier();
		imageStore(finalImage, pixelPos, vec4(1.0,1.0,0.0,1.0));
	
		barrier();
		imageStore(otherImage, pixelPos, vec4(1.0,1.0,0.0,1.0));
	}

}

Here's how I'm trying to initialize the buffer with some test values:


My3dVector currentColor(1.0f,1.0f,1.0f);
	glGenBuffers(1,&m_pointLightBuffer);
	glBindBuffer(GL_SHADER_STORAGE_BUFFER,m_pointLightBuffer);
	glBufferData(GL_SHADER_STORAGE_BUFFER,NUM_OF_LIGHTS*sizeof(struct TDPointLight), NULL, GL_STATIC_DRAW);

	struct TDPointLight* pointlights = (struct TDPointLight*) glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, NUM_OF_LIGHTS*sizeof(struct TDPointLight), GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT );

	int numMapped = ARRAY_SIZE_IN_ELEMENTS(pointlights); // note: pointlights is a pointer here, so this won't report NUM_OF_LIGHTS
	for(int i = 0; i < NUM_OF_LIGHTS; ++i)
	{
		float Max  = 80.0f;
		float Min = -80.0f;

		float MaxZ = 80.0f;
		float MinZ = -80.0f;

		float ranx = ((float(rand()) / float(RAND_MAX)) * (Max - Min)) + Min;
		float ranz = ((float(rand()) / float(RAND_MAX)) * (Max - Min)) + Min;

		int maxCol = 8;
		int minCol = 1;
		//int ranCol = ((rand() / RAND_MAX) * (maxCol - minCol)) + minCol;
		int ranCol = (rand()%(maxCol-minCol))+minCol;

		if(ranCol == 0)
			printf("error, color 8 doesnt exist");
		if(ranCol == 1)
			currentColor = COLOR_WHITE;
		if(ranCol == 2)
			currentColor = COLOR_RED;
		if(ranCol == 3)
			currentColor = COLOR_GREEN;
		if(ranCol == 4)
			currentColor = COLOR_CYAN;
		if(ranCol == 5)
			currentColor = COLOR_BLUE;
		if(ranCol == 6)
			currentColor = COLOR_PURPLE;
		if(ranCol == 7)
			currentColor = COLOR_ORANGE;
		if(ranCol == 8)
			printf("error, color 8 doesnt exist");

		pointlights[i].position = My3dVector(1.0f,1.0f,1.0f);
		pointlights[i].color = My3dVector(1.0f,0.0f,0.0f);
		pointlights[i].radius = 10.0f;
		pointlights[i].diffuseIntensity = 10.0f;
		pointlights[i].ambientIntensity = 0.1f;
		//pointlights[i].color = currentColor;
		//pointlights[i].position = My3dVector(ranx,3.0f,ranz);
		//m_pointLight[i].m_Position = My3dVector(0.0f,2.0f,0.0f);
		pointlights[i].Constant = 0.0f;
		pointlights[i].Linear = 0.0f;
		pointlights[i].Exp = 0.6f;
	}
	glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
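
One sanity check I could add (not in my code yet, just a sketch) is verifying that glMapBufferRange actually returned a pointer and that the buffer store has the size I expect:


	// Hypothetical check: would go right after the glMapBufferRange call, before the write loop.
	if (pointlights == NULL)
		printf("glMapBufferRange failed, GL error 0x%x\n", glGetError());

	GLint bufSize = 0;
	glGetBufferParameteriv(GL_SHADER_STORAGE_BUFFER, GL_BUFFER_SIZE, &bufSize);
	printf("SSBO size: %d bytes, expected %d\n", bufSize, (int)(NUM_OF_LIGHTS * sizeof(struct TDPointLight)));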


Then the process goes something like:

  1. Use the compute shader
  2. Set all the uniforms, then bind some deferred textures and other things using this code

	for(unsigned int i = 0; i<ARRAY_SIZE_IN_ELEMENTS(m_textures); ++i)
	{
		glActiveTexture(GL_TEXTURE0 + i);
		glBindTexture(GL_TEXTURE_2D, m_textures[TDGBuffer_TEXTURE_TYPE_POSITION + i]);
	}
	glUniform1i(glGetUniformLocation(program,"depthMap"),3);
	glActiveTexture(GL_TEXTURE3);
	glBindTexture(GL_TEXTURE_2D,m_depthTexture);

	//glActiveTexture(GL_TEXTURE4);
	//glBindTexture(GL_TEXTURE_2D,m_finalTexture);
	glBindImageTexture(4, m_finalTexture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);

	glBindImageTexture(5,m_otherTexture,0,GL_FALSE,0,GL_WRITE_ONLY,GL_RGBA32F);
	
	glBindBufferBase(GL_SHADER_STORAGE_BUFFER,6,m_pointLightBuffer);
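
In case the binding point itself is the problem, here's a debug query I could run after linking (just a sketch; it only uses the same program handle as above):


	// Hypothetical debug check: confirm the block "BufferObject" really is on binding point 6.
	GLuint blockIndex = glGetProgramResourceIndex(program, GL_SHADER_STORAGE_BLOCK, "BufferObject");
	if (blockIndex != GL_INVALID_INDEX)
	{
		const GLenum prop = GL_BUFFER_BINDING;
		GLint binding = -1;
		glGetProgramResourceiv(program, GL_SHADER_STORAGE_BLOCK, blockIndex, 1, &prop, 1, NULL, &binding);
		printf("BufferObject binding = %d\n", binding); // should print 6
	}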

Then finally, in the main loop, I call these functions:


	glDispatchCompute((window_width/MAX_WORK_GROUP_SIZE), (window_height/MAX_WORK_GROUP_SIZE), 1);
	glFinish();

I don't have any memory barriers anywhere in the code, but since I'm not modifying the buffer's contents anywhere, that shouldn't be a problem: it gets initialized once and then stays the same forever.
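
To be clear, the only barrier I could imagine needing is a glMemoryBarrier after the dispatch, before the result images are used elsewhere; something like this sketch:


	// Right after glDispatchCompute, before anything samples finalImage/otherImage:
	glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT);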

A couple of things to look at

  1. I have had trouble with std430 on AMD cards not packing correctly

  2. Avoid using vec3 in your structures; use vec4 and put one of your extra variables in the spare float, as in the sketch below.
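
Something along these lines is what I mean (just a sketch with made-up names, assuming your vectors are three tightly packed floats): pack each vec3 plus one of your floats into a vec4 so the std430 GLSL struct and the C++ struct end up with the same 48-byte stride.


	// GLSL side would be:  vec4 positionAndRadius;  vec4 colorAndAmbient;
	//                      float diffuseIntensity, Constant, Linear, Exp;
	struct PackedPointLight
	{
		float positionAndRadius[4]; // xyz = position, w = radius
		float colorAndAmbient[4];   // xyz = color,    w = ambientIntensity
		float diffuseIntensity;
		float Constant;
		float Linear;
		float Exp;
	};
	static_assert(sizeof(PackedPointLight) == 48, "CPU/GPU layout mismatch");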

Thanks for the response. I tried rearranging the structures, but it didn't appear to help. I'm also running an NVIDIA card. I'm really confused about what's happening, since I call glMapBufferRange after I have created and used the compute shader, so it should receive the data.

Okay, I think I have found a solution: after rearranging the structures so they look like this


struct PointLight
{
	vec4 position;          // std430 offset 0
	//float radius;
	vec4 color;             // std430 offset 16
	
	float diffuseIntensity; // std430 offset 32
	//float ambientIntensity;
	float Constant;         // std430 offset 36
	float Linear;           // std430 offset 40
	float Exp;              // std430 offset 44
};

and


struct TDPointLight
{
	My3dVector position;
	float radius;
	My3dVector color;
	float ambientIntensity;
	float diffuseIntensity;
	
	float Constant;
	float Linear;
	float Exp;
};
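
Assuming My3dVector is just three tightly packed floats, both structs should now come out at 48 bytes per element; a compile-time check I could add to be sure:


	// Hypothetical check; relies on My3dVector being exactly three packed floats.
	static_assert(sizeof(My3dVector) == 3 * sizeof(float), "My3dVector has padding");
	static_assert(sizeof(struct TDPointLight) == 48, "TDPointLight is not 48 bytes");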

I can now print out the correct color to the framebuffer using:


	PointLight p = pointLights[3];
	PointLight p2 = pointLights[55];
	
	//vec3 test = vec3(p.posX,p.posY,p.posZ);
	//vec3 test2 = vec3(p2.posX,p2.posY,p2.posZ);
	vec4 test = p.color;
	vec4 test2 = p2.color;
	
	barrier();
	imageStore(finalImage, pixelPos, test);
	
	barrier();
	imageStore(otherImage, pixelPos, test2);

Which appears to be working just fine. I think I'll just add some extra floats that are always 1 for the w value of the vec4. I think the problem is that the length() function either doesn't work the way I thought it did, or I've done something horribly wrong: I still get a length of 0 for the array, which I think is because the length isn't updated when the array is backed by a buffer, which seems logical.
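
As a workaround I'll probably just pass the light count in explicitly instead of relying on .length(); roughly this (hypothetical uniform name):


	// Declare "uniform int numLights;" in the compute shader and use it for the size checks.
	glUniform1i(glGetUniformLocation(program, "numLights"), NUM_OF_LIGHTS);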

You might want to read the memory layout section on this page. I always use std140, which is very specific about the layout.
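
For example, your original struct would need a CPU-side mirror like this under std140 (an illustration only, with made-up padding names, assuming tightly packed 3-float vectors):


	struct Std140PointLight
	{
		float position[3]; float pad0;    // a vec3 is padded out to 16 bytes before the next vec3
		float color[3];    float radius;  // a lone float can sit in the slot right after a vec3
		float diffuseIntensity;
		float ambientIntensity;
		float Constant;
		float Linear;
		float Exp;
		float pad1[3];                    // array elements are padded to a multiple of 16 bytes
	};
	static_assert(sizeof(Std140PointLight) == 64, "std140 stride mismatch");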