How to convert a cubemap into a 2d image?

I have not been able to advance at all regarding this. I will just post what is absolutely necessary and hopefully someone can help guide me as to what I am doing wrong.
Again, my intention is to obtain a 2D image from sampling a depth cubemap (a cubemap containing the depth information for the scene around the camera location) .

My result is just a blank image made up of 0s. I do not know why I am not getting any output FYI, depth_cube_tex is the depth cubemap.

My sampling procedure is as follows,

# SAMPLING CUBEMAP
##################
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS)

angle_step = 0.025
hangle = np.array([0, 360, angle_step], dtype=np.float32)
vangle = np.array([-90, 90, angle_step], dtype=np.float32)

width = int( (hangle[1]-hangle[0]) / hangle[2])
height = int( (vangle[1]-vangle[0]) / hangle[2])

# create scene buffer
scene_buffer = glGenFramebuffers(1)
glBindFramebuffer(GL_FRAMEBUFFER, scene_buffer)

# generate color depth buffer
color_depth = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, color_depth)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, None)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, color_depth, 0) 

# send information to sampling shader
cubemap_sampling_shader.use()
cubemap_sampling_shader.set_uniform_int('depth_cube', depth_cube_tex)
cubemap_sampling_shader.set_uniform_vec3('display_size', glm.vec2(width,height))
cubemap_sampling_shader.set_uniform_float('angle_step', angle_step)

#draw to my scene buffer
glBindVertexArray(quad_vao)
glBindTexture(GL_TEXTURE_2D, color_depth)
glDrawArrays(GL_TRIANGLES, 0, 6)
glBindVertexArray(0)

# save output ?
zbuffer = glReadPixelsf(0, 0, width, height, GL_RED, GL_FLOAT)
zbuffer = zbuffer.reshape((height, width))
np.save('Scene', np.flipud(zbuffer))

glBindFramebuffer(GL_FRAMEBUFFER, 0) #scene_buffer

The 'cubemap_sampling` shader program has the following vertex shader,

#version 330 core
layout (location = 0) in vec2 aPos;

void main()
{ 
    gl_Position = vec4(aPos.x, aPos.y, 0.0f, 1.0f);
}

Given that I am not rendering any new geometry, I am uncertain about what to use for a vertex fragment. The vertex shader I am including here is thought with regards to a simple quad (see below). fragment shader is the following,

#version 330 core

uniform samplerCube depth_cube; 
uniform float angle_step;
uniform vec2 display_size; // width, height of display

out float depth;

void main()
{
    // phi the horizontal deviation angle
    float phi= (gl_FragCoord.x - display_size.x / 2 ) * radians(angle_step);
    
    // theta is the vertical deviation angle
    float theta = (gl_FragCoord.y - display_size.y / 2 ) * radians(angle_step);
    
    // create sampling direction vector
    float x = sin(theta) * cos(phi);
    float z = sin(theta) * sin(phi);
    float y = cos(theta);
    depth = texture(depth_cube,vec3(x,y,z)).r;
}

And my quad_vao is just two triangles spaning the entire screen in Normalize Device Coords.

You go through a lot of effort to compute x, y, and z, and then you don’t use them. Whatever depth is, it will have the same value for every fragment.

My bad @Alfonse_Reinheart , I was testing something and forgot to change this back to what it was. Now it is correct!

Okay… here is somewhat what I was after. I decided to read in a cubemap (borrowed from LearnOpenGL see link) rather than to use the depth cubemap I had originally generated in order to simplify my thinking. In the process of doing this I realized several things for which I really do not have a good answer (I will point these out).

Here is the code,

def loadCubemap(faces : list[str]) -> int:

    textureID = glGenTextures(1)
    glBindTexture(GL_TEXTURE_CUBE_MAP, textureID)

    for i in range(len(faces)):
        try:
            img = Image.open(faces[i])

            glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, img.width, img.height, 0, GL_RGB, GL_UNSIGNED_BYTE, img.tobytes())

            img.close()
            
        except:
            print("Cubemap texture failed to load at path: " + faces[i])


    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE)
    glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS)


    return textureID

# initialize camera
camera = Camera(position=(0,0,0))
camera.width = 720
camera.height = 360

# initialize opengl
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.DEPTH_BITS, 32) # not here before

# create glfw window
window = glfw.create_window(camera.width, camera.height, 'LearnOpengl', None, None)
if window is None:
    print("Failed to create GLFW window")
    glfw.terminate()           
glfw.make_context_current(window)

# load textures
# -------------
faces = [
    pth / "textures/skybox/right.jpg",
    pth / "textures/skybox/left.jpg",
    pth / "textures/skybox/top.jpg",
    pth / "textures/skybox/bottom.jpg",
    pth / "textures/skybox/front.jpg",
    pth / "textures/skybox/back.jpg",
]

cubemapTexture = loadCubemap(faces)

# Create VAO and VBO of quad for rendering

# vertex attributes for a quad that fills the entire screen in Normalized Device Coordinates.
#    positions /  texture   
quad_vertices = np.array( 
    [-1.0,  1.0,  0.0, 1.0,
    -1.0, -1.0,  0.0, 0.0,
    1.0, -1.0,  1.0, 0.0,

    -1.0,  1.0,  0.0, 1.0,
    1.0, -1.0,  1.0, 0.0,
    1.0,  1.0,  1.0, 1.0],
    dtype= np.float32)

# create quad vao and vbo
quad_vao = glGenVertexArrays(1)
glBindVertexArray(quad_vao)

quad_vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, quad_vbo)
glBufferData(GL_ARRAY_BUFFER, quad_vertices.nbytes, quad_vertices, GL_STATIC_DRAW)

# position attribute
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * 4, ctypes.c_void_p(0)) 
glEnableVertexAttribArray(0) # position

# texture attribute
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * 4, ctypes.c_void_p(2 * 4))

# compile shader
shader = Shader(pth / 'depthcube/shader/equirectangular.vs', pth / 'depthcube/shader/equirectangular.fs')

# SAMPLING PARAMETERS
hangle = np.array([0, 360, 0.025])
vangle = np.array([-90, 90, 0.025])
width = int((hangle[1]-hangle[0]) / hangle[2])
height = int((vangle[1]-vangle[0]) / vangle[2])

A pointer here. The dimensions of width and height can have as a maximum the value of GL_MAX_VIEWPORT_DIMS. Hence, there is a limit to how fine the sampling can be. I would be interested to learn if this can be increased somehow?


# CREATE FRAMEBUFFER

fbo = glGenFramebuffers(1)
glBindFramebuffer(GL_FRAMEBUFFER, fbo)

texcolor = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texcolor)
#glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, None)
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, width, height, 0, GL_RED, GL_FLOAT, None)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texcolor, 0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)


# RENDER
glViewport(0,0,width,height)

# use shader
shader.use()

shader.set_uniform_float('hrange', hangle[1]-hangle[0])
shader.set_uniform_float('vrange', vangle[1]-vangle[0])
shader.set_uniform_vec2('size_display', glm.vec2(width,height))
shader.set_uniform_vec2('offset', glm.vec2(-90.0, -45.0))
shader.set_uniform_int('depthcube', 0) #cubemapTexture WHY 0???

Here is an interesting point I was stucked for the longest time. I thought that in order to introduce the cubemap to my shader I needed to pass on the texture id of the cubemap, ie. the value returned by the loadCubemap() function above. I was wrong, instead for the process to work I just passed the value of 0. I do not have a good explanation as to why this is so perhaps someone could explain this.

# render to fbo
glBindFramebuffer(GL_FRAMEBUFFER, fbo)

glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)


shader.use()
glBindVertexArray(quad_vao)
glDrawArrays(GL_TRIANGLES, 0, 6)
#glBindVertexArray(0)

glfw.swap_buffers(window)

# save #output ?
zbuffer = glReadPixelsf(0, 0, width, height, GL_RED, GL_FLOAT)
zbuffer = zbuffer.reshape((height, width))
np.save('Scene', np.flipud(zbuffer))

# unbind
glBindFramebuffer(GL_FRAMEBUFFER, 0)

The shaderprogram has the following vertex shader,

#version 330 core

layout (location = 0) in vec2 position; 


void main(){
    gl_Position = vec4(position.x, position.y, 0.0f, 1.0f);
}

and fragment shader,

#version 330 core

uniform vec2 size_display;
uniform vec2 offset;
uniform float hrange;
uniform float vrange;
uniform samplerCube depthcube;

//out vec4 color;
out float color;

void main()
{
    // scale coordinates (0,1)
    vec2 scaleCoord = gl_FragCoord.xy / size_display;

    // convert offset to radians
    vec2 off = vec2(radians(offset.x), radians(offset.y));

    // center the view
    vec2 angles = ((scaleCoord * 2.0) - vec2(1.0)) * vec2(radians(hrange/2), radians(vrange/2)) + off;

    // sampling vector
    vec3 samplingVec = vec3(-cos(angles.y) * cos(angles.x), sin(angles.y), cos(angles.y) * sin(angles.x));

    // output
    color = texture(depthcube, samplingVec).r;

Using the above, I am able to sample a cubemap using spherical coordinates (adapted to OpenGl specs).

You pass the number of the texture unit to which the texture you want to sample is bound. The active texture unit is changed with glActiveTexture.

@carsten_neumann Thanks for the clarification!