I am getting a result that simply does not make sense to me given the code.

I use glfw to create a simple interactive display where I am rendering two cubes of different size and a floor (I am using this as a testing ground for other calculations I want to develop). I can zoom in/out and move the camera at will. When I press ‘c’ I want to save the distance from the camera position to the cubes and floor. After that I just want to return to exploring the scene.

The shader I use to ‘capture’ this distance is very basic (the one used to render on the screen is not shown here).

Vertex:

```
#version 330 core
layout (location = 0) in vec3 aPos;

uniform mat4 model;       // object -> world
uniform mat4 view;        // world -> eye (camera sits at the eye-space origin)
uniform mat4 projection;  // eye -> clip

// Eye-space position, interpolated so the fragment stage can measure
// the true euclidean distance from the camera to the surface.
out vec4 pos;

void main() {
    // Compute the eye-space position once and reuse it for gl_Position
    // instead of repeating the full view * model multiply.
    pos = view * model * vec4(aPos, 1.0f);
    gl_Position = projection * pos;
}
```

Fragment:

```
#version 330 core

// Eye-space fragment position supplied by the vertex stage.
in vec4 pos;

// Single-channel output: euclidean distance from the camera (eye origin)
// to this fragment; lands in the GL_R32F color attachment.
out float depth;

void main() {
    vec3 p = pos.xyz;
    depth = sqrt(dot(p, p));
}
```

The framebuffer to capture this information is created before the rendering loop,

```
# create depth buffer
# Offscreen FBO used for the capture pass: a GL_R32F color attachment
# receives the eye-space distance written by the depth shader, and a
# 32-bit float depth attachment provides depth testing during that pass.
fbo = glGenFramebuffers(1)
glBindFramebuffer(GL_FRAMEBUFFER, fbo)

# COLOR attachment (stores camera-to-surface distance, one float per pixel)
color_depth_tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, color_depth_tex)
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, SCR_WIDTH, SCR_HEIGHT, 0, GL_RED, GL_FLOAT, None)
# NEAREST filtering: these are measurements, never sampled with interpolation.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, 0)
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, color_depth_tex, 0)

# DEPTH attachment (needed so hidden-surface removal works in the capture pass)
depth_tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, depth_tex)
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, SCR_WIDTH, SCR_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, 0)
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, depth_tex, 0)

# Fail fast if this attachment combination is unusable on the current driver.
if glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE:
    raise RuntimeError("depth-capture framebuffer is incomplete")

# Unbind so ordinary on-screen rendering is not silently redirected into the
# FBO before the first capture (the original left it bound here).
glBindFramebuffer(GL_FRAMEBUFFER, 0)
```

The rendering loop is,

```
shader= shaderDisplay
while (not glfwWindowShouldClose(window)):
# input
processInput(window)
# define basic viewing parameters
view = camera.GetViewMatrix()
projection = glm.perspective(glm.radians(camera.Zoom), SCR_WIDTH / SCR_HEIGHT, 0.1, 100.0)
if camera.capture:
# bind the framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, fbo)
glClearDepth(depth_tex)
# set shader to shader depth
shader = shaderDepth
# clear colors and buffers
glClearColor(0.1, 0.1, 0.1, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
glEnable(GL_DEPTH_TEST)
# set uniforms on shader
shader.use()
shader.setMat4("view", view)
shader.setMat4("projection", projection)
# RENDER OBJECTS
# draw floor
glBindVertexArray(planeVAO)
glBindTexture(GL_TEXTURE_2D, floorTexture)
shader.setMat4("model", glm.mat4(1.0))
glDrawArrays(GL_TRIANGLES, 0, 6)
glBindVertexArray(0)
# cube 1
glBindVertexArray(cubeVAO)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, cubeTexture)
model = glm.mat4(1.0)
model = glm.translate(model, glm.vec3(0.0, 0.0, -0.75))
shader.setMat4("model", model)
glDrawArrays(GL_TRIANGLES, 0, 36)
# cube 2
scale = 0.35
model = glm.mat4(1.0)
model = glm.scale(model, glm.vec3(scale, scale, scale))
model = glm.translate(model, glm.vec3(0.0, 0, 2.0))
shader.setMat4("model", model)
glDrawArrays(GL_TRIANGLES, 0, 36)
glBindVertexArray(0)
# end of capture
if camera.capture:
# save outputs
depth = glReadPixelsf(0, 0, SCR_HEIGHT, SCR_WIDTH, GL_RED, GL_FLOAT)
np.save('depth', np.flipud(depth))
# unbind framebuffer back to default
glBindFramebuffer(GL_FRAMEBUFFER,0)
# reset capture flag
camera.capture = False
# reset shader
shader= shaderDisplay
glfwSwapBuffers(window)
glfwPollEvents()
```

When I capture the following scene

the output I get is the following,

What I cannot understand is why I get repeated instances of my cube instead of a single one, given the vertex and fragment shaders shown above, and given that I am not rendering again after capturing the scene. Also — and this may be related to the first problem — why does the result appear as horizontal bands?