TL;DR: I’m abandoning my board.
Let me go in reverse and put the rest of the information I have out there.
Misc
The project I had (and still have) in mind is to connect several Pi’s to old laptop screens and synchronously display video on each of them using some sort of master/slave architecture.
I failed (or maybe the board failed me) at the first step.
Maybe OpenGL wasn’t the optimal solution since one could probably just write a script which opens up the video files on each Pi and keeps them in sync. But I wanted to learn about OpenGL and better my lacking fluency in C++.
Was I sure that the Pi could rasterize and display at 1024x768? No. Maybe it’s my naivety speaking but I thought if the Pi could decode 1920x1080 at 30 FPS it meant displaying at that resolution.
Honestly I should have probably just opened the video using the installed video player. I did earlier today and the video played back somewhat smoothly. Maybe 24 FPS?
Perhaps there is some more magic that can be applied to boost that framerate. But considering the time spent and the option to upgrade the board, at this point I’ll go for a slightly better board.
The Board
Weirdly enough I haven’t stated which board I’m doing my experiments with.
I bought a Banana Pi M2 Zero with Mali 400 MP2 a few months ago. I didn’t think too much about it and was honestly just happy there was a cheap alternative to Raspberry Pi scalpers.
Buffers
Since my objective was to only stream a video frame by frame onto a texture, I didn’t (knowingly) allocate anything but the Color Buffer.
I’ve queried OpenGL ES on my Pi and there is a 1X Color buffer allocated - which is the default from what I’ve read.
Code
In the interest of sharing everything I have for potential future readers, my main method:
int main(int argc, const char * argv[]) {
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_DOUBLEBUFFER, GL_TRUE);
// get primary display for fullscreen
GLFWmonitor *primary = glfwGetPrimaryMonitor();
// create window
GLFWwindow* window = glfwCreateWindow(VIEWPORT_WIDTH, VIEWPORT_HEIGHT, "LearnOpenGL", NULL, NULL);
if (window == NULL) {
std::cout << "Failed to create GLFW window" << std::endl;
glfwTerminate();
return -1;
}
// TODO: WRONG - GET REAL MONITOR RESOLUTION
const GLFWvidmode *video_mode = glfwGetVideoMode(primary);
MONITOR_WIDTH = video_mode->width;
MONITOR_HEIGHT = video_mode->height;
glfwSwapInterval(0);
// hide cursor
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_HIDDEN);
glfwMakeContextCurrent(window);
#ifdef __APPLE__
if (!gladLoadGLLoader((GLADloadproc) glfwGetProcAddress)) {
std::cout << "Failed to initialize GLAD" << std::endl;
return -1;
}
#endif
#ifdef __unix
if (!gladLoadGLES2Loader((GLADloadproc) glfwGetProcAddress)) {
std::cout << "Failed to initialize GLAD" << std::endl;
return -1;
}
#endif
// load and compile shaders
Shader shader("path/to/vertex_shader.vs", "path/to/fragment_shader.fs");
// change viewPort (renderable area) with window size
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
// open video reader
VideoReaderContext video_ctx;
if(!open_video_reader("path/to/video.mp4", &video_ctx)) {
std::cout << "Couldn't read frame" << std::endl;
}
constexpr int ALIGNMENT = 128;
VIDEO_WIDTH = video_ctx.width;
VIDEO_HEIGHT = video_ctx.height;
RGB_FRAME_SIZE = VIDEO_WIDTH * VIDEO_HEIGHT * 4;
float VIEWPORT_WIDTH_RATIO = 1 - VIDEO_WIDTH / VIEWPORT_WIDTH;
float VIEWPORT_HEIGHT_RATIO = 1 - VIDEO_HEIGHT / VIEWPORT_HEIGHT;
GLfloat vertices[] = {
// vertices padding texture
1 - VIEWPORT_WIDTH_RATIO, 1 - VIEWPORT_HEIGHT_RATIO, 0, 0, 1, 1,
1 - VIEWPORT_WIDTH_RATIO, -1 + VIEWPORT_HEIGHT_RATIO, 0, 0, 1, 0,
-1 + VIEWPORT_WIDTH_RATIO, -1 + VIEWPORT_HEIGHT_RATIO, 0, 0, 0, 0,
-1 + VIEWPORT_WIDTH_RATIO, 1 - VIEWPORT_HEIGHT_RATIO, 0, 0, 0, 1
};
GLuint indices[] = {
0, 1, 3, // first triangle
1, 2, 3 // second triangle
};
// create and bind texture
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
// how to handle overscaling
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// texture filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
uint8_t *frame_buffer;
if (posix_memalign((void**) &frame_buffer, ALIGNMENT, RGB_FRAME_SIZE) != 0) {
std::cout << "Couldn't allocate frame buffer" << std::endl;
}
std::vector<void*> frames;
std::vector<int64_t> pts_list;
int c = 0;
while (!video_ctx.end_of_file) {
void *temp;
int64_t pts;
if (!read_frame(&video_ctx, frame_buffer, &pts)) {
std::cout << "Failed to load frame" << std::endl;
return 1;
}
pts_list.push_back(pts);
if (posix_memalign((void**) &temp, ALIGNMENT, RGB_FRAME_SIZE) != 0) {
std::cout << "Couldn't allocate frame buffer" << std::endl;
}
std::memcpy(temp, frame_buffer, RGB_FRAME_SIZE);
frames.push_back(temp);
}
free(frame_buffer);
// frame row alignment
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
// create vertex buffer object, which is sent to GPU as a whole
unsigned int VBO;
glGenBuffers(1, &VBO);
// create element buffer object, which is sent to GPU as a whole
// EBO uses indices to draw triangles in a given order to avoid overlap
unsigned int EBO;
glGenBuffers(1, &EBO);
unsigned int VAO;
glGenVertexArrays(1, &VAO);
// bind vertex array object
glBindVertexArray(VAO);
// copy vertices array in a buffer for OpenGL to use
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices) + 8, vertices, GL_STATIC_DRAW);
// position attributes
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void*) 0);
glEnableVertexAttribArray(0);
// vertex sequence
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void*)(4 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
// copy index array in element buffer for OpenGL to use
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
int current_frame = 0;
int counting_frame = 0;
unsigned int PBO;
glGenBuffers(1, &PBO);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, PBO);
// get the next bigger number of RGBA_FRAME_SIZE in the series x^n where x in 2^i
int BUFFER_SIZE = get_next_aligned_number(128);
int FRAMES_IN_BUFFER = 16;
glBufferData(GL_PIXEL_UNPACK_BUFFER, BUFFER_SIZE * FRAMES_IN_BUFFER, NULL, GL_STREAM_DRAW);
// bind texture
glBindTexture(GL_TEXTURE_2D, texture);
// load texture into OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, VIDEO_WIDTH, VIDEO_HEIGHT, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
if (get_error("glTexImage")) {
return -1;
}
bool initial = true;
bool clear_ghosts = false;
shader.use();
glBindVertexArray(VAO);
float start_time, end_time;
// render loop
while (!glfwWindowShouldClose(window)) {
static bool initial_frame;
if (initial_frame) {
glfwSetTime(0.0);
initial_frame = false;
}
start_time = glfwGetTime();
if (counting_frame % frames.size() == 0) {
initial_frame = true;
}
process_input(window);
// clear the colorbuffer
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
if (get_error("glClear")) {
return -1;
}
int64_t *pts = &pts_list.at(current_frame);
double pt_in_seconds = *pts * (double) video_ctx.time_base.num / (double) video_ctx.time_base.den;
/*
if (pt_in_seconds > glfwGetTime()) {
glfwWaitEventsTimeout(pt_in_seconds - glfwGetTime());
}
*/
if (initial) {
glBufferData(GL_PIXEL_UNPACK_BUFFER, BUFFER_SIZE * FRAMES_IN_BUFFER, NULL, GL_STREAM_DRAW);
if (get_error("glBufferData")) {
return -1;
}
for (int i=0; i!=FRAMES_IN_BUFFER; i++) {
glBufferSubData(GL_PIXEL_UNPACK_BUFFER, BUFFER_SIZE * i, BUFFER_SIZE, frames[(current_frame + i) % frames.size()]);
if (get_error("glBufferSubData")) {
return -1;
}
}
}
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, (void*)(intptr_t)(BUFFER_SIZE * (counting_frame % FRAMES_IN_BUFFER)));
if (get_error("glTexSubImage")) {
return -1;
}
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
if (get_error("glDrawElements")) {
return -1;
}
glfwSwapBuffers(window);
glfwPollEvents();
initial = false;
if (current_frame == 4) {
clear_ghosts = true;
}
if (clear_ghosts) {
glBufferSubData(GL_PIXEL_UNPACK_BUFFER, BUFFER_SIZE * ((FRAMES_IN_BUFFER + (counting_frame - 4)) % FRAMES_IN_BUFFER) , BUFFER_SIZE, frames[(counting_frame + FRAMES_IN_BUFFER - 4) % frames.size()]);
}
glFinish();
if(get_error("glFinish")) {
return -1;
}
end_time = glfwGetTime();
std::cout << end_time - start_time << "ms" << std::endl;
std::cout << 1 / (end_time - start_time) << "FPS" << std::endl << std::endl;
current_frame = ++current_frame % frames.size();
counting_frame++;
}
glfwTerminate();
close_reader(&video_ctx);
for (auto frame : frames) {
free(frame);
}
return 0;
}
A Few comments:
- The Shader class is pretty much the same as introduced in the beginning of learnopengl
- The video reader is based off of a few videos by the beautiful Bartholomew and uses FFmpeg to decode video file (Part One)
- This implementation copies a number of frames into the PBO and copies new data into memory 4 frames after the frame has been rendered (approx. as suggested by Dark_Photon)
- I would still consider myself to be a C++ newbie so forgive my obvious mistakes if found
Final Remarks (for now)
I must again thank @Dark_Photon for their patience and willingness to explain details I wouldn’t have thought to come across.
I’m looking for a new board now and I’ll try to make sure it can handle the video playback.
So long!