Hi
I am developing an Augmented Reality system based on a rigid registration of a brain model. I have problems in rendering the model and I think that I am not using pushmatrix and multmatrix in the proper way.I have performed the rigid registration and i have obtained both the registration matrix and the pose of the real camera (which is looking at the “real” brain). The fact is that, using them in this code, the model it’s not appearing and (when it appears) it is moving.
I know that the opengl model should stay centered in the window and should only rotate and scale according to the real camera pose.
I hope someone may help me finding the error!
Thank you:)
Here is my code:
#include "ros/ros.h"
#include "geometry_msgs/Pose.h"
#include "geometry_msgs/PoseStamped.h"
#include <iostream>
#include <fstream>
#include <string>
#include <GLFW/glfw3.h>
#include <GL/glu.h>
#include "opencv2/highgui/highgui.hpp"
// My Library
#include"common.hpp"
#include <cv_bridge/cv_bridge.h>
//#include <glm/glm.hpp> non trova niente
#include <vtkGenericDataObjectReader.h>
#include <vtkStructuredGrid.h>
#include <vtkSmartPointer.h>
#include <vtkPolyData.h>
#include <vtkCellArray.h>
#include <vtkPoints.h>
#include <vtkIdList.h>
#include <vtkPolygon.h>
#include <vtkOBJReader.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
//KDL
#include <tf/LinearMath/Matrix3x3.h>
#include "kdl_conversions/kdl_msg.h"
#include <kdl/frames.hpp>
using namespace cv;
geometry_msgs::Pose CameraStreamPose;
geometry_msgs::PoseStamped CameraStreamPose_stam;
GLuint texId;
cv::Mat cameraMatrixL;
cv::Mat distCoeffL;
//var VTK
vtkSmartPointer<vtkGenericDataObjectReader> reader = vtkSmartPointer<vtkGenericDataObjectReader>::New();
vtkPolyData* output;
vtkCellArray *strips;
vtkPoints *points;
vtkIdType npts;
vtkIdType * ptIds;
std::vector<float> vertex;
int sizeBuffer;
GLfloat CameraStreamPose_gl[16] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0};
GLfloat ModelPose_gl[16] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0};
GLfloat ModelPose_gl_noInv[16] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0};
GLfloat CameraStreamPose_gl_noInv[16] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0};
GLfloat CameraStreamPose_gl_r[16] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0};
GLfloat ModelPose_gl_r[16] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0};
KDL::Frame M;
KDL::Frame Inversa;
KDL::Frame CameraStreamPose_kdl;
KDL::Frame CameraStreamPose_kdl_inv;
KDL::Frame ModelPose_kdl;
KDL::Frame ModelPose_kdl_inv;
//help
geometry_msgs::Pose GeoPose;
//AR
void render(GLFWwindow* window);
void initGL(int w, int h);
void chatterCallback(const geometry_msgs::PoseStampedPtr& t_s)
{
//CameraStreamPose.orientation = t_s->pose.orientation;
//CameraStreamPose.position = t_s->pose.position;
tf::poseMsgToKDL(t_s->pose,CameraStreamPose_kdl);
CameraStreamPose_kdl_inv = CameraStreamPose_kdl.Inverse();
CameraStreamPose_gl[0] = CameraStreamPose_kdl_inv(0,0);
CameraStreamPose_gl[1] = CameraStreamPose_kdl_inv(1,0);
CameraStreamPose_gl[2] = CameraStreamPose_kdl_inv(2,0);
CameraStreamPose_gl[3] = CameraStreamPose_kdl_inv(3,0);
CameraStreamPose_gl[4] = CameraStreamPose_kdl_inv(0,1);
CameraStreamPose_gl[5] = CameraStreamPose_kdl_inv(1,1);
CameraStreamPose_gl[6] = CameraStreamPose_kdl_inv(2,1);
CameraStreamPose_gl[7] = CameraStreamPose_kdl_inv(3,1);
CameraStreamPose_gl[8] = CameraStreamPose_kdl_inv(0,2);
CameraStreamPose_gl[9] = CameraStreamPose_kdl_inv(1,2);
CameraStreamPose_gl[10] = CameraStreamPose_kdl_inv(2,2);
CameraStreamPose_gl[11] = CameraStreamPose_kdl_inv(3,2);
CameraStreamPose_gl[12] = CameraStreamPose_kdl_inv(0,3);
CameraStreamPose_gl[13] = CameraStreamPose_kdl_inv(1,3);
CameraStreamPose_gl[14] = CameraStreamPose_kdl_inv(2,3);
CameraStreamPose_gl[15] = CameraStreamPose_kdl_inv(3,3);
CameraStreamPose_gl_noInv[0] = CameraStreamPose_kdl(0,0);
CameraStreamPose_gl_noInv[1] = CameraStreamPose_kdl(1,0);
CameraStreamPose_gl_noInv[2] = CameraStreamPose_kdl(2,0);
CameraStreamPose_gl_noInv[3] = CameraStreamPose_kdl(3,0);
CameraStreamPose_gl_noInv[4] = CameraStreamPose_kdl(0,1);
CameraStreamPose_gl_noInv[5] = CameraStreamPose_kdl(1,1);
CameraStreamPose_gl_noInv[6] = CameraStreamPose_kdl(2,1);
CameraStreamPose_gl_noInv[7] = CameraStreamPose_kdl(3,1);
CameraStreamPose_gl_noInv[8] = CameraStreamPose_kdl(0,2);
CameraStreamPose_gl_noInv[9] = CameraStreamPose_kdl(1,2);
CameraStreamPose_gl_noInv[10] = CameraStreamPose_kdl(2,2);
CameraStreamPose_gl_noInv[11] = CameraStreamPose_kdl(3,2);
CameraStreamPose_gl_noInv[12] = CameraStreamPose_kdl(0,3);
CameraStreamPose_gl_noInv[13] = CameraStreamPose_kdl(1,3);
CameraStreamPose_gl_noInv[14] = CameraStreamPose_kdl(2,3);
CameraStreamPose_gl_noInv[15] = CameraStreamPose_kdl(3,3);
CameraStreamPose_gl_r[0] = CameraStreamPose_kdl_inv(0,0);
CameraStreamPose_gl_r[1] = CameraStreamPose_kdl_inv(0,1);
CameraStreamPose_gl_r[2] = CameraStreamPose_kdl_inv(0,2);
CameraStreamPose_gl_r[3] = CameraStreamPose_kdl_inv(0,3);
CameraStreamPose_gl_r[4] = CameraStreamPose_kdl_inv(1,0);
CameraStreamPose_gl_r[5] = CameraStreamPose_kdl_inv(1,1);
CameraStreamPose_gl_r[6] = CameraStreamPose_kdl_inv(1,2);
CameraStreamPose_gl_r[7] = CameraStreamPose_kdl_inv(1,3);
CameraStreamPose_gl_r[8] = CameraStreamPose_kdl_inv(2,0);
CameraStreamPose_gl_r[9] = CameraStreamPose_kdl_inv(2,1);
CameraStreamPose_gl_r[10] = CameraStreamPose_kdl_inv(2,2);
CameraStreamPose_gl_r[11] = CameraStreamPose_kdl_inv(2,3);
CameraStreamPose_gl_r[12] = CameraStreamPose_kdl_inv(3,0);
CameraStreamPose_gl_r[13] = CameraStreamPose_kdl_inv(3,1);
CameraStreamPose_gl_r[14] = CameraStreamPose_kdl_inv(3,2);
CameraStreamPose_gl_r[15] = CameraStreamPose_kdl_inv(3,3);
}
int main(int argc, char **argv)
{
//path = CONFIG_FILE_FOLDER2;
std::string path = "/home/pana/augmented_reality_ws/src/config_file/";
//std::string path = "/home/veronica/PhD/ros_workspace/catkin_augReal/src/AugmentedReality/config_file/";
cv::Mat RegistrationMatrix;
ros::init(argc, argv, "aug");
ros::NodeHandle n;
ros::Subscriber sub = n.subscribe("/camera/pose", 1000, chatterCallback); // /camera/pose1 col pacchetto nostro
ros::Rate looprate(100);
readParamsCamera(path + "LeftCameraParameters.xml",
cameraMatrixL,
distCoeffL);
double a;
ifstream file("trasf.txt");
if (file.is_open()){
//std::cout<<"ciao"<<std::endl;
int i_row=0;
int j_column=0;
while(file>>a){
if(i_row<3 && j_column<3){
ModelPose_kdl.M(i_row,j_column) = a;
std::cout<<"M "<<ModelPose_kdl.M(i_row,j_column)<<std::endl;
}
if(i_row<3 && j_column==3){
ModelPose_kdl.p(i_row) = a/1000;
std::cout<<"p "<<ModelPose_kdl.p(i_row)<<std::endl;
}
j_column++;
if(j_column==4){
j_column=0;
i_row++;
}
}
//std::cout<<"fine"<<std::endl;
//getchar();
}
file.close();
ModelPose_kdl_inv = ModelPose_kdl.Inverse();
//COLUMN MAJOR
ModelPose_gl[0] = ModelPose_kdl_inv(0,0);
ModelPose_gl[1] = ModelPose_kdl_inv(1,0);
ModelPose_gl[2] = ModelPose_kdl_inv(2,0);
ModelPose_gl[3] = ModelPose_kdl_inv(3,0);
ModelPose_gl[4] = ModelPose_kdl_inv(0,1);
ModelPose_gl[5] = ModelPose_kdl_inv(1,1);
ModelPose_gl[6] = ModelPose_kdl_inv(2,1);
ModelPose_gl[7] = ModelPose_kdl_inv(3,1);
ModelPose_gl[8] = ModelPose_kdl_inv(0,2);
ModelPose_gl[9] = ModelPose_kdl_inv(1,2);
ModelPose_gl[10] = ModelPose_kdl_inv(2,2);
ModelPose_gl[11] = ModelPose_kdl_inv(3,2);
ModelPose_gl[12] = ModelPose_kdl_inv(0,3);
ModelPose_gl[13] = ModelPose_kdl_inv(1,3);
ModelPose_gl[14] = ModelPose_kdl_inv(2,3);
ModelPose_gl[15] = ModelPose_kdl_inv(3,3);
ModelPose_gl_noInv[0] = ModelPose_kdl(0,0);
ModelPose_gl_noInv[1] = ModelPose_kdl(1,0);
ModelPose_gl_noInv[2] = ModelPose_kdl(2,0);
ModelPose_gl_noInv[3] = ModelPose_kdl(3,0);
ModelPose_gl_noInv[4] = ModelPose_kdl(0,1);
ModelPose_gl_noInv[5] = ModelPose_kdl(1,1);
ModelPose_gl_noInv[6] = ModelPose_kdl(2,1);
ModelPose_gl_noInv[7] = ModelPose_kdl(3,1);
ModelPose_gl_noInv[8] = ModelPose_kdl(0,2);
ModelPose_gl_noInv[9] = ModelPose_kdl(1,2);
ModelPose_gl_noInv[10] = ModelPose_kdl(2,2);
ModelPose_gl_noInv[11] = ModelPose_kdl(3,2);
ModelPose_gl_noInv[12] = ModelPose_kdl(0,3);
ModelPose_gl_noInv[13] = ModelPose_kdl(1,3);
ModelPose_gl_noInv[14] = ModelPose_kdl(2,3);
ModelPose_gl_noInv[15] = ModelPose_kdl(3,3);
ModelPose_gl_r[0] = ModelPose_kdl(0,0);
ModelPose_gl_r[1] = ModelPose_kdl(0,1);
ModelPose_gl_r[2] = ModelPose_kdl(0,2);
ModelPose_gl_r[3] = ModelPose_kdl(0,3);
ModelPose_gl_r[4] = ModelPose_kdl(1,0);
ModelPose_gl_r[5] = ModelPose_kdl(1,1);
ModelPose_gl_r[6] = ModelPose_kdl(1,2);
ModelPose_gl_r[7] = ModelPose_kdl(1,3);
ModelPose_gl_r[8] = ModelPose_kdl(2,0);
ModelPose_gl_r[9] = ModelPose_kdl(2,1);
ModelPose_gl_r[10] = ModelPose_kdl(2,2);
ModelPose_gl_r[11] = ModelPose_kdl(2,3);
ModelPose_gl_r[12] = ModelPose_kdl(3,0);
ModelPose_gl_r[13] = ModelPose_kdl(3,1);
ModelPose_gl_r[14] = ModelPose_kdl(3,2);
ModelPose_gl_r[15] = ModelPose_kdl(3,3);
//opencv
VideoCapture cap(0); // open the video camera no. 0 1-->CAMERA ESTERNA
if (!cap.isOpened()) // if not success, exit program
{
std::cout << "Cannot open the video cam" << std::endl;
return -1;
}
double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
std::cout << "Frame size : " << dWidth << " x " << dHeight << std::endl;
//opengl
GLFWwindow* window;
/* Initialize the library */
if (!glfwInit())
return -1;
/* Create a windowed mode window and its OpenGL context */
window = glfwCreateWindow(640, 480, "Augmented Reality", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
/* Make the window's context current */
glfwMakeContextCurrent(window);
initGL((int)dWidth, (int)dHeight);
// simply set filename here (oh static joy)
std::string inputFilename = "/home/pana/augmented_reality_ws/src/cubo_aug/gadolini_2500.vtk";
// Get all data from the file
reader->SetFileName(inputFilename.c_str());
reader->Update();
// All of the standard data types can be checked and obtained like this:
output = reader->GetPolyDataOutput();
strips = output->GetStrips();
points = output->GetPoints();
std::cout << "output has " << points << " points" << std::endl;
while (ros::ok() && !glfwWindowShouldClose(window))
{
Mat frame;
bool bSuccess = cap.read(frame); // read a new frame from video
if (!bSuccess) //if not success, break loop
{
std::cout << "Cannot read a frame from video stream" << std::endl;
break;
}
// Copy frame to texture
glBindTexture(GL_TEXTURE_2D, texId);
glTexSubImage2D(
GL_TEXTURE_2D, 0, 0, 0,
(int)dWidth, (int)dHeight,
GL_BGR, GL_UNSIGNED_BYTE, frame.data);
/* Do the render */
render(window);
/* Poll for and process events */
glfwPollEvents();
ros::spinOnce();
looprate.sleep();
}
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}
void initGL(int w, int h)
{
// Background color
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
// Smooth triangles (GL_FLAT for flat triangles)
glShadeModel(GL_SMOOTH);
// We don't see the back face
glCullFace(GL_BACK);
// The front face is CCW
glFrontFace(GL_CCW);
// Disable culling
glDisable(GL_CULL_FACE);
// Disable lighting
glDisable(GL_LIGHTING);
// Disable depth test
glEnable(GL_DEPTH_TEST);
// Enable normalizing
glEnable(GL_NORMALIZE);
// Enable blending
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBlendEquation(GL_FUNC_ADD);
// Generate texture
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
}
//per triangolo
void render(GLFWwindow* window)
{
// Get window dimensions
int width;
int height;
glfwGetFramebufferSize(window, &width, &height);
// We draw in all the window
glViewport(0, 0, width, height);
// Clear buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/* Draw camera image in the background */
// Set camera
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// Set 2D camera
gluOrtho2D(-1.0, 1.0, 1.0, -1.0);
//gluOrtho2D(0.0, 2.0, 0.0, -2.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Disable
glDisable(GL_DEPTH_TEST);
// Draw a square with the texture
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texId);
glColor3f(1.0f, 1.0f, 1.0f);
glBegin(GL_QUADS);
glTexCoord2f(1.0f, 1.0f);
glVertex3f(1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f);
glVertex3f(1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0f, 0.0f);
glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0f, 1.0f);
glVertex3f(-1.0f, 1.0f, 0.0f);
glEnd();
/* Draw 3D object */
// Set camera
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
cv::Mat newIntrsMatrix;
cv::Size size = Size((double)width,(double)height);
//find undistorted intrinsic parameters matrix (cameraMatrixL; distCoeffL;)
newIntrsMatrix = getOptimalNewCameraMatrix(cameraMatrixL, distCoeffL, size, 1,size);
//compute fovy f=cotangent(fovy/2)
//float fovy = 2*atan(1/newIntrsMatrix.at<double>(0,0));
float fovy = 2.0*atan(1.0/cameraMatrixL.at<double>(0,0));
std::cout<<"fuoco "<<cameraMatrixL.at<double>(0,0)<<std::endl;
std::cout<<"fovy "<<fovy<<std::endl;
//float fovy = 2*atan(cameraMatrixL.at<double>(0,0));
// Here set the camera intrinsic parameters
// fov in degrees, aspect ratio, minimum distance (DO NOT PUT ZERO!), maximum distance
//gluPerspective(fovy, (float)width / (float)height, 0.01f, 10.0f);
gluPerspective(120.0f, (float)width / (float)height, 0.01f, 10.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Here set the camera INVERSE transform using glMultMatrixf(transform)
glMultMatrixf(CameraStreamPose_gl_r);
// Enable
glEnable(GL_DEPTH_TEST);
// Draw a square with the texture
glDisable(GL_TEXTURE_2D);
glPushMatrix();
// Here set the object transform (COLUMN MAJOR!)
/*
* Rxx Rxy Rxz 0
* Ryx Ryy Ryz 0
* Rzx Rzy Rzz 0
* Tx Ty Tz 1
*/
glMultMatrixf(ModelPose_gl_r);
// Replace these lines with your model
glColor4f(0.0f, 0.0f, 1.0f, 0.2f);
for (strips->InitTraversal(); strips->GetNextCell(npts,ptIds);)
{
// Build the structures
vertex.clear();
for (int j = 0; j < npts; j++)
{
double v[3];
points->GetPoint(ptIds[j], v);
vertex.push_back(v[0]);
vertex.push_back(v[1]);
vertex.push_back(v[2]);
}
// Build the buffers
sizeBuffer = vertex.size()*sizeof(GLfloat);
// Render
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, &vertex[0]);
glDrawArrays(GL_TRIANGLE_STRIP, 0, (int)vertex.size()/3);
glDisableClientState(GL_VERTEX_ARRAY);
}
glEnd();
// End of replace
glPopMatrix();
// Swap buffers
glfwSwapBuffers(window);
}