I am jus trying to draw a 3D Cartesian axis to see if I can get 3D to work. I am using GTK with Cairo to do the drawing. Here is my code...
glm::vec3 camera_pos(0, 0, -10);
glm::vec3 camera_target(0, 0, 0);
glm::vec3 up(0, 1, 0);
gboolean draw_callback(GtkWidget* widget, cairo_t* cr, gpointer data)
{
guint width, height;
width = gtk_widget_get_allocated_width(widget);
height = gtk_widget_get_allocated_height(widget);
glm::mat4 model = glm::translate(glm::vec3(width / 2.0f, height / 2.0f, 0.0f)) *
glm::scale(glm::vec3(100.0f, 100.0f, 100.0f));
glm::mat4 camera = glm::lookAt(camera_pos, camera_target, up);
glm::mat4 perspective = glm::perspective(45.0f, (float)width / (float)height, 0.1f, 100.0f);
glm::mat4 transform = perspective * camera * model;
glm::vec4 xaxis = transform * glm::vec4(1, 0, 0, 1);
glm::vec4 yaxis = transform * glm::vec4(0, 1, 0, 1);
glm::vec4 zaxis = transform * glm::vec4(0, 0, 1, 1);
glm::vec4 orig = transform * glm::vec4(0, 0, 0, 1);
cout << orig.x << " " << orig.y << endl;
cairo_set_source_rgb(cr, 1.0, 0, 0);
cairo_move_to(cr, orig.x, -orig.y);
cairo_line_to(cr, xaxis.x, -xaxis.y);
cairo_stroke(cr);
cairo_set_source_rgb(cr, 0, 1.0, 0);
cairo_move_to(cr, orig.x, -orig.y);
cairo_line_to(cr, yaxis.x, -yaxis.y);
cairo_stroke(cr);
cairo_set_source_rgb(cr, 0, 0, 1.0);
cairo_move_to(cr, orig.x, -orig.y);
cairo_line_to(cr, zaxis.x, -zaxis.y);
cairo_stroke(cr);
return FALSE;
}
Can someone check the math here because I am getting a lot of negative coordinates. Nothing appears on my screen when I run this.
Edit:
I just modified my camera vectors like this.
gboolean draw_callback(GtkWidget* widget, cairo_t* cr, gpointer data)
{
guint width, height;
width = gtk_widget_get_allocated_width(widget);
height = gtk_widget_get_allocated_height(widget);
glm::vec3 camera_pos(0, 0, 1000);
glm::vec3 camera_target(width / 2.0f, height / 2.0f, 0);
glm::vec3 up(0, 1, 0);
glm::mat4 model = glm::translate(glm::vec3(width / 2.0f, height / 2.0f, 0.0f));// *
//glm::scale(glm::vec3(100.0f, 100.0f, 100.0f));
glm::mat4 camera = glm::lookAt(camera_pos, camera_target, up);
glm::mat4 perspective = glm::perspective(45.0f, (float)width / (float)height, 0.1f, 100.0f);
glm::mat4 transform = perspective * camera * model;
glm::vec4 xaxis = transform * glm::vec4(100, 0, 0, 1);
glm::vec4 yaxis = transform * glm::vec4(0, 100, 0, 1);
glm::vec4 zaxis = transform * glm::vec4(0, 0, 100, 1);
glm::vec4 orig = transform * glm::vec4(0, 0, 0, 1);
cout << xaxis.x << " " << xaxis.y << endl;
cairo_set_source_rgb(cr, 1.0, 0, 0);
cairo_move_to(cr, orig.x, -orig.y);
cairo_line_to(cr, xaxis.x, -xaxis.y);
cairo_stroke(cr);
cairo_set_source_rgb(cr, 0, 1.0, 0);
cairo_move_to(cr, orig.x, -orig.y);
cairo_line_to(cr, yaxis.x, -yaxis.y);
cairo_stroke(cr);
cairo_set_source_rgb(cr, 0, 0, 1.0);
cairo_move_to(cr, orig.x, -orig.y);
cairo_line_to(cr, zaxis.x, -zaxis.y);
cairo_stroke(cr);
return FALSE;
}
Now I can see a blue line from the corner of my screen but it is still wrong.
You are using a perspective projection, but you do not carry out the persepctive divide, which will totally screw up your results. glm::perspective will create a matrix which maps the viewing frustum with the given angle and apsect ratio along the negative z axis to a [-w,w]^3 "cube" in clip space. After the perspective divide by the w coordinate, the viewing frustum will be [-1,1]^3 in normalized device coords. Usually at this stage, the coordinates are further converted into window space, where the actual pixels come into play.
In your case, you seem to try to incorporate the window resolution in the model transform at the start of the transform chain, which is totally weird if you later apply a standard GL projection matrix.
Related
I can't get the camera transforms to work with glm...
Probably just a silly thing I missed but I just can't find it... help?
glViewport(0, 0, m_width, m_height);
glClearColor(0.5f, 0.5f, 1.0f, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
float fovy = 60.0f;
float aspect = m_width / float(m_height);
float znear = 0.1f;
float zfar = 100.0f;
glm::mat4 Mp = glm::perspective(fovy, aspect, znear, zfar);
glMultMatrixf(&Mp[0][0]);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glm::vec3 eye = glm::vec3(5, 5, 5);
glm::vec3 lookat = glm::vec3(0, 0, 0);
glm::vec3 up = glm::vec3(0, 1, 0);
glm::mat4 M = glm::lookAt(eye, lookat, up);
glMultMatrixf(&M[0][0]);
glBegin(GL_TRIANGLES);
glColor3f(1, 1, 1);
glVertex3f(-10,-10,-10);
glVertex3f(10,-10,-10);
glVertex3f(10, 10,-10);
glColor3f(1, 1, 0);
glVertex3f(-10,-10,-10);
glVertex3f(10, 10,-10);
glVertex3f(-10, 10,-10);
glEnd();
This gives the following output:
m_width = 1024, m_heigth = 768
Compared to gluPerspective, the unit of the angle argument of glm::perspective is Radian. Use glm::radians o convert from degrees to radians:
glm::perspective(fovy, aspect, znear, zfar);
glm::perspective(glm::radians(fovy), aspect, znear, zfar);
Instead of glLoadIdentity followed by glMultMatrixf you can use glLoadMatrixf.
In addition, you can use glm::value_ptr to get a pointer to the matrix fields:
float fovy = 60.0f;
float aspect = m_width / float(m_height);
float znear = 0.1f;
float zfar = 100.0f;
glm::mat4 Mp = glm::perspective(glm::radians(fovy), aspect, znear, zfar);
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(glm::value_ptr(Mp));
glm::vec3 eye = glm::vec3(5, 5, 5);
glm::vec3 lookat = glm::vec3(0, 0, 0);
glm::vec3 up = glm::vec3(0, 1, 0);
glm::mat4 M = glm::lookAt(eye, lookat, up);
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(glm::value_ptr(M));
Hello I've tried to split a window in four viewports where I want to render a scene in each viewport.
For simplicity I have simplified the code to only contain a single camera view.
The code look roughly as follows:
void setup_trf(const PolarCoords& pc, double aspect)
{
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, aspect, 0.01, 1000);
//glFrustum(-1.0, 1.0, -1.0, 1.0, 1.5, 20.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
double eye_x, eye_y, eye_z;
pc.to_cartesian(eye_x, eye_y, eye_z);
double look_x = 0;
double look_y = 0;
double look_z = 0;
double up_x = 0;
double up_y = 0;
double up_z = 1;
gluLookAt(
eye_x, eye_y, eye_z,
look_x, look_y, look_z,
up_x, up_y, up_z);
}
void draw_scene(int w, int h) {
glClearColor(0.0f, 0.75f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
double aspect = w / double(h);
glViewport(0, h / 2, w / 2, h);
setup_trf(app_state.m_polar, aspect);
draw_sphere();
glViewport(w / 2, h / 2, w, h);
setup_trf(app_state.m_polar, aspect*0.5);
draw_sphere();
glViewport(0, 0, w / 2, h/2);
setup_trf(app_state.m_polar, aspect);
draw_sphere();
glViewport(w / 2, 0, w, h / 2);
setup_trf(app_state.m_polar, aspect);
draw_sphere();
}
And the result is the following:
view of the sphere
Anyone knows why the image gets stretched in the different viewports?
The 1st and 2nd parameter of glViewport are the bottom left coordinate (origin) of the viewport rectangle. But the 3rd and 4th parameter are the width and height of the viewport rectangle rather than the top right coordinate.
This means the 3rd and 4th parameter have to be the half of the window size (w/2, h/2) in each case:
glViewport(0, h/2, w/2, h/2);
setup_trf(app_state.m_polar, aspect);
draw_sphere();
glViewport(w/2, h/2, w/2, h/2);
setup_trf(app_state.m_polar, aspect);
draw_sphere();
glViewport(0, 0, w/2, h/2);
setup_trf(app_state.m_polar, aspect);
draw_sphere();
glViewport(w/2, 0, w/2, h/2);
setup_trf(app_state.m_polar, aspect);
draw_sphere();
I'm trying to model the Solar System and I'm experiencing the Fish-eyes effects.
As you can see from the picture, the sun is at the centre, the Earth is translated 20 units to the x-axis and it doesn't look like a sphere anymore.
In glm::lookAt function, I set the field of view (is this FoVx?, I read from wiki, this is horizontal field of view so I think this is FoVx.) to be 80 degree, aspect ratio is 5.4f / 3.0f (most of application set the aspect ratio to be 4/3 but if I use this ratio, these planets look exactly like an ellipse!).
So how can I solve this problem? As the Earth would be orbiting around the Sun, and it should be always be a sphere at any angle and distance of view. Maybe I haven't completely understand the perspective, view matrix etc. Really need some help.
Here is the code which I used to render the Sun and the Earth:
void renderSun(int i){
glPushMatrix();
glLoadIdentity();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_ID[0]);
glUniform1i(texture_Location, 0);
glm::mat4 Projection = glm::perspective(90.0f, 5.4f / 3.0f, 1.0f, 100.0f);
glm::mat4 View = glm::lookAt(
glm::vec3(0, 30, 1),
glm::vec3(0, 0, 0),
glm::vec3(0, 1, 0)
);
/* Animations */
GLfloat angle = (GLfloat) (i);
//View = glm::translate(View, glm::vec3(-2.0f, 0.0f, 0.0f));
View = glm::rotate(View, angle * 0.5f, glm::vec3(0.0f, 0.0f, 1.0f));
/* ******* */
glm::mat4 Model = glm::mat4(1.0f);
glm::mat4 MVP = Projection * View * Model;
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "mvpMatrix"), 1, GL_FALSE, glm::value_ptr(MVP));
glDrawElements(GL_TRIANGLES, numsToDraw, GL_UNSIGNED_INT, NULL);
glPopMatrix();
}
void renderEarth(int i){
glPushMatrix();
glLoadIdentity();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_ID[3]);
glUniform1i(texture_Location, 0);
//glm::mat4 Projection = glm::ortho(0.f, 800.f, 0.f, 400.f, -5.f, 5.f );
glm::mat4 Projection = glm::perspective(80.0f, 5.4f / 3.0f, 1.0f, 100.0f);
glm::mat4 View = glm::lookAt(
glm::vec3(0, 30, 2),
glm::vec3(0, 0, 0),
glm::vec3(0, 1, 0)
);
/* Animations */
GLfloat angle = (GLfloat) (i);
View = glm::translate(View, glm::vec3(30.0f, 0.0f, 0.0f));
//View = glm::scale(View, glm::vec3(4.0f, 5.0f, 4.0f));
View = glm::rotate(View, angle * 0.5f, glm::vec3(0.0f, 0.0f, 1.0f));
/* ******* */
glm::mat4 Model = glm::mat4(1.0f);
glm::mat4 MVP = Projection * View * Model;
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "mvpMatrix"), 1, GL_FALSE, glm::value_ptr(MVP));
glDrawElements(GL_TRIANGLES, numsToDraw, GL_UNSIGNED_INT, NULL);
glPopMatrix();
}
I'm using code to draw text as textures in OpenGL (Qt4.8.3 + Linux (debian-like)).
Code was ported from 2D project, where it is working good.
2D project was using gluOrtho2D, now I use gluLookAt for 3D.
The issue is that instead of text I'm seing colored rectangle.
If I turn on GL_DEPTH_TEST I see artifacts instead of text. BTW artifacts change if I move camera, which is quite strange.
Here's the code:
void GLWidget::paintGL() {
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); //Set blending function.
glEnable(GL_BLEND); //Enable blending.
glShadeModel(GL_SMOOTH);
glEnable(GL_MULTISAMPLE);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glPushMatrix();
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
gluPerspective( 60.0f, (GLdouble) width() / (GLdouble) height(), 0.001f, 10000.0f );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
// Set up current camera
gluLookAt( cameraDistance * sin(cameraTheta * M_PI / 180) * cos(cameraPhi * M_PI / 180),
cameraDistance * sin(cameraTheta * M_PI / 180) * sin(cameraPhi * M_PI / 180),
cameraDistance * cos(cameraTheta * M_PI / 180),
0.0, 0.0, 0.0,
0.0, 0.0, 1.0);
glTranslatef(-4.5355, -4.5355, 0.0);
glPushMatrix();
// draw text labels
drawLabel(1, 0, 90, "1");
drawLabel(2, 0, 90, "2");
drawLabel(3, 0, 90, "3");
drawLabel(4, 0, 90, "4");
drawLabel(5, 0, 90, "5");
glPopMatrix();
glGetIntegerv(GL_VIEWPORT, viewport);
glGetDoublev(GL_MODELVIEW_MATRIX, modelview);
glGetDoublev(GL_PROJECTION_MATRIX, projection);
glPopMatrix();
}
void GLWidget::drawLabel(float xpos, float ypos, float angle, char *txt) {
float labelHeight = 0.3;
float labelWidth = labelHeight / 2;
float margin = labelWidth / 10;
float len = (float) strlen(txt);
glPushMatrix();
glRotatef(-angle, 0, 0, -1);
glTranslatef(xpos, ypos, 0.0f);
glRotatef(angle, 0, 0, -1);
glScalef(1.0, -1.0, 1.0);
glTranslatef(- len * labelWidth / 2, -labelHeight / 2 + margin, 0.0f);
// background
glColor3f(0.0f, 0.0f, 0.0f);
glBegin(GL_QUADS);
glVertex3f(-margin, -margin, 0);
glVertex3f(len * labelWidth + margin, -margin, 0);
glVertex3f(len * labelWidth + margin, labelHeight + margin, 0);
glVertex3f(-margin, labelHeight + margin, 0);
glEnd();
// text
glColor3f(0.5f, 0.5f, 0.5f);
glEnable(GL_TEXTURE_2D);
glBindTexture( GL_TEXTURE_2D, glFont->getTextureID() );
glFont->drawText(labelWidth, labelHeight, txt);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
glPopMatrix();
}
void oglFont::drawText(GLfloat cw, GLfloat ch, char *txt)
{
glBegin(GL_QUADS);
//character location and dimensions
GLfloat cx = 0.0f;
GLfloat cy = 0.0f;
//calculate how wide each character is in term of texture coords
GLfloat dtx = float(c_width) / float(m_width);
GLfloat dty = float(c_height) / float(m_height);
for (char * c = txt; *c != 0; c++, cx += cw) {
int index = getCharIndex(c);
int row = index / c_per_row;
int col = index % c_per_row;
if (index < 0) {
//qDebug() << "glFont: Character outside of font! char: " << c;
}
// find the texture coords
GLfloat tx = float(col * c_width) / float(m_width);
GLfloat ty = float(row * c_height) / float(m_height);
glTexCoord2f(0, 0);
glVertex2f(cx, cy);
glTexCoord2f(1, 0);
glVertex2f(cx + cw, cy);
glTexCoord2f(1, 1);
glVertex2f(cx + cw, cy + ch);
glTexCoord2f(0, 1);
glVertex2f(cx, cy + ch);
}
glEnd();
}
The reason for the artifacts is Z fighting.
This problem can be solved by increasing the depth buffer precision OR by making the different objects further apart from each-other (the ones from drawlable ).
Also make sure that your generated text doesn't get culled by the far plane / near plane (maybe you generate the geometry very near the edges ? ).
It turns out that because texture was loaded in QGLWidget constructor, context was either not created or not set.
I moved texture loading in initializeGL() method and it works great.
I'm trying to make a Camera's View Matrix and make it zoomable, pannable and rotatable. The Matrix that I'd expect to work is.
Matrix.translateM(matrixView, 0, -camposition.X, -camposition.Y, 0);
Matrix.scaleM(matrixView, 0, zoom, zoom, 1.0f);
Matrix.rotateM(matrixView, 0, camrotation, 0, 0, 1.0f);
Matrix.translateM(matrixView, 0, screenwidth / 2, screenheight / 2, 0);
However it doesn't work this way. I would appreciate if somebody could point me in the right direction.
Here's how I did the model matrix which works:
Matrix.translateM(matrixModel, 0, position.X, position.Y, 0.0f);
Matrix.scaleM(matrixModel, 0, scale.X, scale.Y, 1.0f);
Matrix.rotateM(matrixModel, 0, rotation, 0, 0, 1.0f);
Matrix.translateM(matrixModel, 0, -origin.X, -origin.Y, 0.0f);
And here's the projection matrix aswell:
Matrix.orthoM(matrixProj, 0, 0f, screenWidth, screenHeight, 0, -1, 1);
I multiply them together in the vertex shader like so:
matrixProj * matrixView * matrixModel * vPosition;
I've found the correct order I'll leave it for future developers facing the same problem.
Matrix.translateM(matrixView, 0, screenWidth / 2, screenHeight / 2, 0);
Matrix.scaleM(matrixView, 0, camZoom, camZoom, 1.0f);
Matrix.rotateM(matrixView, 0, camRotation, 0, 0, 1.0f);
Matrix.translateM(matrixView, 0, -camPosition.X, -camPosition.Y, 0);
Keep in mind this is for the View Matrix. For the Model Matrix the above code works just fine.