Show glutSolidCone on its side - c++

I am trying to look at a cone lying on its side from above. For this I wrote the following code:
void display(void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(250, 250, 0);
    glRotatef(90, 1, 0, 0);
    glutSolidCone(20, 20, 20, 20);
    glTranslatef(-250, -250, 0);
    glRotatef(-90, 1, 0, 0);
    glFlush();
    glutSwapBuffers();
}
int main(int argc, char **argv)
{
    float x, y;
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(500, 500);
    glutCreateWindow("Cone");
    glutDisplayFunc(display);
    glutReshapeFunc(reshape);
    glutKeyboardFunc(keyboard);
    glutMotionFunc(drag);
    glutMouseFunc(click);
    glutSpecialFunc(catchKey);
    glEnable(GL_DEPTH_TEST);
    glutMainLoop();
    return 0;
}
I translate to the middle of the screen and then rotate 90 degrees around the x-axis, which to my mind should show the cone on its side. The result, however, is not a cone but a line, and it is tilted 45 degrees rather than 90. What should I change so that the cone shows up the way I want?

Where is the projection matrix?
If you don't set a projection matrix, then the coordinates have to be given in normalized device space. In NDC all coordinates are in the range [-1.0, 1.0]:
e.g.
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.5, 0.5, 0);
glRotatef(90, 1, 0, 0);
glutSolidCone(1.0, 1.0, 20, 20);
glTranslatef(-0.5, -0.5, 0);
glRotatef(-90, 1, 0, 0);
Alternatively you can set up an orthographic projection, which maps world coordinates 1:1 to window coordinates. The projection matrix can be set with glOrtho:
e.g.
void display(void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0, 500.0, 500.0, 0.0, -100.0, 100.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(250, 250, 0);
    glRotatef(90, 1, 0, 0);
    glutSolidCone(20, 20, 20, 20);
    glTranslatef(-250, -250, 0);
    glRotatef(-90, 1, 0, 0);
    glFlush();
    glutSwapBuffers();
}
The projection matrix describes the mapping from the 3D points of a scene to 2D points on the viewport. With an orthographic projection, the view space coordinates are mapped linearly to the clip space coordinates, and the clip space coordinates are equal to the normalized device coordinates.
The normalized device coordinates are in turn mapped linearly to the viewport rectangle.
The viewport rectangle can be set with glViewport. Initially it matches the size of the window.
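As a side note (not part of the original answer), the viewport and the orthographic projection are usually kept in sync in the reshape callback. A minimal sketch of such a callback, assuming the same 0..width / 0..height window-coordinate convention as the glOrtho call above:
void reshape(int width, int height)
{
    glViewport(0, 0, width, height);  // map NDC to the full window
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0, (double)width, (double)height, 0.0, -100.0, 100.0);
    glMatrixMode(GL_MODELVIEW);
}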
Note that drawing with glBegin/glEnd sequences and the fixed-function matrix stack has been deprecated for many years. See Fixed Function Pipeline and Legacy OpenGL.
Read about Vertex Specification and Shaders for the state-of-the-art way of rendering.
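For a rough idea of what "Vertex Specification and Shaders" means in practice, here is a minimal sketch (not part of the original answer): one triangle stored in a VBO, described by a VAO, and drawn with a small GLSL program. It assumes a core-profile context and an extension loader (e.g. GLEW or glad) are already set up, and it omits error checking.
// Assumes an extension loader header (e.g. GLEW or glad) is already included.
const char* vsSrc =
    "#version 330 core\n"
    "layout(location = 0) in vec3 aPos;\n"
    "uniform mat4 uMvp;\n"
    "void main() { gl_Position = uMvp * vec4(aPos, 1.0); }\n";
const char* fsSrc =
    "#version 330 core\n"
    "out vec4 fragColor;\n"
    "void main() { fragColor = vec4(1.0, 0.5, 0.2, 1.0); }\n";

GLuint compileShader(GLenum type, const char* src)
{
    GLuint sh = glCreateShader(type);
    glShaderSource(sh, 1, &src, nullptr);
    glCompileShader(sh);
    return sh;
}

void initModern(GLuint& vao, GLuint& prog)
{
    // Program: vertex + fragment shader
    prog = glCreateProgram();
    glAttachShader(prog, compileShader(GL_VERTEX_SHADER, vsSrc));
    glAttachShader(prog, compileShader(GL_FRAGMENT_SHADER, fsSrc));
    glLinkProgram(prog);

    // Vertex specification: one triangle in a VBO, described by a VAO
    const float verts[] = { -0.5f, -0.5f, 0.0f,
                             0.5f, -0.5f, 0.0f,
                             0.0f,  0.5f, 0.0f };
    GLuint vbo;
    glGenVertexArrays(1, &vao);
    glGenBuffers(1, &vbo);
    glBindVertexArray(vao);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
    glEnableVertexAttribArray(0);
}

void drawModern(GLuint vao, GLuint prog, const float* mvp) // mvp: 16 floats, column-major
{
    glUseProgram(prog);
    glUniformMatrix4fv(glGetUniformLocation(prog, "uMvp"), 1, GL_FALSE, mvp);
    glBindVertexArray(vao);
    glDrawArrays(GL_TRIANGLES, 0, 3);
}
Here the matrix work that glTranslatef/glRotatef used to do is folded into the uMvp uniform, which the application computes itself (e.g. with a small matrix library).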

Related

Create rotating sphere with longitude lines c++ opengl

I've started using OpenGL and I'm trying to create a wireframe sphere with colored longitude lines (like time zones) that rotates.
I'm trying to draw the lines with gluDisk and shift each one with glRotatef, but I get the result shown in the screenshots below.
How can I fix it?
Maybe there is a better way to do this?
The code I'm using:
void CreateDisk(int shift) {
    quad = gluNewQuadric();
    gluQuadricDrawStyle(quad, GLU_LINE);
    glPushMatrix();
    glTranslatef(0., 0., 1.);
    glRotatef(shift, 0, 1, 0);
    glRotatef(count, 0, 1, 0);
    gluDisk(quad, 0.5, .5, 50, 1);
    glPopMatrix();
}
void display() {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(30, aspect, .5, 50);
    glMatrixMode(GL_MODELVIEW); // select the modelview matrix
    glLoadIdentity();
    gluLookAt(0, 0, 4,
              0, 0, 0,
              0, 1, 0);
    glPushMatrix();
    glColor3f(1, 0, 1);
    CreateDisk(20);
    glColor3f(1, 0, 0);
    CreateDisk(60);
    glPopMatrix();
    glutSwapBuffers();
}
Currently it creates two disks; that's just for testing.
[Screenshots: first screen, second screen]
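For what it's worth, a hedged sketch of the rotated-disk idea: gluDisk draws its circle in the z = 0 plane around the origin, so dropping the glTranslatef keeps every circle on the sphere's surface, with the poles on the Y axis. quad and count are the globals from the question; R is an assumed radius for the sphere:
void DrawLongitude(GLUquadric* quad, float R, float shiftDeg)
{
    glPushMatrix();
    glRotatef(count, 0, 1, 0);    // spin the whole sphere (count as in the question)
    glRotatef(shiftDeg, 0, 1, 0); // offset of this particular longitude line
    gluDisk(quad, R, R, 50, 1);   // inner == outer radius -> just the circle outline
    glPopMatrix();
}
Called from display() with different shiftDeg values, each circle then passes through the poles, which is what longitude lines should do.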

creating a grid of squares (2D) in opengl using c++

I want to make a 2D square out of several smaller squares (like a grid). I need to do this because I want to texture an image onto the main square and divide the picture into several "pixels" (the smaller squares).
Here's the program:
int h=1,w=1;
int res=25;// res is the number of smaller squares I want
float hratio=h/res;
float wratio=w/res;
void Draw()
{
    float x, y;
    for (y = -.5; y <= h; y += h/res)
    {
        for (x = -.5; x <= w; x += w/res)
        {
            glColor3f(1, 1, 1);
            glBegin(GL_QUADS);
            glVertex3f(x, y+(h/res), 0);
            glVertex3f(x+(w/res), y+(h/res), 0);
            glVertex3f(x+(w/res), y, 0);
            glVertex3f(x, y, 0);
            glEnd();
        }
    }
    glFlush();
    glutPostRedisplay();
}
void display(void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    Draw();
    glutSwapBuffers();
    glutPostRedisplay();
}
int main(int iArgc, char** cppArgv)
{
    glutInit(&iArgc, cppArgv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowSize(600, 600);
    glutInitWindowPosition(200, 200);
    glutCreateWindow("PIXELS");
    glEnable(GL_DEPTH_TEST);
    glutDisplayFunc(display);
    glutMainLoop();
    return 0;
}
When I run this program, all I get is a black screen.
Try this display function:
void display() {
    // depth test is enabled in main(), so clear depth as well
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    //set viewport
    glViewport(0, 0, screen.width, screen.height);
    //set projection matrix using intrinsic camera params
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    float aspect = screen.width*1.0/screen.height;
    //gluPerspective is arbitrarily set, you will have to determine these values based
    //on the intrinsic camera parameters
    gluPerspective(60.0f, aspect, 0.1f, 100.0f);
    //you will have to set modelview matrix using extrinsic camera params
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(0, 0, 5, 0, 0, 0, 0, 1, 0); // camera at z = +5 looking at the origin
    Draw();
    glutSwapBuffers(); // Swap the front and back frame buffers (double buffering)
    glutPostRedisplay();
}
In your case screen.width is 600 and screen.height is 600.
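One more thing worth checking in the posted code: h, w and res are all ints, so h/res and w/res are integer divisions and evaluate to 0. The loops in Draw() then step by zero and never terminate, which alone will leave the window black. A small sketch of the fix, computing the step in floating point (stepX/stepY are illustrative local names standing in for the original hratio/wratio):
// Sketch: do the division in floating point so the step is 0.04 rather than 0
const float stepY = (float)h / (float)res;
const float stepX = (float)w / (float)res;
for (float y = -0.5f; y <= h; y += stepY)
{
    for (float x = -0.5f; x <= w; x += stepX)
    {
        glBegin(GL_QUADS);
        glVertex3f(x,         y + stepY, 0);
        glVertex3f(x + stepX, y + stepY, 0);
        glVertex3f(x + stepX, y,         0);
        glVertex3f(x,         y,         0);
        glEnd();
    }
}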

Scaling 3D shapes (OpenGL and C++)

I've created a program in OpenGL that draws some shapes. I want the user to be able to zoom in on the shapes if they want to. This is the code that draws the shapes:
/*Initialise the required OpenGL functions*/
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glOrtho(0.0, screenWidth, screenHeight, 0.0, -1.0, 10.0);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
glDisable(GL_CULL_FACE);
glClear(GL_DEPTH_BUFFER_BIT);
/*Draw a square*/
glColor3f(1, 0, 0);
glBegin(GL_QUADS);
glVertex2f(screenWidth * 0.75, screenHeight * 0.08333);
glVertex2f(screenWidth * 0.75, screenHeight * 0.16666);
glVertex2f(screenWidth * 0.86666, screenHeight * 0.16666);
glVertex2f(screenWidth * 0.86666, screenHeight * 0.08333);
glEnd();
glColor3f(0, 0, 0);
/*Let the user zoom*/
if (GetAsyncKeyState(VK_UP))
{
/*"zoom" is a global variable*/
zoom += 0.005;
}
glScaled(1 + zoom, 1 + zoom, 1);
/*Everything that is drawn from this point on (A sphere and a cube) should be scaled*/
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(-0.3, 0, 0);
glutSolidSphere(3, 20, 20);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.55, 0.36, 0);
glutSolidCube(0.05);
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
glutSwapBuffers();
The code draws the shapes properly, but the shapes can't be scaled. I've used similar code in some other functions, so I believe that it may be because I am using 3D shapes or it may have something to do with me calling "glMatrixMode" multiple times. Either way, how should I change my code so that the cube and sphere are scaled based on user input, but the first square is not affected?
glScaled() changes the current matrix. So as soon as you call glLoadIdentity() you are undoing your scaling. You are doing lots of unnecessary calls to glMatrixMode() and glLoadIdentity() that should be eliminated. So try something more like this:
// You probably don't really need to do these, but if you do, do it once up top.
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glPushMatrix(); // Save the current matrix
glScaled(1 + zoom, 1 + zoom, 1); // Scale it
/*Everything that is drawn from this point on (A sphere and a cube) should be scaled*/
glTranslatef(-0.3, 0, 0);
glutSolidSphere(3, 20, 20);
glTranslatef(0.55, 0.36, 0);
glutSolidCube(0.05);
glPopMatrix(); // Undo the glScaled() call above
glutSwapBuffers();
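To address the other half of the question (the red square must stay fixed while the sphere and cube zoom), a hedged sketch of one possible display order, reusing the projections from the original code; drawSquare() is a hypothetical helper standing in for the red quad above:
/* 1. Square pass: window-coordinate ortho projection, no zoom */
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, screenWidth, screenHeight, 0.0, -1.0, 10.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
drawSquare();                    // hypothetical helper: the red quad from the question

/* 2. 3D pass: identity projection as in the question, zoom applied here only */
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glScaled(1 + zoom, 1 + zoom, 1); // everything in this pass is zoomed
glPushMatrix();
glTranslatef(-0.3, 0, 0);
glutSolidSphere(3, 20, 20);
glPopMatrix();
glPushMatrix();
glTranslatef(0.55, 0.36, 0);
glutSolidCube(0.05);
glPopMatrix();

glutSwapBuffers();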

How to scale glDrawPixels?

I need to scale the result of glDrawPixels image.
I'm drawing a 640x480 pixels image buffer with glDrawPixels in a Qt QGLWidget.
I tried to do the following in paintGL:
glScalef(windowWidth/640, windowHeight/480, 0);
glDrawPixels(640,480,GL_RGB,GL_UNSIGNED_BYTE,frame);
But it doesn't work.
I am setting the OpenGL viewport and glOrtho with the size of the widget as:
void WdtRGB::paintGL() {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, windowWidth, windowHeight, 0, -1.0, 1.0);
    glDepthMask(0);
    //glRasterPos2i(0, 0);
    glScalef(windowWidth/640, windowHeight/480, 0);
    glDrawPixels(640, 480, GL_RGB, GL_UNSIGNED_BYTE, frame);
}
where windowWidth and windowHeight correspond to the widget size.
The init functions are:
void WdtRGB::initializeGL() {
    glClearColor(0.8, 0.8, 0.8, 0.0); // Background to a grey tone
    /* initialize viewing values */
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, windowWidth, windowHeight, 0, -1.0, 1.0);
    glEnable(GL_DEPTH_TEST);
}
void WdtRGB::resizeGL(int w, int h) {
    float aspect = (float)w/(float)h;
    windowWidth = w;
    windowHeight = h;
    glViewport(0, 0, (GLsizei)w, (GLsizei)h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    if (w <= h)
        glOrtho(-5.0, 5.0, -5.0/aspect, 5.0/aspect, -5.0, 5.0);
    else
        glOrtho(-5.0*aspect, 5.0*aspect, -5.0, 5.0, -5.0, 5.0);
    //printf("\nresize");
    emit changeSize();
}
It sounds like what you actually need to do instead of calling glDrawPixels () is to load your image data into a texture and draw a textured quad the size of the window. So something like this:
glGenTextures(1, &texID);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, texID);
// frame is GL_RGB / GL_UNSIGNED_BYTE in the question, so upload it as such
glTexImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, GL_RGBA, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, frame);
glEnable(GL_TEXTURE_RECTANGLE_EXT); // the target must be enabled for fixed-function texturing
glBegin(GL_QUADS);
glTexCoord2f(0, 0);
glVertex2f(0, 0);
glTexCoord2f(640, 0);
glVertex2f(windowWidth, 0);
glTexCoord2f(640, 480);
glVertex2f(windowWidth, windowHeight);
glTexCoord2f(0, 480);
glVertex2f(0, windowHeight);
glEnd();
Or, if that's too much work, glPixelZoom(windowWidth / 640.0, windowHeight / 480.0) might do the trick, too.
Just for further reference: Instead of doing the glScalef(windowWidth/640, windowHeight/480, 0); before the glDrawPixels you should do a pixel zoom:
glRasterPos2i(0, 0);
GLint iViewport[4];
glGetIntegerv(GL_VIEWPORT, iViewport);
// divide as floats, otherwise the integer division truncates the zoom factor
glPixelZoom(iViewport[2] / 640.0f, iViewport[3] / 480.0f);
glDrawPixels(640, 480, GL_RGB, GL_UNSIGNED_BYTE, frame);
I need to scale the result of glDrawPixels image.
glDrawPixels goes directly to the framebuffer, so every incoming pixel is mapped 1:1 to the output. There is a function (ah, why am I telling you this) glPixelZoom, which allows you to zoom glDrawPixels.
BUT I urge you not to use glDrawPixels!
Use textured quads instead. glDrawPixels is a deprecated function, no longer supported by modern OpenGL 3. And even before it was deprecated it was a very slow function. Textured quads are better in every regard.
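For the textured-quad route the answers recommend, a hedged sketch of the usual streaming pattern: create the texture once, then update it each frame with glTexSubImage2D and draw one window-sized quad. It assumes non-power-of-two GL_TEXTURE_2D textures are available (OpenGL 2.0 or later); texID, frame, windowWidth and windowHeight are the names used above:
// One-time setup (e.g. in initializeGL)
glGenTextures(1, &texID);
glBindTexture(GL_TEXTURE_2D, texID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // no mipmaps needed
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);

// Per frame (e.g. in paintGL): upload the new pixels and draw a quad
glBindTexture(GL_TEXTURE_2D, texID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 640, 480, GL_RGB, GL_UNSIGNED_BYTE, frame);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex2f(0, 0);
glTexCoord2f(1, 0); glVertex2f(windowWidth, 0);
glTexCoord2f(1, 1); glVertex2f(windowWidth, windowHeight);
glTexCoord2f(0, 1); glVertex2f(0, windowHeight);
glEnd();
glDisable(GL_TEXTURE_2D);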

Opengl: 2d HUD over 3D

I have looked at some questions posted here on the matter and still can't work out why my 2D HUD appears but makes my 3D rendered world disappear.
EDIT: It seems that the 2D scene is taking control of the entire screen, so every now and then I can see the 3D scene glitching through the 2D scene. So even though it's only meant to be rendering a quad that's 10 x 10 pixels, it renders this and then blanks out the rest of the screen.
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(40.0,(GLdouble)x/(GLdouble)y,0.5,20.0);
glMatrixMode(GL_MODELVIEW);
glViewport(0,0,x,y);
glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
glTranslatef(0.0,-0.5,-6.0);
glPushMatrix();
..Draw some 3d stuff...
glPopMatrix();
// Start 2d
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glColor3f(0.0f, 255.0f, 1.0f);
glBegin(GL_QUADS);
glVertex2f(0.0, 0.0);
glVertex2f(10.0, 0.0);
glVertex2f(10.0, 10.0);
glVertex2f(0.0, 10.0);
glEnd();
Then I swap buffers
Here is the order of my code. It's like it sets up the 3D space, then sets up the 2D space, which in turn cancels out the 3D space.
Took a little while to figure it out, so just in case others have the same issues:
...After Drawing 3d Stuff...
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glOrtho(0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, -1.0, 10.0);
glMatrixMode(GL_MODELVIEW);
//glPushMatrix(); ----Not sure if I need this
glLoadIdentity();
glDisable(GL_CULL_FACE);
glClear(GL_DEPTH_BUFFER_BIT);
glBegin(GL_QUADS);
glColor3f(1.0f, 0.0f, 0.0);
glVertex2f(0.0, 0.0);
glVertex2f(10.0, 0.0);
glVertex2f(10.0, 10.0);
glVertex2f(0.0, 10.0);
glEnd();
// Making sure we can render 3d again
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
//glPopMatrix(); ----and this?
...Then swap buffers...
:)
If you're overlaying a 2D ortho projection over 3D, you generally want to get the depth buffer out of the equation:
glDepthMask(GL_FALSE); // disable writes to Z-Buffer
glDisable(GL_DEPTH_TEST); // disable depth-testing
Of course, you'll want to reset these to their original values before doing your next 3D pass.
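A hedged sketch of that disable-and-restore pattern wrapped around the HUD pass:
glDepthMask(GL_FALSE);     // stop the HUD from writing to the depth buffer
glDisable(GL_DEPTH_TEST);  // stop the HUD from being depth-tested against the 3D scene

// ... draw the ortho-projected 2D HUD here ...

glDepthMask(GL_TRUE);      // restore for the next 3D pass
glEnable(GL_DEPTH_TEST);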
glViewport(0, 0, x, y); // You only need to do this once, on viewport resize
//Setup for 3D
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(40.0, (GLdouble)x/(GLdouble)y, 0.5, 20.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// ... Render 3D ...
//Setup for 2D
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glClear(GL_DEPTH_BUFFER_BIT); // clear only depth here, so the 3D scene isn't wiped out
// ... Render 2D ...
glutSwapBuffers();
Note that there's no need to handle push/pop of matrices if you render the 2D pass completely on top of the 3D pass.