Problem with QOpenGLWidget, QOpenGLWindow - OpenGL

Can anyone tell me why the following code in my paintGL() does not draw anything when it reaches the glDrawArrays() call? I'm using Qt 5.14.2 in Qt Creator on the latest Windows 10.
A few comments:
I'm experimenting to try to understand the differences between implementations of OpenGL in Qt using the following declarations. In time, I will write my app using the implementation that I like best.
class OpenGLWindow : public QWindow, public QOpenGLFunctions
This works fine when I place the following code in the render() function; no issues at all, very nice!!
class myOpenGLWidget : public QOpenGLWidget, public QOpenGLFunctions
I place the code in the paintGL() function. I can color the background, but glDrawArrays() does nothing. However, I can draw a triangle; the code between the glBegin() and glEnd() statements works. I'm not getting any errors. I test the result of the m_program->bind() call and it comes back true.
class myQOpenGLWindow : public QOpenGLWindow, protected QOpenGLFunctions
Same as #2, except that I place the code in the render() function. I can color the background, but glDrawArrays() does nothing. However, I can draw a triangle; the code between the glBegin() and glEnd() statements works. I test the result of the m_program->bind() call and it comes back true.
In case anybody asks: I'm doing it this way after perusing dozens of different tutorials; it's the best information I've been able to find.
thank you!!
Here is the paintGL()/render() body in question:
{
// Draw the scene:
QOpenGLFunctions *f = QOpenGLContext::currentContext()->functions();
const qreal retinaScale = devicePixelRatio();
f->glViewport(0, 0, width() * retinaScale, height() * retinaScale);
f->glClearColor(red, green, blue, 1.0f);
f->glClear(GL_COLOR_BUFFER_BIT);
QMatrix4x4 matrix;
matrix.perspective(60.0f, 4.0f / 3.0f, 0.1f, 100.0f);
matrix.translate(0, 0, 0);
matrix.rotate(0, 1, 0, 0);
m_program->setUniformValue(m_matrixUniform, matrix);
glBegin(GL_TRIANGLES);
glColor3f(1.0, 0.0, 0.0);
glVertex3f(-0.5, -0.5, 0);
glColor3f(0.0, 1.0, 0.0);
glVertex3f( 0.5, -0.5, 0);
glColor3f(0.0, 0.0, 1.0);
glVertex3f( 0.0, 0.5, 0);
glEnd();
//f->glBindTexture(GL_TEXTURE_2D, 0);
bound = m_program->bind();
GLfloat line_vertices[2160];
for (int v=0;v<360;v++)
{
line_vertices[(6*v)]=-3.5+float(v)/25;
line_vertices[(6*v)+1]=1.1+qSin(5*2*v*(M_PI)/180);
line_vertices[(6*v)+2]=-5;
line_vertices[(6*v)+3]=-3.5+float(v+1)/25;
line_vertices[(6*v)+4]=1.1+qSin(5*2*(v+1)*(M_PI)/180);
line_vertices[(6*v)+5]=-5;
}
GLfloat line_colors[2160];
for (int v=0;v<360;v++)
{
line_colors[(6*v)]=1.0;
line_colors[(6*v)+1]=0;
line_colors[(6*v)+2]=0;
line_colors[(6*v)+3]=1.0;
line_colors[(6*v)+4]=0;
line_colors[(6*v)+5]=0;
}
f->glVertexAttribPointer(m_posAttr, 3, GL_FLOAT, GL_FALSE, 0, line_vertices);
f->glVertexAttribPointer(m_colAttr, 3, GL_FLOAT, GL_FALSE, 0, line_colors);
f->glEnableVertexAttribArray(m_posAttr);
f->glEnableVertexAttribArray(m_colAttr);
f->glDrawArrays(GL_LINES, 0, 360);
f->glDisableVertexAttribArray(m_colAttr);
f->glDisableVertexAttribArray(m_posAttr);
m_program->release();
}

GL_VERSION = 4.6.0
Here is the section of code where I define the position and color vertices, create the VBOs, set up the position and color buffers, and then call glDrawArrays. I have tried this code in both the render() and the paintGL() functions. I get no errors, but I get no pretty lines either. The triangle vertices defined between glBegin() and glEnd() do show up, though.
GLfloat line_vertices[2160];
for (int v=0;v<360;v++)
{
line_vertices[(6*v)]=-3.5+float(v)/25;
line_vertices[(6*v)+1]=1.1+qSin(5*2*v*(M_PI)/180);
line_vertices[(6*v)+2]=-5;
line_vertices[(6*v)+3]=-3.5+float(v+1)/25;
line_vertices[(6*v)+4]=1.1+qSin(5*2*(v+1)*(M_PI)/180);
line_vertices[(6*v)+5]=-5;
}
GLfloat line_colors[2160];
for (int v=0;v<360;v++)
{
line_colors[(6*v)]=1.0;
line_colors[(6*v)+1]=0;
line_colors[(6*v)+2]=0;
line_colors[(6*v)+3]=1.0;
line_colors[(6*v)+4]=0;
line_colors[(6*v)+5]=0;
}
QOpenGLBuffer m_vertexBufferCube;
QOpenGLBuffer m_colorBufferCube;
m_program->setUniformValue(m_matrixUniform, matrix);
m_vertexBufferCube.create();
m_vertexBufferCube.setUsagePattern(QOpenGLBuffer::StaticDraw);
m_vertexBufferCube.allocate(line_vertices, 3 * 360 * sizeof(float));
m_colorBufferCube.create();
m_colorBufferCube.setUsagePattern(QOpenGLBuffer::StaticDraw);
m_colorBufferCube.allocate(line_colors, 3 * 360 * sizeof(float));
bound = m_vertexBufferCube.bind();
//m_program->setAttributeBuffer("vertexPosition", GL_FLOAT,0,3);
m_program->setAttributeBuffer(m_posAttr, GL_FLOAT,0,3);
m_colorBufferCube.bind();
//m_program->setAttributeBuffer("colorPosition", GL_FLOAT,0,3);
m_program->setAttributeBuffer(m_colAttr, GL_FLOAT,0,3);
//glVertexAttribPointer(m_posAttr, 3, GL_FLOAT, GL_FALSE, 0, line_vertices);
//glVertexAttribPointer(m_colAttr, 3, GL_FLOAT, GL_FALSE, 0, line_colors);
//glEnableVertexAttribArray(m_posAttr);
//glEnableVertexAttribArray(m_colAttr);
glDrawArrays(GL_LINES, 0, 360);

I believe this is what you are looking for when you ask to see how I've set up my VAO; please correct me if I'm wrong.
// Create Shader (Do not release until VAO is created)
m_program = new QOpenGLShaderProgram(this);
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, ogl_vertexShaderSource);
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment, ogl_fragmentShaderSource);
m_program->link();
m_posAttr = m_program->attributeLocation("posAttr");
m_colAttr = m_program->attributeLocation("colAttr");
m_matrixUniform = m_program->attributeLocation("matrix");
m_program->release();
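In case it matters, here is roughly how the widget class for declaration #2 is laid out. This is just a skeleton of my own; the member names match the snippets above, and initializeGL()/paintGL() hold the setup and drawing code already shown:
#include <QOpenGLWidget>
#include <QOpenGLFunctions>
#include <QOpenGLShaderProgram>

class myOpenGLWidget : public QOpenGLWidget, public QOpenGLFunctions
{
    Q_OBJECT
public:
    explicit myOpenGLWidget(QWidget *parent = nullptr) : QOpenGLWidget(parent) {}

protected:
    void initializeGL() override;   // initializeOpenGLFunctions() plus the shader setup shown above
    void resizeGL(int w, int h) override;
    void paintGL() override;        // the drawing code shown at the top goes here

private:
    QOpenGLShaderProgram *m_program = nullptr;
    int m_posAttr = 0;
    int m_colAttr = 0;
    int m_matrixUniform = 0;
    bool bound = false;
};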

Related

How to render some vertices in a buffer as points and the rest using lines in qopenglwidget?

I have a set of vertices and normals stored in a buffer. I want to display most of the vertices as points, but I want to draw lines for the remaining few. They are all stored inside one vector, with the points part at the front, and I know how far into the buffer the point data runs. I also know the element count for each drawing task. I am using only one VAO and one buffer object for this task.
I initialize the GLWidget with the following code:
void GLWidget::initializeGL()
{
connect(context(), &QOpenGLContext::aboutToBeDestroyed, this, &GLWidget::cleanup);
initializeOpenGLFunctions();
glClearColor(0, 0, 0, m_transparent ? 0 : 1);
m_program = new QOpenGLShaderProgram;
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, m_core ? vertexShaderSourceCore : vertexShaderSource);
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment, m_core ? fragmentShaderSourceCore : fragmentShaderSource);
m_program->bindAttributeLocation("vertex", 0);
m_program->bindAttributeLocation("normal", 1);
m_program->link();
m_program->bind();
m_projMatrixLoc = m_program->uniformLocation("projMatrix");
m_mvMatrixLoc = m_program->uniformLocation("mvMatrix");
m_normalMatrixLoc = m_program->uniformLocation("normalMatrix");
m_lightPosLoc = m_program->uniformLocation("lightPos");
m_vao.create();
QOpenGLVertexArrayObject::Binder vaoBinder(&m_vao);
m_obj.create();
setupBuffer();
setupVertexAttribs();
m_camera.setToIdentity();
QVector3D camPos = QVector3D(0.0, 0.0, 15.0);
m_camera.translate(-camPos);
QVector3D camTarget = QVector3D(0.0, 0.0, 0.0);
QVector3D camDirection = QVector3D(camPos - camTarget).normalized();
QVector3D worldUp = QVector3D(0.0, 1.0, 0.0);
QVector3D camRight = QVector3D::crossProduct(worldUp, camDirection).normalized();
QVector3D camUp = QVector3D::crossProduct(camDirection, camRight);
m_camera.lookAt(camPos, camTarget, camUp);
// Light position is fixed.
m_program->setUniformValue(m_lightPosLoc, QVector3D(0, 0, 200));
m_program->release();
}
The functions setupBuffer() and setupVertexAttribs() do what their names imply. The vertices are laid out in the buffer as the xyz position of each vertex followed by the xyz of its associated normal. They are implemented as follows:
void GLWidget::setupBuffer()
{
m_obj.bind();
m_obj.allocate(vertices.constData(), vertices.size() * sizeof(GLfloat));
}
void GLWidget::setupVertexAttribs()
{
m_obj.bind();
QOpenGLFunctions *f = QOpenGLContext::currentContext()->functions();
f->glEnableVertexAttribArray(0);
f->glEnableVertexAttribArray(1);
f->glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), reinterpret_cast<void *>(0));
f->glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), reinterpret_cast<void *>(3 * sizeof(GLfloat)));
m_obj.release();
}
Now, the QVector vertices is the buffer that is being passed to OpenGL. The last few entries in this vector are the vertices that need to be drawn using GL_LINES.
My paintGL() function looks something like this:
void GLWidget::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glEnable(GL_POINT_SIZE);
glEnable(GL_LINE_WIDTH);
glPointSize(2);
glLineWidth(10);
m_world.setToIdentity();
m_world.rotate(180.0f - (m_xRot / 16.0f), 1, 0, 0);
m_world.rotate(m_yRot / 16.0f, 0, 1, 0);
m_world.rotate(m_zRot / 16.0f, 0, 0, 1);
m_world.scale(m_dispScale);
QOpenGLVertexArrayObject::Binder vaoBinder(&m_vao);
m_program->bind();
m_program->setUniformValue(m_projMatrixLoc, m_proj);
m_program->setUniformValue(m_mvMatrixLoc, m_camera * m_world);
QMatrix3x3 normalMatrix = m_world.normalMatrix();
m_program->setUniformValue(m_normalMatrixLoc, normalMatrix);
glDrawArrays(GL_POINTS, 0, vertices.size() - camVertices.size());
// Draw camera frustums
glDrawArrays(GL_LINES, vertices.size() - camVertices.size(), camVertices.size());
//glDrawElements(GL_POINTS, vecIndices.size(), GL_UNSIGNED_INT, 0);
m_program->release();
}
QVector camVertices is another vector that contains the points that need to be drawn using lines. The data in camVertices is appended to the end of the vector 'vertices' before rendering. As seen in the above code, I call the glDrawArrays function twice: first starting from index 0 of the buffer, then starting from where the previous call ended, to display the remaining points.
The problem is that the points are displayed fine, but the second call does not draw any lines.
Here's a link to a screenshot of the displayed output - https://drive.google.com/open?id=1i7CjO1qkBALw78KKYGvBteydhfAWh3wh
The picture shows an example of the displayed output, where the bright green points seen at the top, outlying from the rest (a box of many points), are the ones that are to be drawn with lines. However, I only see points and no lines.
I did a simple test and I'm able to draw points and lines using the following code:
glDrawArrays(GL_POINTS, 0, verticesCount() - 10);
glDrawArrays(GL_LINES, 10, 10);
This is not very different from yours except for the variables. I'm also using one VAO, so it's definitely possible to draw lines after points, as we would expect (the full paintGL() I used for the test is sketched after these questions).
Could you try the same (using an integer instead of your variables)?
Can you show the debug information on your vertices?
Can you upload a minimal compilable example?
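For reference, the paintGL() I used for that test is essentially yours with the two draw calls hard-coded; verticesCount() is just my own helper returning the number of vertices in the buffer:
void GLWidget::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    QOpenGLVertexArrayObject::Binder vaoBinder(&m_vao);   // the one VAO used for both draws
    m_program->bind();
    m_program->setUniformValue(m_projMatrixLoc, m_proj);
    m_program->setUniformValue(m_mvMatrixLoc, m_camera * m_world);

    // both the first and count arguments are vertex counts into the same buffer
    glDrawArrays(GL_POINTS, 0, verticesCount() - 10);
    glDrawArrays(GL_LINES, 10, 10);

    m_program->release();
}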

OpenGL vertex array not rendering

I'm trying to draw some basic triangles using OpenGL, but nothing is rendering on screen. These are the relevant functions:
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glFrontFace(GL_CW);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
Vertex vertices[] = {Vertex(Vector3f(0.0, 1.0, 0.0)),
Vertex(Vector3f(-1.0, -1.0, 0.0)),
Vertex(Vector3f(1.0, -1.0, 0.0))};
mesh.addVertices(vertices, 3);
Pastebin links to Vertex.hpp and Vector3f.hpp:
Vertex.hpp
Vector3f.hpp
/*
* Mesh.cpp:
*/
Mesh::Mesh()
{
glGenBuffers(1, &m_vbo); // unsigned int Mesh::m_vbo
}
void Mesh::addVertices(Vertex vertices[4], int indexSize)
{
m_size = indexSize * 3;
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBufferData(GL_ARRAY_BUFFER, m_size, vertices, GL_STATIC_DRAW);
}
void Mesh::draw()
{
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 4 * sizeof(Vertex), 0);
glDrawArrays(GL_TRIANGLES, 0, m_size);
glDisableVertexAttribArray(0);
}
It's just black if I call glClear, otherwise just the random noise of a default window. I can make it draw a triangle by using the most primitive method:
glBegin(GL_TRIANGLES);
glColor3f(0.4, 0.0, 0.0);
glVertex2d(0.0, 0.5);
glVertex2d(-0.5, -0.5);
glVertex2d(0.5, -0.5);
glEnd();
That works and displays what it should do correctly, so I guess that at least says my application is not 100% busted. The tutorial I'm following is in Java, and I'm translating it to C++ SFML as I go along, so I guess it's possible that something got lost in translation so to speak, unless I'm just missing something really basic (more likely.)
How do we fix this so it uses the Vertex list to draw the triangle like it's supposed to?
So many mistakes. There are truly a lot of examples, in any language, so why?
const float pi = 3.141592653589793; is a member field of Vector3f. Do you realise this is a non-static member, included in each and every Vector3f you use, so your vectors actually have four elements: x, y, z, and pi? Did you inform GL about it, so it could skip this garbage data? I don't think so.
You're using glVertexAttribPointer, but you don't have an active shader. There is no guarantee that position is in slot 0. Either use glVertexPointer, or use a shader with the position attribute bound to 0.
void Mesh::addVertices(Vertex vertices[4], int indexSize): what is the [4] supposed to mean here? While it is not an error, it is at least misleading.
glBufferData(GL_ARRAY_BUFFER, m_size, vertices, GL_STATIC_DRAW); m_size is 3*3 in your example, while the documentation says it should be the array size in bytes, which is sizeof(Vertex) * indexSize.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 4 * sizeof(Vertex), 0); why is the stride parameter 4 * sizeof(Vertex)? Either set it to 0 or write the correct stride, which is sizeof(Vertex).
glDrawArrays(GL_TRIANGLES, 0, m_size); m_size is already [incorrectly] set as the "vertex buffer size", while glDrawArrays expects the number of vertices to draw, which is m_size / sizeof(Vertex) (given m_size is calculated correctly).
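Putting those points together, a sketch of the corrected class could look like the following (using the fixed-function glVertexPointer path, since no shader is bound; indexSize is renamed to count for clarity):
void Mesh::addVertices(Vertex *vertices, int count)
{
    m_size = count * sizeof(Vertex);                 // buffer size in bytes
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    glBufferData(GL_ARRAY_BUFFER, m_size, vertices, GL_STATIC_DRAW);
}

void Mesh::draw()
{
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, sizeof(Vertex), nullptr);    // stride sizeof(Vertex) skips the extra 'pi' member
    glDrawArrays(GL_TRIANGLES, 0, m_size / sizeof(Vertex));   // number of vertices, not bytes
    glDisableClientState(GL_VERTEX_ARRAY);
}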

Drawing an array

I am trying to draw one line with ar[]. It contains the point coords. I am also trying to use the color described in clr[]. Can anyone tell me what is wrong with my ver function? When I run it, only a white screen comes up.
void ver(void)
{
glClear(GL_DEPTH_BUFFER_BIT);
glPushMatrix();
GLfloat ar [] = {0.25, 0.25,
0.5, 0.25,
};
GLfloat clr [] = {1.0, 0.0,0.0
};
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(2,GL_FLOAT, 0, ar);
glColorPointer(3,GL_FLOAT,0,clr);
glDrawElements(GL_LINES, 2, GL_FLOAT, ar);
glDrawElements(GL_LINES, 3, GL_FLOAT, clr);
glPopMatrix();
glutSwapBuffers();
}
Your call to glDrawElements() is wrong. You need to pass an array of indices to it, and you only need to call it once. So you need something like this:
GLuint indices[] = { 0, 1, 2, 3 };
glDrawElements (GL_LINES, 2, GL_UNSIGNED_INT, indices);
Also, I think you need to expand your color array to have one color per vertex, so it should look more like:
GLfloat clr [] = { 1.0, 0.0, 0.0,
1.0, 0.0, 0.0 };
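So, as a sketch, the whole function would end up looking roughly like this (one line, two vertices, one color per vertex):
void ver(void)
{
    glClear(GL_DEPTH_BUFFER_BIT);
    glPushMatrix();

    GLfloat ar[] = { 0.25, 0.25,
                     0.5,  0.25 };        // two 2D vertices
    GLfloat clr[] = { 1.0, 0.0, 0.0,
                      1.0, 0.0, 0.0 };    // one RGB color per vertex
    GLuint indices[] = { 0, 1 };

    glEnableClientState(GL_COLOR_ARRAY);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, ar);
    glColorPointer(3, GL_FLOAT, 0, clr);

    // a single draw call; the indices select vertices from both arrays
    glDrawElements(GL_LINES, 2, GL_UNSIGNED_INT, indices);

    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);

    glPopMatrix();
    glutSwapBuffers();
}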

Why do the colours of my model blur together?

I have been trying to get an object to be lit properly all day long, without results, so I'm going to try here. In essence, I am trying to get the object to look like this:
While in my program it looks like this:
Here's my context:
glClearDepth(1.0);
glShadeModel(GL_SMOOTH);
glEnable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glLight(GL_LIGHT0, GL_POSITION, {0, 5, 0, 1});
glDisable(GL_TEXTURE_2D);
And my material/lighting settings:
glMaterialf(GL_FRONT, GL_SHININESS, 10);
glLight(GL_LIGHT0, GL_AMBIENT, {0.1, 0.1, 0.1, 1.0});
glLight(GL_LIGHT0, GL_DIFFUSE, {0.2, 0.2, 0.2, 1.0});
glLight(GL_LIGHT0, GL_SPECULAR, {0.6, 0.6, 0.6, 1.0});
glMaterial(GL_FRONT, GL_AMBIENT, {1, 0.8078, 0});
glMaterial(GL_FRONT, GL_DIFFUSE, {1, 0.8078, 0});
glMaterial(GL_FRONT, GL_SPECULAR, {0.5, 0.5, 0.5});
glMaterial(GL_FRONT, GL_EMISSION, {0, 0, 0});
[I used {r, g, b, a} to denote an array for simplicity. I looked up the actual values that were used to draw the model and wrote them into the arrays]
The main problem is that whenever my objects get fully lit, everything "clutters" together into the ambient colour. There are no lighter and darker parts depending on the orientation of the fragment anymore, just one chunk of solid colour.
I have searched the whole project for openGL settings I may have missed, though the only thing I found was what you see above (omitting a few calls to reset the projection and modelview matrices, clearing the screen, and a translation/rotation).
I have also tried to alter the values of the lights and materials, without much success. Changing the ambient colour just causes the whole model to become brighter. I also tried moving the light.
EDIT: By request, here's how I store and draw the model:
I load the model from an OBJ file. I looked at the file itself, which lists normals. The loader also recognizes that.
Here's how it draws the VBO:
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vertexBufferID);
glVertexPointer(3, GL_FLOAT, stride, 0 * 4);
glNormalPointer(GL_FLOAT, stride, 3 * 4);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, indexBufferID);
glDrawElements(GL_TRIANGLES, this.numberOfVertices, GL_UNSIGNED_INT, 0);
The VBO is interleaved, formatted like [vertex, normal, vertex, normal, ..]. I dropped some print calls in the drawing code, and it sets the vertex and normal pointers. Hence I am pretty sure that the VBO itself is loaded and drawn correctly.
It's crashing because you leave the GL_TEXTURE_COORD_ARRAY client state enabled, so it tries to access whatever is at the texcoord pointer, which is null.
As with many problems, the answer was unexpected. The first part was the switch statement I used to determine how to set the data pointers of the VBO:
private void setDataPointers() {
int stride = this.dataFormat.elementsPerVertex * 4;
glVertexPointer(3, GL_FLOAT, stride, 0 * 4);
switch(this.dataFormat)
{
case VERTICES_AND_TEXTURES:
glTexCoordPointer(2, GL_FLOAT, stride, (3)*4);
case VERTICES_AND_NORMALS:
glNormalPointer(GL_FLOAT, stride, 3 * 4);
case VERTICES_TEXTURES_NORMALS:
glTexCoordPointer(2, GL_FLOAT, stride, 3 * 4);
glNormalPointer(GL_FLOAT, stride, (3 + 3) * 4);
}
}
As you can see, the break; statements are missing, so the vertex, normal AND texture pointers would all be set.
Second, when drawing the VBO, all three client side modes were enabled:
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
So the renderer was looking for texture coordinates that didn't exist, and somehow spit out the strange unlit geometry.

Position moving 2D object openGL

I have an object moving back and forth on the x axis; however, I can't position it further right along the x axis.
This is my code. How do I do it?
float moveRad = 0.0;
moveRad = moveBee * (PI/180.0);
moveBee += 0.1;
glPushMatrix();
glTranslatef(50.0 * sinf(moveRad), -100,0);
e[0] = new Platform(0, 0, 0, 40, 33, 40, 33, 00, textures[23], (50.0 * sinf(moveRad)), -100);
glPopMatrix();
Platform.cpp creates the object like so:
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex2f(x1,y1);
glTexCoord2f(0.0, 1.0); glVertex2f(x2,y2);
glTexCoord2f(1.0, 1.0); glVertex2f(x3,y3);
glTexCoord2f(1.0, 0.0); glVertex2f(x4,y4);
glEnd();
I have the feeling you suffer from a misconception of how OpenGL works. You wrote "Platform.cpp creates the object like so:", and in the code snippet before it I can see you're creating an instance of some Platform class surrounded by OpenGL matrix stack operations. I suspect you assumed that OpenGL would somehow "store" this "object". This is not how OpenGL works. You're thinking in terms of a scene graph, but OpenGL is not a scene graph.
OpenGL is a drawing API. The calls
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex2f(x1,y1);
glTexCoord2f(0.0, 1.0); glVertex2f(x2,y2);
glTexCoord2f(1.0, 1.0); glVertex2f(x3,y3);
glTexCoord2f(1.0, 0.0); glVertex2f(x4,y4);
glEnd();
draw a quad to the screen. Again: they draw it. After those commands have been issued, they are gone and forgotten by OpenGL. The OpenGL transformation matrices are used for transforming the drawing commands' input data, but again there's no persistence. The drawing commands have to be issued for every frame drawn. I first thought I could rewrite some of your code, but it needs to be rewritten from the ground up, if I may say so.
The typical OpenGL program looks like this (I liberally omit all the class and type definitions and expect some common sense interpreting the variable, member and method names).
/* draw_scene is called on every iteration of the program main loop or
the drawing callback handler to update the screen */
void Scene::draw_scene(ScreenInfo si)
{
glViewport(si.viewport.x, si.viewport.y, si.viewport.width, si.viewport.height);
glClearColor(this->clear.r, this->clear.g, this->clear.b, this->clear.a);
glClearDepth(this->clear.d);
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
glDepthMask(GL_TRUE);
glClear( (this->clear.color ? GL_COLOR_BUFFER_BIT : 0) |
(this->clear.depth ? GL_DEPTH_BUFFER_BIT : 0) );
std::list<SceneObjects*> objects_by_distance =
sort_objects_by_direction(scene->objects,
scene->active_camera->position,
scene->active_camera->direction);
SceneObjects *closest_object = objects_by_distance.front();
SceneObjects *farthest_object = objects_by_distance.back();
float near_clip = max(NEAR_CLIP_LIMIT,
length(closest_object->position - scene->active_camera->position)
- closest_object->bounding_sphere.radius );
float far_clip = min(FAR_CLIP_LIMIT,
length(farthest_object->position - scene->active_camera->position)
+ farthest_object->bounding_sphere.radius );
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
switch( scene->projection.type ) {
case Projection::perspective: {
gluPerspective( scene->projection.fov,
(float)si.viewport.width/(float)si.viewport.height,
near_clip, far_clip);
} break;
case Projection::orthographic: {
float aspect = (float)si.viewport.width/(float)si.viewport.height;
glOrtho( -0.5 * scene->projection.size * aspect, 0.5 * scene->projection.size * aspect,
-0.5 * scene->projection.size, 0.5 * scene->projection.size,
near_clip, far_clip );
} break;
}
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
/* I normally disregard using gluLookAt, but in this case I use it
to show as much as possible! */
gluLookAt( scene->active_camera->position.x, scene->active_camera->position.y, scene->active_camera->position.z,
scene->active_camera->position.x + scene->active_camera->direction.x,
scene->active_camera->position.y + scene->active_camera->direction.y,
scene->active_camera->position.z + scene->active_camera->direction.z,
scene->active_camera->up.x, scene->active_camera->up.y, scene->active_camera->up.z );
std::for_each(scene->objects.begin(), scene->objects.end(), draw_object);
}
void draw_object(SceneObject *object)
{
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glTranslatef(object->position.x, object->position.y, object->position.z);
glRotatef(object->rotation.axis.angle, object->rotation.axis.x, object->rotation.axis.y, object->rotation.axis.z);
GLfloat (*vertex_ptr)[3][3] = object->mesh->vertices; /* per vertex: normal, texcoord, position */
GLuint *vertex_indices = object->mesh->face_vertex_indices;
#ifdef USE_IMMEDIATE_MODE
glBegin(GL_TRIANGLES);
for(int i = 0; i < object->mesh->face_count * 3; i += 3) {
glNormalfv(vertex_ptr[vertex_indices[i]][0]);
glTexCoord3fv(vertex_ptr[vertex_indices[i]][1]);
glVertex3fv(vertex_ptr[vertex_indices[i]][2]);
glNormalfv(vertex_ptr[vertex_indices[i+1]][0]);
glTexCoord3fv(vertex_ptr[vertex_indices[i+1]][1]);
glVertex3fv(vertex_ptr[vertex_indices[i+1]][2]);
glNormalfv(vertex_ptr[vertex_indices[i+2]][0]);
glTexCoord3fv(vertex_ptr[vertex_indices[i+2]][1]);
glVertex3fv(vertex_ptr[vertex_indices[i+2]][2]);
}
glEnd();
#else
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
/* This is direct vertex array mode.
A more modern approach is using Vertex Buffer Objects, which reused this
API, but adds further function calls. */
glNormalPointer(GL_FLOAT, 3*3*sizeof(GLfloat), &vertex_ptr[0][0]);
glTexCoordPointer(3, GL_FLOAT, 3*3*sizeof(GLfloat), &vertex_ptr[0][1]);
glVertexPointer(3, GL_FLOAT, 3*3*sizeof(GLfloat), &vertex_ptr[0][2]);
glDrawElements(GL_TRIANGLES, object->mesh->face_count*3, GL_UNSIGNED_INT, vertex_indices);
#endif
glPopMatrix();
}
This is the most basic way to use OpenGL seriously. I wrote it in this much detail to give you an idea of how to use it and how it works.
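To illustrate the remark in the comment above about Vertex Buffer Objects: the same pointer API is reused, but the data lives in buffer objects and the pointer arguments become byte offsets. A rough sketch, where vbo, ibo, vertex_data and vertex_count are placeholder names of mine:
/* one-time setup: upload the mesh into buffer objects */
GLuint vbo, ibo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, vertex_count * 3*3*sizeof(GLfloat), vertex_data, GL_STATIC_DRAW);
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, face_count * 3 * sizeof(GLuint), vertex_indices, GL_STATIC_DRAW);

/* at draw time, with both buffers bound, the "pointers" are byte offsets into the VBO */
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glNormalPointer(GL_FLOAT, 3*3*sizeof(GLfloat), (const GLvoid *)(0 * sizeof(GLfloat)));
glTexCoordPointer(3, GL_FLOAT, 3*3*sizeof(GLfloat), (const GLvoid *)(3 * sizeof(GLfloat)));
glVertexPointer(3, GL_FLOAT, 3*3*sizeof(GLfloat), (const GLvoid *)(6 * sizeof(GLfloat)));
glDrawElements(GL_TRIANGLES, face_count * 3, GL_UNSIGNED_INT, (const GLvoid *)0);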
Why don't you adjust the x-axis scaling in your call to glTranslatef?
glTranslatef(amplitude * sinf(moveRad), -100,0);
I have a feeling you don't know exactly what your code is doing (correct me if I'm wrong). If you want to move it to the right, just add a number in here.
glTranslatef(50.0 * sinf(moveRad) + 30, -100,0);
I'll update my answer if necessary.
I think your problem is the '50.0 * sinf(moveRad)' - that will oscillate between -50 and 50. Try adding a value instead of, or as well as, multiplying.
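For example, something along these lines, where xOffset is however far right you want to shift the centre of the motion:
float xOffset = 30.0f;   // shifts the whole oscillation to the right
glTranslatef(50.0 * sinf(moveRad) + xOffset, -100, 0);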