C++ OpenGL mesh rendering - c++

I know there are a lot of resources about this on the internet but they didn't quite seem to help me.
What I want to achieve:
I am baking a mesh from data which stores the vertices inside a vector<Vector3>.
(Vector3 is a struct containing float x, y, z)
It stores triangles in a map<int, vector<int>>
(the key of the map is the submesh index and the vector<int> holds the triangle indices)
the UVs inside a vector<Vector2>
(Vector2 is a struct containing float x, y)
and a color value in vector<Color>
(the color value applies to vertices like the uv does)
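For reference, here is a minimal sketch of the data layout described above; the struct and field names beyond those mentioned are assumptions based on the rendering code below, not a confirmed definition:
#include <map>
#include <vector>

struct Vector3 { float x, y, z; };
struct Vector2 { float x, y; };
struct Color   { float r, g, b, a; };  // assumed 0-255 range, per the /255.0f divisions below
struct Texture { unsigned int id; };   // hypothetical wrapper holding a GL texture name

struct Mesh {
    std::vector<Vector3> vertices;             // one position per vertex
    std::map<int, std::vector<int>> triangles; // submesh index -> triangle vertex indices
    std::vector<Vector2> uvs;                  // one texture coordinate per vertex
    std::vector<Color> vertexColors;           // one color per vertex
    std::vector<Texture> textures;             // one texture per submesh
};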
Now I want to write code that reads that data and draws it to the screen with maximum performance.
What I got:
static void renderMesh(Mesh mesh, float x, float y, float z) {
    if (mesh.triangles.empty()) return;
    if (mesh.vertices.empty()) return;
    if (mesh.uvs.empty()) return;
    glColor3f(1, 1, 1);
    typedef std::map<int, std::vector<int>>::iterator it_type;
    for (it_type iterator = mesh.triangles.begin(); iterator != mesh.triangles.end(); iterator++) {
        int submesh = iterator->first;
        if (submesh < (int)mesh.textures.size()) glBindTexture(GL_TEXTURE_2D, mesh.textures[submesh].id);
        else glBindTexture(GL_TEXTURE_2D, 0);
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        for (size_t i = 0; i < iterator->second.size(); i += 3) {
            int t0 = iterator->second[i + 0];
            int t1 = iterator->second[i + 1];
            int t2 = iterator->second[i + 2];
            Vector3 v0 = mesh.vertices[t0];
            Vector3 v1 = mesh.vertices[t1];
            Vector3 v2 = mesh.vertices[t2];
            Color c0 = mesh.vertexColors[t0];
            Color c1 = mesh.vertexColors[t1];
            Color c2 = mesh.vertexColors[t2];
            Vector2 u0 = mesh.uvs[t0];
            Vector2 u1 = mesh.uvs[t1];
            Vector2 u2 = mesh.uvs[t2];
            glBegin(GL_TRIANGLES);
            glColor4f(c0.r / 255.0f, c0.g / 255.0f, c0.b / 255.0f, c0.a / 255.0f); glTexCoord2f(u0.x, u0.y); glVertex3f(v0.x + x, v0.y + y, v0.z + z);
            glColor4f(c1.r / 255.0f, c1.g / 255.0f, c1.b / 255.0f, c1.a / 255.0f); glTexCoord2f(u1.x, u1.y); glVertex3f(v1.x + x, v1.y + y, v1.z + z);
            glColor4f(c2.r / 255.0f, c2.g / 255.0f, c2.b / 255.0f, c2.a / 255.0f); glTexCoord2f(u2.x, u2.y); glVertex3f(v2.x + x, v2.y + y, v2.z + z);
            glEnd();
            glColor3f(1, 1, 1);
        }
    }
}
The problem:
I found out that the way I render is not optimal and that you can achieve higher performance with glDrawArrays (I think it was called).
Could you help me rewrite my code to use glDrawArrays? What I have found so far on the internet has not helped me much.
Thanks, and if there is any more information needed just ask.

The use of functions like glBegin and glEnd is deprecated. Functions like glDrawArrays offer better performance, but are slightly more complicated to use.
The problem with glBegin-style rendering is that you have to submit each vertex one by one, every time you want to draw something. Modern graphics cards can render thousands of vertices very quickly, but if you feed them one at a time, rendering becomes laggy regardless of how fast your graphics card is.
The main advantage of glDrawArrays is that you initialize your arrays once and then draw them with a single call. So first, at the start of your program, fill one array per attribute. In your case: positions, colors, and texture coordinates. They must be float arrays, something like this:
std::vector<float> vertices;
std::vector<float> colors;
std::vector<float> textureCoords;
// 'iterator' walks mesh.triangles, as in the renderMesh loop above
for (size_t i = 0; i < iterator->second.size(); i += 3) {
    int t0 = iterator->second[i + 0];
    int t1 = iterator->second[i + 1];
    int t2 = iterator->second[i + 2];
    vertices.push_back(mesh.vertices[t0].x);
    vertices.push_back(mesh.vertices[t0].y);
    vertices.push_back(mesh.vertices[t0].z);
    vertices.push_back(mesh.vertices[t1].x);
    vertices.push_back(mesh.vertices[t1].y);
    vertices.push_back(mesh.vertices[t1].z);
    vertices.push_back(mesh.vertices[t2].x);
    vertices.push_back(mesh.vertices[t2].y);
    vertices.push_back(mesh.vertices[t2].z);
    // [...] Same for colors and texture coords (see the sketch below).
}
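The colors need the same 0-255 to 0-1 conversion that the glColor4f calls were doing; here is a minimal sketch of that part, using the same assumed Mesh layout as above, placed inside the loop right after t0, t1, t2 are read:
// Hedged sketch: append colors (converted to 0..1 floats) and texture
// coordinates for the three indices of the current triangle.
for (int t : { t0, t1, t2 }) {
    const Color& c = mesh.vertexColors[t];
    colors.push_back(c.r / 255.0f);
    colors.push_back(c.g / 255.0f);
    colors.push_back(c.b / 255.0f);
    colors.push_back(c.a / 255.0f);

    const Vector2& u = mesh.uvs[t];
    textureCoords.push_back(u.x);
    textureCoords.push_back(u.y);
}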
Then, in another function used only for display, you can use these arrays to draw:
// Enable everything you need
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Set your used arrays
glVertexPointer(3, GL_FLOAT, 0, vertices.data());
glColorPointer(4, GL_FLOAT, 0, colors.data());
glTexCoordPointer(2, GL_FLOAT, 0, textureCoords.data());
// Draw your mesh
glDrawArrays(GL_TRIANGLES, 0, size); // 'size' is the number of your vertices.
// Reset initial state
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
Of course, you'll still have to enable any other state you want to use, such as texturing or blending.
NOTE:
If you wish to learn more about performance, there are also functions that use indices to reduce the amount of data submitted, like glDrawElements.
There are also more advanced OpenGL techniques that allow you to increase performance by storing your data directly in graphics card memory, like Vertex Buffer Objects (see the sketch below).
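For completeness, here is a minimal sketch of the VBO variant just mentioned; the buffer handling shown is an assumption on top of the answer, not part of it:
// One-time setup: upload the position array into a buffer object on the GPU.
GLuint vbo = 0;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(float),
             vertices.data(), GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);

// Per frame: source the vertex array from the VBO instead of client memory.
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*)0); // offset 0 into the bound VBO
glDrawArrays(GL_TRIANGLES, 0, (GLsizei)(vertices.size() / 3));
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Colors and texture coordinates work the same way, each with its own buffer.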

Related

OpenGL - animating the data of the vector as separate frames

I have the output of a calculation (basically a certain number of cuboids in certain rotations) stored in the std::vector Box, based on which I am creating the model matrices for OpenGL visualization:
std::vector<glm::mat4> modelMatrices;
for (int32_t i = 0; i < Box.number_of_cuboids(); i++)
{
    float rx, ry, rz, teta;
    Box.cuboid(i).get_rotation(rx, ry, rz, teta, j); // 'j' comes from the surrounding code
    float x, y, z;
    Box.cuboid(i).position(x, y, z, j);
    glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(x, y, z))
        * glm::rotate(glm::mat4(1.0f), teta, glm::vec3(rx, ry, rz))
        * glm::scale(glm::mat4(1.0f), glm::vec3(
              Box.cuboid(i).width(), Box.cuboid(i).length(j), Box.cuboid(i).height()));
    modelMatrices.push_back(model);
}
and I can successfully visualise them like this:
while (!glfwWindowShouldClose(window))
{
    processInput(window, Box.size_x(), Box.size_y(), Box.size_z());
    glClearColor(0.95f, 0.95f, 0.95f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    /* shaders part <...> */
    for (int32_t i = 0; i < modelMatrices.size(); i++)
    {
        ourShader.setMat4("model", modelMatrices[i]);
        glDrawArrays(GL_TRIANGLES, 0, 36);
    }
    glfwSwapBuffers(window);
    glfwPollEvents();
}
My problem is that Box is already the final output of the calculations. I would like to see all the iteration steps, so basically what I would like to do is:
function ManyCalculations (std::vector<Box>)
{
    at every iteration, save the current status of Box into the vector
}
So after, let's say, 10000 iterations I end up with the same number of Box elements, and now I would like to play that vector back as frames/an animation (video?) in my OpenGL function, so I can watch the calculation of the contents evolve.
To animate your objects each frame, you need to keep updating your Vertex Buffer Objects (VBOs).
You can either keep adding each new box per frame or change the translation matrices.
Then set the new data in your VBO before drawing.
If you know the maximum size of your data, you can build one large VBO and keep updating its contents with glBufferSubData (see the sketch after the snippet below):
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
glBufferData(GL_ARRAY_BUFFER, Box.number_of_cuboids() * sizeof(cuboids), &Box[0], GL_DYNAMIC_DRAW);
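A minimal sketch of the glBufferSubData approach just described, assuming you saved one snapshot per iteration in a std::vector named history and have some way to flatten a snapshot into vertex data (flattenToVertices is a hypothetical helper):
// Allocate once, sized for the largest snapshot, with no initial data.
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
glBufferData(GL_ARRAY_BUFFER, maxSnapshotBytes, nullptr, GL_DYNAMIC_DRAW);

// Each frame, overwrite the buffer with the next snapshot, then draw as before.
std::vector<float> frameData = flattenToVertices(history[frameIndex]); // hypothetical
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
glBufferSubData(GL_ARRAY_BUFFER, 0,
                frameData.size() * sizeof(float), frameData.data());
frameIndex = (frameIndex + 1) % history.size();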

How can I draw a circle in OpenGL Core 3.3 with orthographic projection?

I'm a complete beginner to OpenGL programming and am trying to follow the Breakout tutorial at learnopengl.com, but would like to draw the ball as an actual circle instead of using a textured quad like Joey suggests. However, every result that Google throws back at me for "draw circle opengl 3.3" or similar phrases seems to be at least a few years old, and to use even-older-than-that versions of the API :-(
The closest thing that I've found is this SO question, but of course the OP just had to use a custom VertexFormat object to abstract some of the details, without sharing his/her implementation of such! Just my luck! :P
There's also this YouTube tutorial that uses a seemingly-older version of the API, but copying the code verbatim (except for the last few lines which is where the code looks old) still got me nowhere.
My version of SpriteRenderer::initRenderData() from the tutorial:
void SpriteRenderer::initRenderData() {
    GLuint vbo;
    auto attribSize = 0;
    GLfloat* vertices = nullptr;
    // Determine whether this sprite is a circle or
    // quad and set up the vertices array accordingly
    if (!this->isCircle) {
        attribSize = 4;
        vertices = new GLfloat[24] {...} // works for rendering quads
    } else {
        // This code is adapted from the YouTube tutorial that I linked
        // above and is where things go pear-shaped for me...or at least
        // **not** circle-shaped :P
        attribSize = 3;
        GLfloat x = 0.0f;
        GLfloat y = 0.0f;
        GLfloat z = 0.0f;
        GLfloat r = 100.0f;
        GLint numSides = 6;
        GLint numVertices = numSides + 2;
        GLfloat* xCoords = new GLfloat[numVertices];
        GLfloat* yCoords = new GLfloat[numVertices];
        GLfloat* zCoords = new GLfloat[numVertices];
        xCoords[0] = x;
        yCoords[0] = y;
        zCoords[0] = z;
        for (auto i = 1; i < numVertices; i++) {
            xCoords[i] = x + (r * cos(i * (M_PI * 2.0f) / numSides));
            yCoords[i] = y + (r * sin(i * (M_PI * 2.0f) / numSides));
            zCoords[i] = z;
        }
        vertices = new GLfloat[numVertices * 3];
        for (auto i = 0; i < numVertices; i++) {
            vertices[i * 3] = xCoords[i];
            vertices[i * 3 + 1] = yCoords[i];
            vertices[i * 3 + 2] = zCoords[i];
        }
    }
    // This is where I go back to the learnopengl.com code. Once
    // again, the following works for quads but not circles!
    glGenVertexArrays(1, &vao);
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, 24 * sizeof(GLfloat), vertices, GL_STATIC_DRAW);
    glBindVertexArray(vao);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, attribSize, GL_FLOAT, GL_FALSE,
                          attribSize * sizeof(GLfloat), (GLvoid*)0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);
}
And here's the SpriteRenderer::DrawSprite() method (the only difference from the original being lines 24 - 28):
void SpriteRenderer::Draw(vec2 position, vec2 size, GLfloat rotation, vec3 colour) {
    // Prepare transformations
    shader.Use();
    auto model = mat4(1.0f);
    model = translate(model, vec3(position, 0.0f));
    model = translate(model, vec3(0.5f * size.x, 0.5f * size.y, 0.0f)); // Move origin of rotation to center
    model = rotate(model, rotation, vec3(0.0f, 0.0f, 1.0f)); // Rotate quad
    model = translate(model, vec3(-0.5f * size.x, -0.5f * size.y, 0.0f)); // Move origin back
    model = scale(model, vec3(size, 1.0f)); // Lastly, scale
    shader.SetMatrix4("model", model);
    // Render textured quad
    shader.SetVector3f("spriteColour", colour);
    glActiveTexture(GL_TEXTURE0);
    texture.Bind();
    glBindVertexArray(vao);
    if (!isCircular) {
        glDrawArrays(GL_TRIANGLES, 0, 6);
    } else {
        glDrawArrays(GL_TRIANGLE_FAN, 0, 24); // also tried "12" and "8" for the last param, to no avail
    }
    glBindVertexArray(0);
}
And finally, the shaders (different to the ones used for quads):
// Vertex shader
#version 330 core
layout (location = 0) in vec3 position;
uniform mat4 model;
uniform mat4 projection;
void main() {
    gl_Position = projection * model * vec4(position.xyz, 1.0f);
}

// Fragment shader
#version 330 core
out vec4 colour;
uniform vec3 spriteColour;
void main() {
    colour = vec4(spriteColour, 1.0);
}
P.S. I know I could just use a quad but I'm trying to learn how to draw all primitives in OpenGL, not just quads and triangles (thanks anyway Joey)!
P.P.S I just realised that the learnopengl.com site has a whole section devoted to debugging OpenGL apps, so I set that up but to no avail :-( I don't think the error handling is supported by my driver (Intel UHD Graphics 620 latest driver) since the GL_CONTEXT_FLAG_DEBUG_BIT was not set after following the instructions:
Requesting a debug context in GLFW is surprisingly easy as all we have to do is pass a hint to GLFW that we'd like to have a debug output context. We have to do this before we call glfwCreateWindow:
glfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, GL_TRUE);
Once we initialize GLFW we should have a debug context if we're using OpenGL version 4.3 or higher, or else we have to take our chances and hope the system is still able to request a debug context. Otherwise we have to request debug output using its OpenGL extension(s).
To check if we successfully initialized a debug context we can query OpenGL:
GLint flags;
glGetIntegerv(GL_CONTEXT_FLAGS, &flags);
if (flags & GL_CONTEXT_FLAG_DEBUG_BIT) {
    // initialize debug output
}
That if statement is never entered into!
Thanks to #Mykola's answer to this question I have gotten half-way there:
numVertices = 43;
vertices = new GLfloat[numVertices];
auto i = 2;
auto x = 0.0f,
     y = x,
     z = x,
     r = 0.3f;
auto numSides = 21;
auto TWO_PI = 2.0f * M_PI;
auto increment = TWO_PI / numSides;
for (auto angle = 0.0f; angle <= TWO_PI; angle += increment) {
    vertices[i++] = r * cos(angle) + x;
    vertices[i++] = r * sin(angle) + y;
}
Which gives me: [screenshot omitted: the circle renders as an outline, but with an extra line].
Two questions I still have:
Why is there an extra line going from the centre to the right side, and how can I fix it?
According to #user1118321's comment on a related SO answer, I should be able to prepend another vertex to the array at (0, 0) and use GL_TRIANGLE_FAN instead of GL_LINE_LOOP to get a coloured circle. But this results in no output for me :-( Why?
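For reference, a common GL_TRIANGLE_FAN layout puts the centre vertex at index 0 and closes the fan by repeating the first rim vertex; a minimal sketch of generating such vertices (an illustration of the usual technique, not a confirmed fix for the code above):
// numSides rim points, plus the centre and a closing duplicate of the
// first rim point => numSides + 2 vertices in total.
const int numSides = 21;
const int numVertices = numSides + 2;
const float r = 0.3f;
std::vector<GLfloat> fan;
fan.reserve(numVertices * 2);
fan.push_back(0.0f); // centre x
fan.push_back(0.0f); // centre y
for (int s = 0; s <= numSides; ++s) { // '<=' emits the closing duplicate
    float angle = s * 2.0f * (float)M_PI / numSides;
    fan.push_back(r * cosf(angle));
    fan.push_back(r * sinf(angle));
}
// Upload fan.size() * sizeof(GLfloat) bytes (rather than a hard-coded 24),
// set attribSize = 2 to match, then draw with:
// glDrawArrays(GL_TRIANGLE_FAN, 0, numVertices);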

use of texture arrays with freetype in opengl

After having read and run the following tutorial (http://learnopengl.com/code_viewer.php?code=in-practice/text_rendering), I learned how to render text using FreeType in OpenGL. I am now wondering if it is possible to avoid calling glDrawArrays for each glyph, so I have made some modifications to the VBO to use it for a whole string instead of one glyph. As a first step I used the string "AA", and since both glyphs are identical, they also share the same texture. So it was not an issue to get the following code running:
glGenVertexArrays(1, & textVAO);
glBindVertexArray(textVAO);
glGenBuffers(1, &textVBO);
glBindBuffer(GL_ARRAY_BUFFER,textVBO);
glBufferData(GL_ARRAY_BUFFER, 24* 2* sizeof(float), NULL, GL_DYNAMIC_DRAW);
glVertexPointer( 4, GL_FLOAT, 0, NULL);
followed by:
textShader.Use();
glm::vec3 color;
color = glm::vec3(1.0, 0.7f, 0.9f);
glUniform3f(glGetUniformLocation(textShader.Program, "textColor"), color.x, color.y, color.z);
glBindVertexArray(textVAO);
glActiveTexture(GL_TEXTURE0);
int k = 0;
for (c = text.begin(); c != text.end(); c++) {
    Character ch = Characters[*c];
    GLfloat xpos = x + ch.Bearing.x * scale;
    GLfloat ypos = y - (ch.Size.y - ch.Bearing.y) * scale;
    GLfloat w = ch.Size.x * scale;
    GLfloat h = ch.Size.y * scale;
    V.block(0,k,24,1) << xpos, ypos + h, 0.0, 0.0,
                         xpos, ypos, 0.0, 1.0,
                         xpos + w, ypos, 1.0, 1.0,
                         xpos, ypos + h, 0.0, 0.0,
                         xpos + w, ypos, 1.0, 1.0,
                         xpos + w, ypos + h, 1.0, 0.0;
    k++;
    x += ((ch.Advance >> 6) * scale);
}
glBindTexture(GL_TEXTURE_2D, 66);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, textVBO);
glBufferSubData(GL_ARRAY_BUFFER, 0, 24 * 2 * sizeof(float), V.data());
glDrawArrays(GL_TRIANGLES, 0, 4 * 3);
glBindTexture(GL_TEXTURE_2D, 0);
glDisableClientState(GL_VERTEX_ARRAY);
glUseProgramObjectARB(0);
I would like to be able to render "AB" or "ZW" on the screen so I am now trying to use a GL_TEXTURE_2D_ARRAY together with glTexImage3D and glTexSubImage3D. Again for simplification I use "AA" in order to have the same width and height for the glyphs. So I added
GLuint textureArray;
glEnable(GL_TEXTURE_2D_ARRAY);
glGenTextures(1, &textureArray);
glBindTexture(GL_TEXTURE_2D_ARRAY, textureArray);
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, 4, 30, 35, 2, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
to the first part of the preceding code, and the second part has become:
textShader.Use();
glm::vec3 color;
color = glm::vec3(1.0, 0.7f, 0.9f);
glUniform3f(glGetUniformLocation(textShader.Program, "textColor"), color.x, color.y, color.z);
glBindVertexArray(textVAO);
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_2D);
glEnable(GL_TEXTURE_2D_ARRAY);
for (c = text.begin(); c != text.end(); c++) {
    Character ch = Characters[*c];
    GLfloat xpos = x + ch.Bearing.x * scale;
    GLfloat ypos = y - (ch.Size.y - ch.Bearing.y) * scale;
    GLfloat w = ch.Size.x * scale;
    GLfloat h = ch.Size.y * scale;
    V.block(0,k,24,1) << xpos, ypos + h, 0.0, 0.0,
                         xpos, ypos, 0.0, 1.0,
                         xpos + w, ypos, 1.0, 1.0,
                         xpos, ypos + h, 0.0, 0.0,
                         xpos + w, ypos, 1.0, 1.0,
                         xpos + w, ypos + h, 1.0, 0.0;
    glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, k, 30, 35, 1, GL_RED, GL_UNSIGNED_BYTE, ch.pointeur);
    k++;
    x += ((ch.Advance >> 6) * scale);
}
glBindTexture(GL_TEXTURE_2D_ARRAY, textureArray);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, textVBO);
glBufferSubData(GL_ARRAY_BUFFER, 0, 24 * 2 * sizeof(float), V.data());
glDrawArrays(GL_TRIANGLES, 0, 4 * 3);
glBindTexture(GL_TEXTURE_2D, 0);
glDisableClientState(GL_VERTEX_ARRAY);
glUseProgramObjectARB(0);
The code compiles, but I don't get anything on the screen. So I wonder if I am using GL_TEXTURE_2D_ARRAY correctly, and also whether I have to write an "array version" of the shaders. I am using OpenGL 4.5. Thank you.
There are (at least) two problems.
First, you are using the fixed-function pipeline, which cannot be used with array textures; the fixed-function texture environment does not know how to handle them. If you want to use array textures, you must use shaders.
Second, even if you were using shaders, you are doing this all wrong. Then again, you were taught wrongly by the tutorial (which in this case is teaching such bad practice that it's actively damaging to OpenGL users). You put each glyph in its own array layer. Well, many OpenGL implementations only support 256 array layers, which is not a lot of glyphs if you want to include non-English text.
The correct way to do glyph rendering is to build a texture atlas of glyphs, rather than using a glyph-per-texture (as the crappy tutorial does) or a glyph-per-array-layer (as you do). You put multiple glyphs in different locations of a single 2D texture, then use texture coordinates to pick which glyph to use. That will allow you to submit entire blocks of text with a single draw call.
2D texture sizes can be upwards of 16K in pixels these days. Even with only 4096x4096 textures, you can fit over 16 thousand 32x32 glyphs into them.
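A minimal sketch of the atlas idea, assuming glyphs are packed on a fixed-size grid (real atlas builders track per-glyph rectangles; the names and sizes here are illustrative):
// Hedged sketch: a 4096x4096 atlas holding 32x32 glyph cells on a grid.
const float ATLAS_SIZE = 4096.0f;
const float CELL = 32.0f;
const int CELLS_PER_ROW = (int)(ATLAS_SIZE / CELL); // 128

// Map a glyph's cell index to the UV rectangle of its cell in the atlas.
void glyphUVRect(int cellIndex, float& u0, float& v0, float& u1, float& v1)
{
    int col = cellIndex % CELLS_PER_ROW;
    int row = cellIndex / CELLS_PER_ROW;
    u0 = (col * CELL) / ATLAS_SIZE;
    v0 = (row * CELL) / ATLAS_SIZE;
    u1 = u0 + CELL / ATLAS_SIZE;
    v1 = v0 + CELL / ATLAS_SIZE;
}
// Each glyph quad then carries these UVs in the vertex array, so an entire
// string can be drawn with a single glDrawArrays call against one texture.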
I found another tutorial in two parts: the first part (pedagogically very similar to Joey's tutorial) issues a draw call for each glyph, and the second part explains how to draw a whole string in one call. Here is the link to the second part. The complete code can be downloaded (scroll down the page). Thank you!
https://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_Text_Rendering_02

Finding center of image for rotation in opengl

So I have this piece of code, which pretty much draws various 2D textures on the screen, though there are multiple sprites that have to be 'dissected' from the texture (spritesheet). The problem is that rotation is not working properly; while it rotates, it does not rotate around the center of the texture, which is what I am trying to do. I have narrowed it down to the translation being incorrect:
glTranslatef(x + sr->x/2 - sr->w/2,
             y + sr->y/2 - sr->h/2, 0);
glRotatef(ang, 0, 0, 1.f);
glTranslatef(-x + -sr->x/2 - -sr->w/2,
             -y + -sr->y/2 - -sr->h/2, 0);
X and Y are the position being drawn to; the sheet rect struct contains the X and Y position of the sprite within the texture, along with w and h, which are the width and height of the 'sprite' in the texture. I've tried various other formulas, such as:
glTranslatef(x, y, 0);
The three below, switching the negative sign to positive (x - y to x + y):
glTranslatef(sr->x/2 - sr->w/2, sr->y/2 - sr->h/2, 0);
glTranslatef(sr->x - sr->w/2, sr->y - sr->h/2, 0);
glTranslatef(sr->x - sr->w, sr->y - sr->w, 0);
glTranslatef(.5, .5, 0);
It might also be helpful to say that:
glOrtho(0,screen_width,screen_height,0,-2,10);
is in use.
I've tried reading various tutorials, going through various forums, and asking various people, but there doesn't seem to be a solution that works, nor can I find any useful resources that explain how to find the center of the image in order to translate it to (0,0). I'm pretty new to OpenGL, so a lot of this stuff takes a while for me to digest.
Here's the entire function:
void Apply_Surface(float x, float y, Sheet_Container* source, Sheet_Rect* sr, float ang = 0, bool flipx = 0, bool flipy = 0, int e_x = -1, int e_y = -1) {
    float imgwi, imghi;
    glLoadIdentity();
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, source->rt());
    // rotation
    imghi = source->rh();
    imgwi = source->rw();
    Sheet_Rect t_shtrct(0, 0, imgwi, imghi);
    if (sr == NULL) // in case a sheet rect is not provided, assume it's the width
                    // and height of the texture with 0/0 x/y
        sr = &t_shtrct;
    glPushMatrix();
    //
    int wid, hei;
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &wid);
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &hei);
    glTranslatef(-sr->x + -sr->w,
                 -sr->y + -sr->h, 0);
    glRotatef(ang, 0, 0, 1.f);
    glTranslatef(sr->x + sr->w,
                 sr->y + sr->h, 0);
    // Yeah, out-dated way of drawing to the screen but it works for now.
    GLfloat tex[] = {
        (sr->x + sr->w * flipx) / imgwi, 1 - (sr->y + sr->h * !flipy) / imghi,
        (sr->x + sr->w * flipx) / imgwi, 1 - (sr->y + sr->h * flipy) / imghi,
        (sr->x + sr->w * !flipx) / imgwi, 1 - (sr->y + sr->h * flipy) / imghi,
        (sr->x + sr->w * !flipx) / imgwi, 1 - (sr->y + sr->h * !flipy) / imghi
    };
    GLfloat vertices[] = { // vertices to put on screen
        x, (y + sr->h),
        x, y,
        (x + sr->w), y,
        (x + sr->w), (y + sr->h)
    };
    // index array
    GLubyte index[6] = { 0, 1, 2, 2, 3, 0 };
    float fx = (x / (float)screen_width) - (float)sr->w / 2 / (float)imgwi;
    float fy = (y / (float)screen_height) - (float)sr->h / 2 / (float)imghi;
    // activate arrays
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    // pass vertices and texture information
    glVertexPointer(2, GL_FLOAT, 0, vertices);
    glTexCoordPointer(2, GL_FLOAT, 0, tex);
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, index);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glPopMatrix();
    glDisable(GL_TEXTURE_2D);
}
Sheet container class:
class Sheet_Container {
    GLuint texture;
    int width, height;
public:
    Sheet_Container();
    Sheet_Container(GLuint, int = -1, int = -1);
    void Load(GLuint, int = -1, int = -1);
    float rw();
    float rh();
    GLuint rt();
};
Sheet rect class:
struct Sheet_Rect {
    float x, y, w, h;
    Sheet_Rect();
    Sheet_Rect(int xx, int yy, int ww, int hh);
};
Image loading function:
Sheet_Container Game_Info::Load_Image(const char* fil) {
    ILuint t_id;
    ilGenImages(1, &t_id);
    ilBindImage(t_id);
    ilLoadImage(const_cast<char*>(fil));
    int width = ilGetInteger(IL_IMAGE_WIDTH), height = ilGetInteger(IL_IMAGE_HEIGHT);
    return Sheet_Container(ilutGLLoadImage(const_cast<char*>(fil)), width, height);
}
Your quad (two triangles) is centered at:
( x + sr->w / 2, y + sr->h / 2 )
You need to move that point to the origin, rotate, and then move it back:
glTranslatef ( (x + sr->w / 2.0f), (y + sr->h / 2.0f), 0.0f); // 3rd
glRotatef (0,0,0,1.f); // 2nd
glTranslatef (-(x + sr->w / 2.0f), -(y + sr->h / 2.0f), 0.0f); // 1st
Here is where I think you are getting tripped up. People naturally assume that OpenGL applies transformations in the order they appear (top-to-bottom); that is not the case. OpenGL effectively swaps the operands every time it multiplies two matrices:

M1 x M2 x M3
~~~~~~~
  (1)
~~~~~~~~~~~~
     (2)

(1) M2 * M1
(2) M3 * (M2 * M1)  -->  M3 * M2 * M1   (row-major / textbook math notation)

The technical term for this is post-multiplication; it all has to do with the way matrices are implemented in OpenGL (column-major). Suffice it to say, you should generally read glTranslatef, glRotatef, glScalef, etc. calls from bottom-to-top.
With that out of the way, your current rotation does not make any sense.
You are telling GL to rotate 0 degrees around the axis <0,0,1> (the z-axis, in other words). The axis is correct, but a 0 degree rotation is not going to do anything ;)
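Putting both points together, a corrected sequence would look something like this (a sketch; ang is the angle parameter from the Apply_Surface signature above):
// Read bottom-to-top: move the sprite's centre to the origin, rotate by a
// non-zero angle, then move the centre back where it was.
glTranslatef( (x + sr->w / 2.0f),  (y + sr->h / 2.0f), 0.0f); // applied 3rd
glRotatef(ang, 0.0f, 0.0f, 1.0f);                             // applied 2nd
glTranslatef(-(x + sr->w / 2.0f), -(y + sr->h / 2.0f), 0.0f); // applied 1st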

Undesirable serration in surface drawing in OpenGL using Vertex Buffer Objects

First off, I am giving screenshots of the problematically rendered images in OpenGL. The fourth surface image is drawn by MATLAB and shows what the image is supposed to look like in OpenGL.
[Images omitted: the first three are the problematic serrated renderings from OpenGL at different angles; the fourth is the correct MATLAB rendering of the dataset.]
The image is a 1024 x 1024 complex matrix. Each element's imaginary part is the height of the point (in a 1024x1024 heightmap), and the real part is the colour of the point.
In MATLAB we created a small Gaussian-shaped mountain. In OpenGL it is rendered ragged and serrated, and the "raggedness" is spread across the entire image.
Moreover, depending on the viewing angle, there appears to be a region beyond a line where not only does an even stranger kind of serration occur, but the rendered surface also makes a height jump/change.
What can cause this? Why is this "raggedness" happening, and what is that line? We have run out of ideas and would appreciate any help. The related parts of the VBO code are given below. We basically create a float4 object per vertex: the first, second, and third floats correspond to the coordinates of the point, and the 4th float (treated as 4 one-byte numbers) is the RGBA color.
Also note that the complex matrix containing the heightmap and the color information is stored on the GPU, so there are CUDA calls in the code. When all the data is dumped to a file, MATLAB successfully draws the map, so the data is definitely correct.
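For reference, a sketch of the per-vertex layout described above (an assumed struct; it matches the 16-byte stride and 12-byte color offset used in the glVertexPointer/glColorPointer calls in display() below):
// Hedged sketch: 12 bytes of position followed by 4 bytes of packed RGBA,
// 16 bytes per vertex in total.
struct Vertex {
    float x, y, z;            // position (offset 0)
    unsigned char r, g, b, a; // RGBA color packed into the 4th float (offset 12)
};
static_assert(sizeof(Vertex) == 16, "must match the VBO stride");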
#define BUFFER_OFFSET(i) ((char *)NULL + (i))

void initGL()
{
    ...
    glViewport(0, 0, window_width, window_height);
    glEnable(GL_BLEND);
    glEnable(GL_COLOR_MATERIAL);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    // projection
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(60.0, (GLfloat)window_width / (GLfloat)window_height, 0.1, 15.0);
    ...
}

void display()
{
    camx += camx_v;
    camy += camy_v;
    camx_v = 0;
    camy_v = 0;
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    // set view matrix
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(0, 0, 1,  /* look from camera XYZ */
              0, 0, 0,  /* look at the origin */
              0, 1, 0); /* positive Y up vector */
    drawGround();
    glTranslatef(camx, camy, translate_z);
    glRotatef(rotate_x, 1.0, 0.0, 0.0);
    glRotatef(rotate_y, 0.0, 1.0, 0.0);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 16, BUFFER_OFFSET(0));
    glEnableClientState(GL_COLOR_ARRAY);
    glColorPointer(4, GL_UNSIGNED_BYTE, 16, BUFFER_OFFSET(12));
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo_i);
    glDrawElements(GL_TRIANGLES, (mesh_width-1) * (mesh_height-1) * 6, GL_UNSIGNED_INT, (GLvoid*)0);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
    glutSwapBuffers();
}

void createVBO(GLuint* vbo, struct cudaGraphicsResource **vbo_res,
               unsigned int vbo_res_flags)
{
    glGenBuffers(1, vbo);
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    unsigned int size = mesh_width * mesh_height * 4 * sizeof(float);
    glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    cutilSafeCall(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
}

void createIBO(GLuint* vbo, struct cudaGraphicsResource **vbo_res,
               unsigned int vbo_res_flags, unsigned int numofindice)
{
    glGenBuffers(1, vbo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, *vbo);
    unsigned int size = (mesh_width-1) * (mesh_height-1) * numofindice * sizeof(GLuint);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, size, 0, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    cutilSafeCall(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
}

void main()
{
    initGL();
    createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard);
    createIBO(&vbo_i, &cuda_vbo_resource_i, cudaGraphicsMapFlagsWriteDiscard, 6);
    glutMainLoop();
}
// Kernel that fills the index buffer on the GPU; called once at program initialization.
__global__ void fillIBO(unsigned int* pos_i, unsigned int M)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int bi;
    if (y < M-1 && x < M-1)
    {
        bi = ((M-1)*y + x)*6;
        // TRI
        pos_i[bi++] = x + y*M + 1;
        pos_i[bi++] = x + y*M + M + 1;
        pos_i[bi++] = x + y*M;
        pos_i[bi++] = x + y*M;
        pos_i[bi++] = x + y*M + M + 1;
        pos_i[bi++] = x + y*M + M;
    }
}
Replace the second triangle with:
pos_i[bi++] = x + y*M + 1;
pos_i[bi++] = x + y*M + M + 1;
pos_i[bi++] = x + y*M + M;
Also, I'm pretty sure it should be
bi = (M*y +x)*6;