VBO Rendering Slow - c++

I have been learning VBOs for a couple of weeks now, and I have been told here that VBOs can render "~1 million vertices at several hundred fps". However, my current VBO test program only gets around 50 FPS with a little over 1 million vertices to render. Are there ways to optimize VBO efficiency? Or, more likely, am I doing something incorrectly? My test program is here:
EDIT: Improved code based on feedback.
#include <windows.h>
#include <SFML/Graphics.hpp>
#include <iostream>
#include <glew.h>
#include <gl/gl.h>
#include <gl/glu.h>
using namespace std;
float cube_vertices[] = {-1, -1, 1,
1, -1, 1,
1, 1, 1,
-1, 1, 1,
-1, -1, -1,
-1, 1, -1,
1, 1, -1,
1, -1, -1,
-1, 1, -1,
-1, 1, 1,
1, 1, 1,
1, 1, -1,
-1, -1, -1,
1, -1, -1,
1, -1, 1,
-1, -1, 1,
1, -1, -1,
1, 1, -1,
1, 1, 1,
1, -1, 1,
-1, -1, -1,
-1, -1, 1,
-1, 1, 1,
-1, 1, -1};
float cube_normals[] = {0, 0, 1,
0, 0, 1,
0, 0, 1,
0, 0, 1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
-1, 0, 0,
-1, 0, 0,
-1, 0, 0,
-1, 0, 0};
class Scene {
public:
void setup_projection( int w, int h ) {
glViewport( 0, 0, w, h );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
gluPerspective( 50, (GLdouble)w/(GLdouble)h, 1, 5000.0 );
glMatrixMode( GL_MODELVIEW );
}
};
int main() {
///Number of models to render
int NumberOfCubes = 0;
cout << "Enter number of cubes to render: ";
cin >> NumberOfCubes;
system("cls");
///Create vectors for mesh data
//6 faces * 4 verts * x, y, z * number of cubes
std::vector<float> vertices; vertices.resize(6*4*3*NumberOfCubes);
std::vector<float> normals; normals.resize(6*4*3*NumberOfCubes);
for(int i = 0; i < NumberOfCubes; i++)
{
for(int j = 0; j < 6*4*3; j++)
{
vertices[(i*6*4*3) + j] = cube_vertices[j] + i;
normals[(i*6*4*3) + j] = cube_normals[j];
}
}
///Store size of the vectors
int SizeOfVertices = vertices.size() * sizeof(float);
int SizeOfNormals = normals.size() * sizeof(float);
///Window setup, lighting setup
sf::RenderWindow window(sf::VideoMode(800, 600, 32), "Test");
Scene scene;
scene.setup_projection(window.getSize().x,window.getSize().y);
glewInit();
glEnable(GL_DEPTH_TEST);
glEnable(GL_LIGHTING);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHT0);
float XL = .5, YL = .1, ZL = 1;
GLfloat ambientLight[] = { 0.2f, 0.2f, 0.2f, 1.0f };
GLfloat diffuseLight[] = { 0.8f, 0.8f, 0.8, 1.0f };
GLfloat specularLight[] = { 0.5f, 0.5f, 0.5f, 1.0f };
GLfloat lightpos[] = {XL, YL, ZL, 0.};
glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight);
glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight);
glLightfv(GL_LIGHT0, GL_POSITION, lightpos);
///Generate the VBO
GLuint VBOID;
glGenBuffers(1, &VBOID);
glBindBuffer(GL_ARRAY_BUFFER, VBOID);
glBufferData(GL_ARRAY_BUFFER, SizeOfVertices + SizeOfNormals, 0, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, SizeOfVertices, &vertices[0]);
glBufferSubData(GL_ARRAY_BUFFER, SizeOfVertices, SizeOfNormals, &normals[0]); //size is SizeOfNormals only; the offset already skips the vertex block
glBindBuffer(GL_ARRAY_BUFFER, 0);
///FPS Stuff
sf::Clock FPS;
sf::Clock ShowFPS;
float fps;
///Start loop
cout << "Rendering " << NumberOfCubes * 8 << " vertices." << endl;
cout << "Using graphics card: " << glGetString(GL_RENDERER) << endl;
while( window.isOpen() ) {
sf::Event event;
while( window.pollEvent( event ) ) {
if( event.type == sf::Event::Closed )
window.close();
}
fps = FPS.getElapsedTime().asSeconds();
fps = 1 / fps;
FPS.restart();
if(ShowFPS.getElapsedTime().asSeconds() > 1)
{
cout << "FPS: " << fps << "\t FrameTime: " << 1000 / fps << endl;
ShowFPS.restart();
}
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
scene.setup_projection(window.getSize().x,window.getSize().y);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(-25, -25, 150, 50, 50, 50, 0, 1, 0);
glBindBuffer(GL_ARRAY_BUFFER, VBOID);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1, 0, 0);
glNormalPointer(GL_FLOAT, 0, (const GLvoid*)(size_t)SizeOfVertices); //normals start after the vertex block in the same VBO
glVertexPointer(3, GL_FLOAT, 0, 0);
glDrawArrays(GL_QUADS, 0, 6*4*NumberOfCubes);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
window.display();
}
return 1;
}

A few remarks on your code:
void Scene::resize( int w, int h ) {
glViewport( 0, 0, w, h );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
gluPerspective( 50, (GLdouble)w/(GLdouble)h, 1, 5000.0 );
glMatrixMode( GL_MODELVIEW );
}
Please do understand that setting the viewport and the projection is not some sort of "resizing" operation. They're part of the drawing process and hence should be treated as such. The good thing is that you call this function with every drawing iteration. But I'd not call it resize. A better name would be setup_projection or similar, to make clear what this function does, not what it reacts to. Always name a function after what it does!
This
cout << endl << endl << "Close program when finished." << endl;
bool ProgramRunning(true);
while(ProgramRunning == true) {}
probably does not work as you might expect. What you close is the console window/terminal; this makes your program lose its standard input and process leader, thereby terminating it. None of the code after the while loop is going to be executed at all. You could install signal handlers that would set the – so far functionless – ProgramRunning flag to false.
However, the canonical way to deal with this is to simply pause until the user hits the Enter key:
cout << "Program execution finished, hit the ENTER key to terminate" << endl;
cin.get();
Now about why you get only 50 FPS: the most likely reason is that you have V-Sync enabled and your display has a refresh frequency of 50Hz. 50Hz is unusual, but not unheard of. Also likely is that your display is running at 60Hz, but for some reason you're not making the refresh deadline for each retrace, effectively making your code miss on average every 6th frame.
Another reason may be that you're not running on the GeForce, but on the chipset GPU of your laptop. If you have a hybrid graphics system, make sure you have all the drivers properly installed and that you switch to the GeForce GPU before executing your program.
Print the output of glGetString(GL_RENDERER) to make sure. After opening the window and creating the OpenGL context, add a
cout << glGetString(GL_RENDERER) << endl;

Are you using double buffering? If yes, then you may have sync to vblank enabled in your drivers. This would mean that EVERY OpenGL application using double buffering will render at most at the refresh rate of your monitor (usually around 50 - 60Hz).
You can try the same code with a (significantly) smaller model to see if your FPS ever goes above this value.
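If you want to rule out V-Sync without touching driver settings, SFML (which the code above uses) can toggle it per window. A minimal check, assuming SFML 2.x:
sf::RenderWindow window(sf::VideoMode(800, 600, 32), "Test");
window.setVerticalSyncEnabled(false); //don't wait for the monitor retrace
//...render as before; if the FPS jumps well above 50-60, V-Sync was the cap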

After doing further research, I found out about VBO indexing and was able to use that to get the several hundred FPS with a million vertices.
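For anyone curious what that change looks like, here is a rough sketch of indexed drawing under the same setup (names are illustrative, and per-face normals are ignored for brevity so the 8 corners of each cube can be shared):
//Sketch only: each cube contributes its 8 unique corner positions once, and an
//element buffer re-uses them for all 6 quads, so far fewer vertices are processed per cube.
static const float corner[8][3] = {
    {-1,-1,-1},{ 1,-1,-1},{ 1, 1,-1},{-1, 1,-1},
    {-1,-1, 1},{ 1,-1, 1},{ 1, 1, 1},{-1, 1, 1} };
static const unsigned int face[24] = {
    4,5,6,7,  0,3,2,1,  3,7,6,2,  0,1,5,4,  1,2,6,5,  0,4,7,3 };

std::vector<float> verts;            //8 corners per cube
std::vector<unsigned int> indices;   //24 indices per cube
for (int i = 0; i < NumberOfCubes; ++i) {
    for (int c = 0; c < 8; ++c)
        for (int k = 0; k < 3; ++k)
            verts.push_back(corner[c][k] + (float)i);
    for (int k = 0; k < 24; ++k)
        indices.push_back(i * 8 + face[k]);
}

GLuint IBOID;
glGenBuffers(1, &IBOID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IBOID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned int),
             indices.data(), GL_STATIC_DRAW);

//In the render loop, with the vertex VBO and the IBO bound, replace glDrawArrays with:
glDrawElements(GL_QUADS, (GLsizei)indices.size(), GL_UNSIGNED_INT, 0);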

Related

How to make C++ OpenGL Camera look left and right?

In this project, I made a 3D cube and added a camera. We can go forward, backward, right or left with the W, A, S, D keys, but we cannot turn right or left. For example, I want to see the back side of the cube, but I cannot see it. Actually this is a simple problem and its solution is simple, but my math level is insufficient, so it would be better if someone explained it to me. I'll show my code and what I've tried below.
#include <Windows.h>
#include <gl/GL.h>
#include <gl/GLU.h>
#include <GLFW/glfw3.h>
#include <cstdio>
#include <iostream>
#include <cmath>
#include <math.h>
int width = 1280;
int height = 720;
float camera_z = 5;
float camera_y = 0;
float camera_x = 0;
float fov = 60;
GLFWwindow* window;
float speed = 0.01;
GLfloat vertices[] = {
-1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1,
1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1,
-1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1,
-1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1,
-1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1,
-1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1
};
GLfloat colors[] = {
1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0,
1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0,
1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0,
1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0,
1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0,
1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0
};
void keyboard() {
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) {
glfwSetWindowShouldClose(window, GL_TRUE);
}
if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS) {
camera_z = camera_z - speed;
}
if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS) {
camera_x = camera_x - speed;
}
if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS) {
camera_z = camera_z + speed;
}
if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS) {
camera_x = camera_x + speed;
}
if (glfwGetKey(window, GLFW_KEY_Q) == GLFW_PRESS) {
camera_y = camera_y + speed;
}
if (glfwGetKey(window, GLFW_KEY_E) == GLFW_PRESS) {
camera_y = camera_y - speed;
}
if (glfwGetKey(window, GLFW_KEY_LEFT) == GLFW_PRESS) {
// need help
}
if (glfwGetKey(window, GLFW_KEY_RIGHT) == GLFW_PRESS) {
// need help
}
}
void drawCube() {
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glColorPointer(3, GL_FLOAT, 0, colors);
glDrawArrays(GL_QUADS, 0, 24);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
}
int main(void)
{
/* Initialize the library */
if (!glfwInit())
return -1;
/* Create a windowed mode window and its OpenGL context */
window = glfwCreateWindow(width, height, "C++ OpenGL ", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
/* Make the window's context current */
glfwMakeContextCurrent(window);
glEnable(GL_DEPTH_TEST);
/* Loop until the user closes the window */
while (!glfwWindowShouldClose(window))
{
glViewport(0, 0, width, height);
/* Render here */
glClearColor(0.0, 192.0f / 256.0f, 1, 1.0); //use floating-point division; 192/256 would truncate to 0
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
float aspect = (float)width / (float)height;
float fov = 60;
gluPerspective(fov, aspect, 0.1, 1000);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
keyboard();
gluLookAt(
camera_x,
camera_y,
camera_z,
camera_x + 1, // need help
camera_y + 1, // need help
camera_z - fov ,
0,
1,
0
);
glTranslatef(0, 0,-3);
drawCube();
glFlush();
/* Swap front and back buffers */
glfwSwapBuffers(window);
/* Poll for and process events */
glfwPollEvents();
}
glfwTerminate();
return 0;
}
What I tried were the variables camera_eyex, *_eyey, *_eyez. I added them to the loop to offset the camera target by +1, and increased or decreased them when I wanted to turn left or right, but this method has many errors. For example, even if the camera is rotated 45 degrees, pressing the W key still moves straight ahead, so it does not go where I am looking. Also, turning more than 90 degrees to the right or left is not possible, and when I hold the left key to turn left, the turning gets slower and slower.
You need to build a transform matrix. A transformation is usually a combination of translation, scale and rotation, where you first rotate, then scale and then translate (the actual order of calculation - multiplication - is reversed):
translation x scale x rotation
If you want to scale or rotate around a certain point (pivot or center), then you have to translate to the center point and at the end translate back to the origin, like:
translation x center x scale x rotation x -center
The lookAt algorithm sets the rotation and translation based on the parameters (eye, target, up), whereas your goal is to separate the rotation from the translation. Therefore you have to build your own transformation, e.g.:
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//set the position of the camera
//set the position of the camera
glTranslatef(pos_x, pos_y, pos_z);
//set the scale or rotation pivot
//glTranslatef(center_x, center_y, center_z);
//scale
//glScalef(scale_x, scale_y, scale_z);
//rotate around z-axis first
glRotatef(z_angle, 0, 0, 1);
//rotate around y-axis
glRotatef(y_angle, 0, 1, 0);
//rotate around x-axis
glRotatef(x_angle, 1, 0, 0);
//set the center back, if set before
//glTranslatef(-center_x, -center_y, -center_z);
You'll find more info here (OpenGL related, although 'modern' OpenGL): https://learnopengl.com/Getting-started/Transformations
Note: You probably have to adjust the 'W A S D' input to the changed axes. If, for example, the 'W' key adjusts the z-component, you will probably not go 'forward' (along the view direction) with the setup above. In order to move along the transformed axes, you'll need the transformation matrix and have to extract the axes from it. Better is to use a math library, e.g. glm. For how an orbit camera could be implemented, again, have a look at: https://learnopengl.com/Getting-started/Camera
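To give the LEFT/RIGHT keys from the question something concrete to do, one simple option (a sketch of the idea above, not the only way) is to keep a yaw angle, derive the forward direction from it, and pass eye and eye + forward to gluLookAt; W/S then move along that forward vector:
//Hypothetical addition to the question's globals:
float yaw = 0.0f; //rotation around the y-axis, in radians

//In keyboard():
if (glfwGetKey(window, GLFW_KEY_LEFT)  == GLFW_PRESS) yaw -= speed;
if (glfwGetKey(window, GLFW_KEY_RIGHT) == GLFW_PRESS) yaw += speed;

//Forward direction derived from the yaw angle (yaw == 0 looks down -z):
float fwd_x = sinf(yaw);
float fwd_z = -cosf(yaw);

//W/S now move along the view direction instead of the world z-axis:
if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS) { camera_x += fwd_x * speed; camera_z += fwd_z * speed; }
if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS) { camera_x -= fwd_x * speed; camera_z -= fwd_z * speed; }

//In the render loop, replace the gluLookAt call:
gluLookAt(camera_x, camera_y, camera_z,
          camera_x + fwd_x, camera_y, camera_z + fwd_z, //look at a point one unit ahead
          0, 1, 0);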
Building a camera system needs some theoretical background in linear algebra.
Based on the comment section below, I list all the relevant links here:
Dot product
Cross product
Translation matrix
Rotation matrix
Matrix multiplication
On top of that, it can be overwhelming for those who have never taken a course in linear algebra, therefore I highly recommend The Essence of Linear Algebra by 3Blue1Brown.
A note on matrix multiplication, which is not adequately emphasized and mostly overlooked: once one has internalized the geometric meaning of a dot product, and the geometric meaning of the rows and columns of a matrix, one should also note that each component of the resulting matrix is the dot product of a row vector and a column vector; visualize that and internalize it.
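To make that concrete, here is a tiny illustration in plain C++ (just for intuition): each entry C[i][j] is the dot product of row i of A with column j of B.
//Illustration only: 3x3 matrix product written as row-by-column dot products.
float A[3][3] = {}, B[3][3] = {}, C[3][3] = {}; //fill A and B with whatever you like
for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j) {
        float dot = 0.0f;
        for (int k = 0; k < 3; ++k)
            dot += A[i][k] * B[k][j]; //row i of A  .  column j of B
        C[i][j] = dot;
    }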

gluLookAt() has no effect in OpenGL

I'm trying to look at the square from the other side using the gluLookAt() function.
After using the function, nothing changes, although I expected the corners of the square to change.
I set the camera point to the rightmost part of the world and look at its center, where the square is located.
The square should have appeared stretched out to the sides. Why hasn't anything changed?
Code:
#include "includes.h"
using namespace std;
constexpr auto FPS_RATE = 60;
int windowHeight = 600, windowWidth = 600, windowDepth = 600;
void init();
void idleFunction();
void displayFunction();
double getTime();
double getTime()
{
using Duration = std::chrono::duration<double>;
return std::chrono::duration_cast<Duration>(
std::chrono::high_resolution_clock::now().time_since_epoch()
).count();
}
const double frame_delay = 1.0 / FPS_RATE;
double last_render = 0;
void init()
{
glutDisplayFunc(displayFunction);
glutIdleFunc(idleFunction);
glViewport(0, 0, windowWidth, windowHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-windowWidth / 2, windowWidth / 2, -windowHeight / 2, windowHeight / 2, -windowDepth / 2, windowDepth / 2);
glClearColor(0.0, 0.0, 0.0, 0.0);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
}
void idleFunction()
{
const double current_time = getTime();
if ((current_time - last_render) > frame_delay)
{
last_render = current_time;
glutPostRedisplay();
}
}
void displayFunction()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBegin(GL_POLYGON);
gluLookAt(-300, 0, 0,
0, 0, 0,
0, 1, 0);
glColor3f(1, 1, 1);
glVertex3i(-150, 150, 0);
glVertex3i(150, 150, 0);
glVertex3i(150, -150, 0);
glVertex3i(-150, -150, 0);
glEnd();
glutSwapBuffers();
}
int main(int argc, char* argv[])
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(windowWidth, windowHeight);
glutInitWindowPosition((GetSystemMetrics(SM_CXSCREEN) - windowWidth) / 2, (GetSystemMetrics(SM_CYSCREEN) - windowHeight) / 2);
glutCreateWindow("Window");
init();
glutMainLoop();
return 0;
}
The issue is caused because gluLookAt() is called within a glBegin/glEnd sequence. This is not allowed. You have to call gluLookAt before glBegin.
Once drawing of primitives has been started by glBegin, it is only allowed to specify vertex coordinates (glVertex) and change attributes (e.g. glColor, glTexCoord ...), until the drawing is ended by glEnd.
All other instructions will be ignored and cause a GL_INVALID_OPERATION error (error code 1282).
Further note that gluLookAt doesn't set the current matrix; it defines a matrix and multiplies the current matrix by it. Set the matrix mode (glMatrixMode) and load the identity matrix with glLoadIdentity before calling gluLookAt.
With the view matrix
gluLookAt(-300, 0, 0, 0, 0, 0, 0, 1, 0);
you want "see" anything, because with that matrix the line of sight is set along the x-axis and you look at the 2 dimensional polygon from the side.
Note, the polygon is a 2D object. The size of the object appears different if you look at it from the front, from the side (then it is a line and not visible) or from an direction in between. The first 3 parameters of gluLookAt define the point of view the next 3 parameters define the point you look at. The vector from the point of view to the point you look at is the line of sight.
Probably yo want look along the z-axis:
void displayFunction()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0, 0, -300, 0, 0, 0, 0, 1, 0);
glBegin(GL_POLYGON);
glColor3f(1, 1, 1);
glVertex3i(-150, 150, 0);
glVertex3i(150, 150, 0);
glVertex3i(150, -150, 0);
glVertex3i(-150, -150, 0);
glEnd();
glutSwapBuffers();
}
You use an orthographic (parallel) projection. If you used a perspective projection, the projected size of the object would decrease as the distance to the point of view increases. A perspective projection can be set with gluPerspective, e.g.:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(90.0, (double)windowWidth / windowHeight, 0.1, 600.0);

Wrong texture is used when running this in Windows (7 & 10 tested) without graphics drivers installed/in safe mode (OpenGL 1.1 used) - is it a bug?

I'm writing something that I want to work on as much hardware as possible, but I have found an issue where I don't believe the fault is with my code. Can anyone confirm?
UPDATE - also happens on Windows 7, and when booting in safe mode to force no drivers; both report:
OpenGL Version: - 1.1.0,
Vendor: - Microsoft Corporation,
Renderer: - GDI Generic
When running on Windows 10 with an older graphics card, the Microsoft OpenGL 1.1 driver is used. Yeah, I know that shaders are the way to go, and that older cards aren't actually supported on Windows 10, but since when did that stop anyone... etc. But like I said, I want maximum compatibility. Code that demonstrates the issue is included:
//link library
//opengl32
//linker options
//-lmingw32 -lSDL2main -lSDL2
//include SDL2 dirs-
//compiler-
//SDL2-2.0.4\i686-w64-mingw32\include\SDL2
//linker-
//SDL2-2.0.4\i686-w64-mingw32\lib
#include <SDL.h>
#include <SDL_opengl.h>
void draw();
bool fix = false;
bool wire = false;
GLuint tex1 = 0;
GLuint tex2 = 0;
int main(int argc, char *argv[])
{
SDL_Init(SDL_INIT_VIDEO|SDL_INIT_TIMER);
SDL_Window* mywindow = SDL_CreateWindow("press q or w to toggle fixes...",100,100,640,480,SDL_WINDOW_OPENGL|SDL_WINDOW_SHOWN);
SDL_GL_CreateContext(mywindow);
glEnable(GL_TEXTURE_2D);
bool loop = true;
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
while (loop)
{
SDL_Event event;
while (SDL_PollEvent(&event))
{
if (event.type == SDL_QUIT)
loop = false;
if (event.type == SDL_KEYDOWN)
{
switch (event.key.keysym.sym)
{
case SDLK_ESCAPE:
loop = false;
break;
case SDLK_q:
fix = !fix;
break;
case SDLK_w:
if(wire)
{
glPolygonMode(GL_FRONT, GL_FILL);
}
else
{
glPolygonMode(GL_FRONT, GL_LINE);
}
wire = !wire;
break;
default:
break;
}
}
}
draw();
SDL_GL_SwapWindow(mywindow);
}
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDeleteTextures(1, &tex1);
glDeleteTextures(1, &tex2);
return 0;
}
void texturator(GLuint* tex, unsigned char r, unsigned char g, unsigned char b)
{
if(!(*tex))
{
glGenTextures(1, tex) ;
glBindTexture(GL_TEXTURE_2D, *tex);
unsigned char text[4*4*4];//4*w*h
for(unsigned int x = 0;x<4;x++)
{
for(unsigned int y = 0;y<4;y++)
{
unsigned char* pix = &(text[((y*4)+x)*4]);
pix[0] = x*r;
pix[1] = y*g;
pix[2] = x*b;
pix[3] = 255;
}
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, text);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
}
else
{
if(fix)
{
glBindTexture(GL_TEXTURE_2D, 0);
}
glBindTexture(GL_TEXTURE_2D, *tex);
}
}
void draw()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
GLfloat mat1[] = {1.0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 1};
glLoadMatrixf(mat1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
GLfloat verts[] = {0.0f,0.0f,0.0f, 1.0f,0.0f,0.0f, 1.0f,1.0f,0.0f, 0.0f,1.0f,0.0f};
GLfloat uvs[] = {0.0f,0.0f, 1.0f,0.0f, 1.0f,1.0f, 0.0f,1.0f};
glVertexPointer(3, GL_FLOAT, 0, verts);
glTexCoordPointer(2, GL_FLOAT, 0, uvs);
texturator(&tex1,0,60,60);
GLuint elem[] = {0,1,2, 2,3,0};
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, elem);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
GLfloat mat2[] = {1, 0, 0, 0, -0, 1, 0, 0, 0, 0, 1, 0, -1, 0, 0, 1};
glLoadMatrixf(mat2);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
texturator(&tex2,60,10,0);
glTexCoordPointer(2, GL_FLOAT, 0, uvs);
glVertexPointer(3, GL_FLOAT, 0, verts);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
From the second frame on, tex2 will continue to be displayed for both draws, even though gDEBugger says that tex1 is bound. Binding 0 before the new bind fixes this. Switching to wireframe instead of fill mode also, curiously, fixes it, while it is still drawn as wireframe (use the q and w keys to toggle these).
Can anyone shed any light on this? Is it just Microsoft's implementation being..............
And is there a better way to include compatibility with older stuff?
cheers!!

Do I need Bind Pose Bone Transformation for my mesh Animation?

I have a Hand mesh which I want to animate.
I have the Skeleton which can be hierarchically animated.
My mesh is also weighted in Blender. So each vertex has 4 associated bones to be affected by.
When I apply the Animation of my Skeleton to the mesh, the hierarchy is applied correctly. (so the hierarchy of the mesh, matches the hierarchy of the Skeleton).
So far so good, now question:
the fingers look stretched (it's as if the fingers had been smashed by a heavy door). Why?
Note: I didn't apply the bind pose bone transformation matrix explicitly, but I read about it and I believe its functionality is already there, in the hierarchical transformation I have for my skeleton.
If you need more clarification of the steps, please ask.
vector<glm::mat4> Posture1Hand::HierarchyApplied(HandSkltn HNDSKs){
vector <glm::mat4> Matrices;
Matrices.resize(HNDSKs.GetLimbNum());
//non Hierarchical Matrices
for (unsigned int i = 0; i < Matrices.size(); i++){
Matrices[i] = newPose[i].getModelMatSkltn(HNDSKs.GetLimb(i).getLwCenter());
}
for (unsigned int i = 0; i < Matrices.size(); i++){
vector<Limb*>childeren = HNDSKs.GetLimb(i).getChildren();
for (unsigned int j = 0; j < childeren.size(); j++){
Matrices[childeren[j]->getId()] = Matrices[i] * Matrices[childeren[j]->getId()];
}
}
return Matrices;
}
Here is my getModelMatSkltn method.
inline glm::mat4 getModelMatSkltn(const glm::vec3& RotationCentre) const{//to apply the rotation on the whole hierarchy
glm::mat4 posMatrix = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
posMatrix = glm::translate(posMatrix, newPos);
glm::mat4 trMatrix = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
glm::mat4 OriginTranslate = glm::translate(trMatrix, -RotationCentre);
glm::mat4 InverseTranslate = glm::translate(trMatrix, RotationCentre);
glm::mat4 rotXMatrix = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
rotXMatrix = glm::rotate(rotXMatrix, glm::radians(newRot.x), glm::vec3(1, 0, 0));
glm::mat4 rotYMatrix = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
rotYMatrix = glm::rotate(rotYMatrix, glm::radians(newRot.y), glm::vec3(0, 1, 0));
glm::mat4 rotZMatrix = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
rotZMatrix = glm::rotate(rotZMatrix, glm::radians(newRot.z), glm::vec3(0, 0, 1));
glm::mat4 scaleMatric = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
scaleMatric = glm::scale(scaleMatric, newScale);
glm::mat4 rotMatrix = rotZMatrix*rotYMatrix*rotXMatrix;
rotMatrix = InverseTranslate*rotMatrix*OriginTranslate;
return posMatrix*rotMatrix*scaleMatric;
}
and this is how I send the 20 transformation matrices (one per joint in the hand) to the GPU:
void GLShader::Update(const vector<glm::mat4> trMat, const GLCamera& camera){
vector<glm::mat4> MVP; MVP.resize(trMat.size());
for (unsigned int i = 0; i < trMat.size(); i++){
MVP[i] = camera.getViewProjection()* trMat[i];
}
glUniformMatrix4fv(newUniform[TRANSFORM_U], trMat.size(), GL_FALSE, &MVP[0][0][0]);//4 floating value
}
I guess one should be familiar with how the vertex position is calculated in the shader in order to be able to answer the question, so I include part of my vertex shader too.
attribute vec3 position;
attribute vec2 texCoord;
attribute vec4 weight;
attribute vec4 weightInd;
uniform mat4 transform[20];//array of uniforms for the 20 joints in my skeleton
void main(){
mat4 WMat;//weighted matrix
float w;
int Index;
for (int i=0; i<4; i++){
Index=int(weightInd[i]);
w=weight[i];
WMat += w*transform[Index];
}
gl_Position= WMat*vec4(position, 1.0);
}
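For reference only (this is not necessarily the fix for the stretching): the common linear-blend skinning formulation also folds each joint's inverse bind-pose matrix into the per-joint transform, so a vertex authored in bind pose is first brought into the joint's local space before the animated global transform is applied. A rough sketch alongside the code above, where InverseBindPose is a hypothetical array of the inverses of the joints' global bind-pose matrices:
//skin matrix per joint = animated global transform * inverse bind pose
for (unsigned int i = 0; i < Matrices.size(); i++)
    Matrices[i] = Matrices[i] * InverseBindPose[i]; //InverseBindPose is hypothetical here
//The vertex shader then blends these skin matrices exactly as above:
//gl_Position = (w0*skin[i0] + w1*skin[i1] + w2*skin[i2] + w3*skin[i3]) * vec4(position, 1.0)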

Does anyone have an example of PBO + SDL2.0 + OpenGL?

Does anyone have a working example of PBOs (Pixel Buffer Objects) + SDL2.0 (Simple DirectMedia Layer) + OpenGL?
The reason is to get asynchronous GPU -> CPU downloads with glReadPixels and thus a performance boost.
Here's my attempt. There's no measured difference at all with use_pbo = false or true. And I've used PBOs with GLUT before on the same machine so I know that my hardware supports it.
I've looked a lot at the http://www.songho.ca/opengl/gl_pbo.html tutorial but don't see that I'm doing anything wrong.
bool use_pbo = true; //remember to wait a bit for fps to pick up speed
if(SDL_Init(SDL_INIT_EVENTS) != 0) throw "SDL_Init";
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
sdl_window = SDL_CreateWindow("", 10, 20, window_width, window_height, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
sdl_gl_context = SDL_GL_CreateContext(sdl_window);
SDL_GL_MakeCurrent(sdl_window, sdl_gl_context);
vertical_sync(false);
{
glGenBuffers(2, pbos);
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[0]);
glBufferData(GL_PIXEL_PACK_BUFFER, pbo_width*pbo_height*4, 0, GL_STREAM_READ);
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[1]);
glBufferData(GL_PIXEL_PACK_BUFFER, pbo_width*pbo_height*4, 0, GL_STREAM_READ);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}
//render to default frame buffer
{
glDrawBuffer(GL_BACK);
glClearColor(0.1, 0.2, 0.3, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glUseProgram(simple_shader);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, transparent_texture);
glBegin(GL_TRIANGLE_STRIP);
//textcoord, color, position
glVertexAttrib2f(2, 0, 1); glVertexAttrib4f(1, 1, 0, 0, 1); glVertexAttrib2f(0, -1, -1); //bottom left
glVertexAttrib2f(2, 1, 1); glVertexAttrib4f(1, 0, 1, 0, 1); glVertexAttrib2f(0, +1, -1); //bottom right
glVertexAttrib2f(2, 0, 0); glVertexAttrib4f(1, 0, 0, 1, 1); glVertexAttrib2f(0, -1, +1); //top left
glVertexAttrib2f(2, 1, 0); glVertexAttrib4f(1, 1, 1, 0, 1); glVertexAttrib2f(0, +1, +1); //top right
glEnd();
glBindTexture(GL_TEXTURE_2D, 0);
glUseProgram(0);
glDisable(GL_BLEND);
while(true)
{
if(use_pbo)
{
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[1]);
GLvoid* map_buffer_data = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
if(map_buffer_data)
{
//glRasterPos2i(-1, -1); //default, left bottom
glRasterPos2i(0, -1);
glDrawPixels(pbo_width, pbo_height, GL_BGRA, GL_UNSIGNED_BYTE, map_buffer_data);
glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}
else
{
glRasterPos2i(0, -1); //right bottom
glDrawPixels(pbo_width, pbo_height, GL_BGRA, GL_UNSIGNED_BYTE, t.c);
}
}
}
SDL_GL_SwapWindow(sdl_window);
//cycle buffers
{
GLuint t = pbos[1];
pbos[1] = pbos[0];
pbos[0] = t;
}
Try to bind the PBO to your texture:
GL_PIXEL_UNPACK_BUFFER - for pixel data being passed into OpenGL
GL_PIXEL_PACK_BUFFER - for pixel data being retrieved from OpenGL
Also you should define the texture image with glTexImage2D:
If a non-zero named buffer object is bound to the GL_PIXEL_UNPACK_BUFFER target (see glBindBuffer) while a texture image is specified, data is treated as a byte offset into the buffer object's data store.
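Separately, regarding the stated goal (asynchronous GPU -> CPU readback): the code in the question never issues a glReadPixels into the pack buffer, which is the step that actually makes the transfer asynchronous. A sketch of the usual two-PBO pattern (as in the songho tutorial linked in the question), assuming the same pbos[2], pbo_width and pbo_height:
//Frame N: kick off an asynchronous copy of the back buffer into pbos[0].
glReadBuffer(GL_BACK);
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[0]);
glReadPixels(0, 0, pbo_width, pbo_height, GL_BGRA, GL_UNSIGNED_BYTE, 0); //returns immediately; the copy happens in the background

//Meanwhile map pbos[1], which holds the pixels requested in the previous frame.
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[1]);
GLubyte* pixels = (GLubyte*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
if (pixels)
{
    //...process last frame's pixels on the CPU...
    glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

//Then cycle pbos[0] and pbos[1] exactly as in the question's "cycle buffers" block.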