So far as i have understood the vertex fetch stage is encapsulated by the VAO and the VAO is required to contain the vertex fetch stage state for piping between the buffer objects and vertex attributes as well as formatting the data in the buffer objects.
Both books that I have been reading on the subject, i.e. the Red Book and the Blue Book, mention explicitly that the VAO must contain the vertex fetch stage state data.
However when i actually create 2 texture objects and simply format the data once WITHOUT a VAO into which to store this information about the buffer, it still runs fine without any hiccups, and then i reload the first object back again, and again it works fine without any issues, so where is this information pulled from about the formatting of the data in the buffer object?
I even upload buffer data a second time to same buffer object which would imply that previous information held there would be reset? And the picture still renders fine to the window
So what exactly is going on? the Books say one thing, what happens in reality is totally different and opposite
Can somebody actually explain what IS actually needed here and what isnt? What is actually going on?
When do we actually need a VAO and when we can do without?
What's the point of extra code processing when it is not needed?
The code below:
/*
 * Demo entry point: draws a textured full-screen quad three times,
 * switching between two textures, without ever creating a VAO.
 * NOTE(review): this only works because init() creates a legacy
 * (compatibility) GLX context, in which a default vertex array object is
 * always bound and stores the glVertexAttribPointer state; in a core
 * profile these calls would require an explicit VAO (see the answer
 * further down in this file).
 */
int main(){
int scrW=1280, scrH=720;
//create context and shader program
init(scrW, scrH);
createShaders();
//create texture objects and load data from image to server memory
char object[2][25];
strcpy(object[0], "back.bmp");
strcpy(object[1], "256x256.bmp");
//triangle 1
GLfloat vertices[] =
// X Y U V
{ -1.0, -1.0, 0.0, 0.0,
1.0, -1.0, 1.0, 0.0,
1.0, 1.0, 1.0, 1.0,
-1.0, 1.0, 0.0, 1.0};
//glPointSize(40.0f);
//create and bound vertex buffer object(memory buffers)
GLuint vbo1 = createVbo();
//The state set by glVertexAttribPointer() is stored in the currently bound vertex array object (VAO) if vertex array object bound
//associates the format of the data for the currently bound buffer object to the vertex attribute so opengl knows how much and how to read it
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), 0);
glEnableVertexAttribArray(0);
//shader vertex attribute for texture coordinates
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), (const GLvoid*)(2 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
//upload vertices to buffer memory
//will upload data to currently bound/active buffer object
//NOTE(review): glBufferData replaces the buffer's data store but does not
//touch the attribute-format state set above
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
//load and create texture object from image data
GLuint tex1 = createTexture(object[0]);
//NOTE(review): GL_QUADS is legacy-only geometry, unavailable in core profiles
glDrawArrays(GL_QUADS, 0, 4);
glXSwapBuffers ( dpy, glxWin );
sleep(3);
//presumably createTexture() leaves the new texture bound, so this draw
//samples the second image - TODO confirm against createTexture()
GLuint tex2 = createTexture(object[1]);
glDrawArrays(GL_QUADS, 0, 4);
glXSwapBuffers ( dpy, glxWin );
sleep(3);
//rebind the first texture and show it again
glBindTexture(GL_TEXTURE_2D, tex1);
glDrawArrays(GL_QUADS, 0, 4);
glXSwapBuffers ( dpy, glxWin );
sleep(3);
//////////////de-initialize
glXMakeContextCurrent( dpy, 0, 0, NULL );
glXDestroyContext( dpy, context );
glXDestroyWindow(dpy, glxWin);
XDestroyWindow( dpy, win );
XCloseDisplay( dpy );
return 0;
}
and the shaders
//GLSL 4.00 pass-through vertex shader: places the 2-D position in clip
//space (z=0, w=1) and forwards the UV to the fragment stage unchanged
const char* vertex_shader =
"#version 400\n"
"layout(location = 0) in vec2 vp;"
"layout(location = 1) in vec2 tex;"
"out vec2 texCoord;"
"void main () {"
" gl_Position = vec4 (vp, 0.0f, 1.0f);"
" texCoord = tex; "
"}";
//GLSL 4.00 fragment shader: samples the texture bound to sampler `s`
//at the interpolated UV
const char* fragment_shader =
"#version 400\n"
"uniform sampler2D s;"
"in vec2 texCoord;"
"out vec4 color;"
"void main () {"
"color = texture(s, texCoord);"
"}";
in order to avoid any confusion , here is the init() procedure
//desired GLX framebuffer attributes: a double-buffered, true-colour
//RGBA8 window config with 24-bit depth and 8-bit stencil
static int att[] =
{
GLX_X_RENDERABLE , True,
GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
GLX_RENDER_TYPE , GLX_RGBA_BIT,
GLX_X_VISUAL_TYPE , GLX_TRUE_COLOR,
GLX_RED_SIZE , 8,
GLX_GREEN_SIZE , 8,
GLX_BLUE_SIZE , 8,
GLX_ALPHA_SIZE , 8,
GLX_DEPTH_SIZE , 24,
GLX_STENCIL_SIZE , 8,
GLX_DOUBLEBUFFER , True,
//GLX_SAMPLE_BUFFERS , 1,
//GLX_SAMPLES , 4,
None
};
//X11/GLX handles shared between init(), main() and the teardown code
Display *dpy;
Window root;
XVisualInfo *vi;
Colormap cmap;
XSetWindowAttributes swa;
Window win;
GLXContext context;
GLXFBConfig *fbc;
GLXWindow glxWin;
int fbcount;
void init(int width, int height){
//set and choose displays for creating window
dpy = XOpenDisplay(NULL);
if (!dpy){
printf("Failed to open X display\n");
exit(1);
}
root = DefaultRootWindow(dpy);
//request a framebuffer configuration
fbc = glXChooseFBConfig(dpy, DefaultScreen(dpy), att, &fbcount);
if (!fbc){
printf( "Failed to retrieve a framebuffer config\n" );
exit(1);
}
vi = glXGetVisualFromFBConfig( dpy, fbc[0] );
if(vi==NULL){
printf("Error getting visual info\n");
exit(1);
}
swa.colormap = XCreateColormap( dpy, RootWindow( dpy, vi->screen ), vi->visual, AllocNone );
swa.background_pixmap = None ;
swa.border_pixel = 0;
swa.event_mask = StructureNotifyMask;
//Window XCreateWindow(display, parent, x, y, width, height, border_width, depth, class, visual, valuemask, attributes)
win = XCreateWindow( dpy, RootWindow( dpy, vi->screen ), 0, 0, width, height, 0, vi->depth, InputOutput, vi->visual, CWBorderPixel|CWColormap|CWEventMask, &swa );
if ( !win ){
printf( "Failed to create window.\n" );
exit(1);
}
context = glXCreateNewContext( dpy, fbc[0], GLX_RGBA_TYPE, NULL, True );
glxWin = glXCreateWindow(dpy, fbc[0], win, NULL);
XMapWindow(dpy, win);
glXMakeContextCurrent(dpy, glxWin, glxWin, context);
// start GLEW extension handler
glewExperimental = GL_TRUE;
GLuint err = glewInit();
if(err!=GLEW_OK){
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
exit(1);
}
XSelectInput(dpy, win, ButtonPressMask|KeyPressMask);
// tell GL to only draw onto a pixel if the shape is closer to the viewer
//glEnable (GL_DEPTH_TEST); // enable depth-testing
//glDepthFunc (GL_LESS); // depth-testing interprets a smaller value as "closer"
}
If you use a compatibility OpenGL context, you don't need a VAO. In a sense, there is a "default" VAO which is always bound. This is how it works in OpenGL 2.x, and this is part of what the "compatibility" means in "compatibility profile".
If you use a core OpenGL context, you do need a VAO. If you don't, your code simply won't work. If you want to continue pretending you don't need a VAO, you can create a single VAO and have it bound for the entire duration of your program.
The issue of choosing a core vs compatibility profiles has its nuances, but in general it is recommended to request a core profile if you are developing a new program. Not all systems have great support for compatibility profiles anyway. Mesa limits compatibility profiles to 3.0 and OS X limits them to 2.1. If you want a core profile, you have to explicitly request a core profile when you create the context.
Related
This question already has answers here:
OpenGL: Performance of drawing a single gigantic primative
(2 answers)
Texture partially off screen - performance difference
(1 answer)
Closed 6 months ago.
Basically my OpenGL code reads from a 2d texture 256 * 256 image and writes from its data to a 2d generated triangle on screen
my question is: what actually happens to the data that was not drawn or not visible? Does the fragment shader access those parts of the data that will not be drawn to the screen, or does it avoid them completely?
for example for the tip of the triangle(or vertex) does it only take that information for that one fragment at the tip of the triangle whilst ignoring the remaining length of the square or does it actually go through all the line of the length of the image data(i.e 256 px) and print them elsewhere etc
is it discarded or is it actually drawn outside of the triangle to elsewhere but not made visible?
Can i actually access this data and drawn it to another triangle or another place on screen? so that i break up the texture image over two or three primitives
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
#include <unistd.h>
#include <X11/Xlib.h>
//#include <X11/Xutil.h>
#include <GL/glew.h>
#include <GL/glx.h>
#include <time.h>
#include <math.h>
//GLSL 4.00 pass-through vertex shader: places the 2-D position in clip
//space (z=0, w=1) and forwards the UV to the fragment stage unchanged
const char* vertex_shader =
"#version 400\n"
"layout(location = 0) in vec2 vp;"
"layout(location = 1) in vec2 tex;"
"out vec2 texCoord;"
"void main () {"
" gl_Position = vec4 (vp, 0.0f, 1.0f);"
" texCoord = tex; "
"}";
//GLSL 4.00 fragment shader: samples the texture bound to sampler `s`
//at the interpolated UV
const char* fragment_shader =
"#version 400\n"
"uniform sampler2D s;"
"in vec2 texCoord;"
"out vec4 color;"
"void main () {"
"color = texture(s, texCoord);"
"}";
//desired GLX framebuffer attributes: a double-buffered, true-colour
//RGBA8 window config with 24-bit depth and 8-bit stencil
static int att[] =
{
GLX_X_RENDERABLE , True,
GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
GLX_RENDER_TYPE , GLX_RGBA_BIT,
GLX_X_VISUAL_TYPE , GLX_TRUE_COLOR,
GLX_RED_SIZE , 8,
GLX_GREEN_SIZE , 8,
GLX_BLUE_SIZE , 8,
GLX_ALPHA_SIZE , 8,
GLX_DEPTH_SIZE , 24,
GLX_STENCIL_SIZE , 8,
GLX_DOUBLEBUFFER , True,
//GLX_SAMPLE_BUFFERS , 1,
//GLX_SAMPLES , 4,
None
};
//X11/GLX handles shared between init(), main() and the teardown code
Display *dpy;
Window root;
XVisualInfo *vi;
Colormap cmap;
XSetWindowAttributes swa;
Window win;
GLXContext context;
GLXFBConfig *fbc;
GLXWindow glxWin;
//shader object and program names filled in by createShaders()
GLuint vs;
GLuint fs;
GLuint shader_program;
/*
 * loadData - read the raw 24-bit pixel payload of "256x256.bmp".
 *
 * Returns a malloc()ed buffer of 256*256*3 bytes (BMP stores BGR order)
 * that the caller must free(), or NULL on failure.
 *
 * NOTE(review): the 54-byte header is skipped but not validated, so the
 * file is assumed to really be an uncompressed 256x256 24bpp BMP.
 */
unsigned char * loadData(){
    unsigned char header[54];
    unsigned int width=256, height=256;
    unsigned int imageSize = width*height * 3;
    unsigned char * data = malloc(imageSize);
    if(!data){printf("Failed to allocate\n"); return NULL;}
    FILE *file = fopen("256x256.bmp", "rb");
    /* previously leaked `data` on this path */
    if(!file){printf("Failed to open\n"); free(data); return NULL;}
    /* skip the BMP file header; fail rather than use garbage */
    if(fread(header, 1, 54, file) != 54){
        printf("Failed to read header\n");
        fclose(file);
        free(data);
        return NULL;
    }
    if(fread(data, 1, imageSize, file) != imageSize){
        printf("Failed to read pixel data\n");
        fclose(file);
        free(data);
        return NULL;
    }
    fclose(file);
    return data;
}
void createShaders(){
GLint result;
GLsizei log_length;
GLchar data[255];
GLchar data2[255];
vs = glCreateShader (GL_VERTEX_SHADER);
glShaderSource (vs, 1, &vertex_shader, NULL);
glCompileShader (vs);
glGetShaderiv(vs, GL_COMPILE_STATUS,&result);
if(result == GL_FALSE){
glGetShaderiv(vs, GL_INFO_LOG_LENGTH, &log_length);
glGetShaderInfoLog(vs, log_length, NULL, data );
printf("vertex shader %s\n", data);
}
fs = glCreateShader (GL_FRAGMENT_SHADER);
glShaderSource (fs, 1, &fragment_shader, NULL);
glCompileShader (fs);
glGetShaderiv(fs, GL_COMPILE_STATUS,&result);
if(result == GL_FALSE){
glGetShaderiv(fs, GL_INFO_LOG_LENGTH, &log_length);
glGetShaderInfoLog(fs, log_length, NULL, data2 );
printf("fragment shader %s\n", data2);
}
shader_program = glCreateProgram ();
glAttachShader (shader_program, fs);
glAttachShader (shader_program, vs);
glLinkProgram (shader_program);
glUseProgram (shader_program);
}
void init(){
dpy = XOpenDisplay(NULL);
int fbcount;
if (!dpy){
printf("Failed to open X display\n");
exit(1);
}
root = DefaultRootWindow(dpy);
//request a framebuffer configuration
fbc = glXChooseFBConfig(dpy, DefaultScreen(dpy), att, &fbcount);
if (!fbc){
printf( "Failed to retrieve a framebuffer config\n" );
exit(1);
}
vi = glXGetVisualFromFBConfig( dpy, fbc[0] );
if(vi==NULL){
printf("Error getting visual info\n");
exit(1);
}
swa.colormap = XCreateColormap( dpy, RootWindow( dpy, vi->screen ), vi->visual, AllocNone );
swa.background_pixmap = None ;
swa.border_pixel = 0;
swa.event_mask = StructureNotifyMask;
//Window XCreateWindow(display, parent, x, y, width, height, border_width, depth, class, visual, valuemask, attributes)
win = XCreateWindow( dpy, RootWindow( dpy, vi->screen ), 0, 0, 640, 480, 0, vi->depth, InputOutput, vi->visual, CWBorderPixel|CWColormap|CWEventMask, &swa );
if ( !win ){
printf( "Failed to create window.\n" );
exit(1);
}
context = glXCreateNewContext( dpy, fbc[0], GLX_RGBA_TYPE, NULL, True );
glxWin = glXCreateWindow(dpy, fbc[0], win, NULL);
XMapWindow(dpy, win);
glXMakeContextCurrent(dpy, glxWin, glxWin, context);
// start GLEW extension handler
glewExperimental = GL_TRUE;
GLuint err = glewInit();
if(err!=GLEW_OK){
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
exit(1);
}
XSelectInput(dpy, win, ButtonPressMask|KeyPressMask);
// tell GL to only draw onto a pixel if the shape is closer to the viewer
//glEnable (GL_DEPTH_TEST); // enable depth-testing
//glDepthFunc (GL_LESS); // depth-testing interprets a smaller value as "closer"
}
/*
 * Renders a single textured triangle (a 256x256 BMP mapped onto it) for
 * six seconds, then tears down the GLX window and context.
 */
int main(){
init();
createShaders();
unsigned char * data = loadData();
//NOTE(review): data is NULL if the BMP failed to load; it is passed to
//glTexImage2D unchecked (legal, but the texture content is then undefined)
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
//BMP pixel data is BGR-ordered, hence the GL_BGR source format
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 256, 256, 0, GL_BGR,
GL_UNSIGNED_BYTE, data);
GLuint vbo;
GLuint vao;
//s and r scale clip space so the geometry spans 256 px in a 640x480 window
float s = 256.0/640.0, r =256.0/480.0 ;
int k = 1; //only referenced by the commented-out fourth vertex below
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
GLfloat vertices[] =
// x y U V
{-1.0*s, -1.0*r, 0.0, 0.0,
0.0*s, 1.0*r, 0.5, 1.0,
1.0*s, -1.0*r, 1.0, 0.0};
// -1.0*s/k, 1.0*r/k, 0.0, 1.0 };
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
//interleaved layout: 2 floats position + 2 floats UV per vertex;
//this format state is recorded in the VAO bound above
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), 0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), (const GLvoid*)(2 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
//glPointSize(40.0f);
glDrawArrays(GL_TRIANGLES, 0, 3);
glXSwapBuffers ( dpy, glxWin );
sleep(6);
//////////////de-initialize
free(data);
glXMakeContextCurrent( dpy, 0, 0, NULL );
glXDestroyContext( dpy, context );
glXDestroyWindow(dpy, glxWin);
XDestroyWindow( dpy, win );
XCloseDisplay( dpy );
return 0;
}
So the above is my code and it renders correctly and produces a simple render of a 2d image that is 256 * 256 to a triangle on screen
The UVs that you provide to your geometry define what pieces of your texture are drawn. It is entirely up to the hardware to define how that texture gets plastered on to your geometry, which is why I won't comment on:
is it discarded or is it actually drawn outside of the triangle to elsewhere but not made visible?
However, you can assume that the driver interpolates over the UVs that you give, according to which part of the geometry maps to the screen pixel that is currently being drawn, and figures out what part(s) of the texture need to be used and applied to the screen pixel. In other words, it would only use the parts of the texture that are actually on the geometry.
If you want to draw this texture onto additional pieces of geometry, you should bind this texture when you draw those shapes and provide the correct UVs for them.
On most hardware, it's the graphic rendering chip that clips the triangles at the frame buffer borders (or the clipping region). The geometry and fragments outside the frame buffer are discarded as early as possible while rendering.
The fragment shader is not called for these fragments outside the buffer.
1) Do I understand correctly that to draw using vertex arrays or VBOs I need for all my attributes to either call glBindAttribLocation before the shader program linkage or call glGetAttribLocation after the shader program was successfully linked and then use the bound/obtained index in the glVertexAttribPointer and glEnableVertexAttribArray calls?
To be more specific: these three functions - glGetAttribLocation, glVertexAttribPointer and glEnableVertexAttribArray - they all have an input parameter named "index". Is it the same "index" for all the three? And is it the same thing as the one returned by glGetAttribLocation?
If yes:
2) I've been facing a problem on OS X, I described it here: https://stackoverflow.com/questions/28093919/using-default-attribute-location-doesnt-work-on-osx-osx-opengl-bug , but unfortunately didn't get any replies.
The problem is that depending on what attribute locations I bind to my attributes I do or do not see anything on the screen. I only see this behavior on my MacBook Pro with OS X 10.9.5; I've tried running the same code on Linux and Windows and it seems to work on those platforms independently from which locations are my attributes bound to.
Here is a code example (which is supposed to draw a red triangle on the screen) that exhibits the problem:
#include <iostream>
#include <GLFW/glfw3.h>
// GL object names shared between InitGL/Render/SetViewport/FreeGL
GLuint global_program_object;
GLint global_position_location;
GLint global_aspect_ratio_location;
GLuint global_buffer_names[1];
// Compile a single shader object of the given type from GLSL source.
// Returns the shader object name, or 0 on failure (the info log is
// printed and the shader object is deleted).
int LoadShader(GLenum type, const char *shader_source)
{
    GLuint shader;
    GLint compiled;
    shader = glCreateShader(type);
    if (shader == 0)
        return 0;
    glShaderSource(shader, 1, &shader_source, NULL);
    glCompileShader(shader);
    glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
    if (!compiled)
    {
        GLint info_len = 0;
        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &info_len);
        if (info_len > 1)
        {
            char* info_log = new char[info_len];
            glGetShaderInfoLog(shader, info_len, NULL, info_log);
            std::cout << "Error compiling shader" << info_log << std::endl;
            // BUGFIX: the buffer was allocated with new[], so it must be
            // released with delete[] (plain delete is undefined behaviour)
            delete[] info_log;
        }
        glDeleteShader(shader);
        return 0;
    }
    return shader;
}
int InitGL()
{
char vertex_shader_source[] =
"attribute vec4 att_position; \n"
"attribute float dummy;\n"
"uniform float uni_aspect_ratio; \n"
"void main() \n"
" { \n"
" vec4 test = att_position * dummy;\n"
" mat4 mat_projection = \n"
" mat4(1.0 / uni_aspect_ratio, 0.0, 0.0, 0.0, \n"
" 0.0, 1.0, 0.0, 0.0, \n"
" 0.0, 0.0, -1.0, 0.0, \n"
" 0.0, 0.0, 0.0, 1.0); \n"
" gl_Position = att_position; \n"
" gl_Position *= mat_projection; \n"
" } \n";
char fragment_shader_source[] =
"void main() \n"
" { \n"
" gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); \n"
" } \n";
GLuint vertex_shader;
GLuint fragment_shader;
GLuint program_object;
GLint linked;
vertex_shader = LoadShader(GL_VERTEX_SHADER , vertex_shader_source );
fragment_shader = LoadShader(GL_FRAGMENT_SHADER, fragment_shader_source);
program_object = glCreateProgram();
if(program_object == 0)
return 1;
glAttachShader(program_object, vertex_shader );
glAttachShader(program_object, fragment_shader);
// Here any index except 0 results in observing the black screen
glBindAttribLocation(program_object, 1, "att_position");
glLinkProgram(program_object);
glGetProgramiv(program_object, GL_LINK_STATUS, &linked);
if(!linked)
{
GLint info_len = 0;
glGetProgramiv(program_object, GL_INFO_LOG_LENGTH, &info_len);
if(info_len > 1)
{
char* info_log = new char[info_len];
glGetProgramInfoLog(program_object, info_len, NULL, info_log);
std::cout << "Error linking program" << info_log << std::endl;
delete info_log;
}
glDeleteProgram(program_object);
return 1;
}
global_program_object = program_object;
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glUseProgram(global_program_object);
global_position_location = glGetAttribLocation (global_program_object, "att_position");
global_aspect_ratio_location = glGetUniformLocation(global_program_object, "uni_aspect_ratio");
GLfloat vertices[] = {-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f};
glGenBuffers(1, global_buffer_names);
glBindBuffer(GL_ARRAY_BUFFER, global_buffer_names[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 9, vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
return 0;
}
// Draw the triangle once: bind the program and VBO, point the cached
// position attribute at the vertex data, draw, then unbind everything.
// The attribute pointer is (re)specified every frame because no VAO is
// used - this code targets a legacy context.
void Render()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glUseProgram(global_program_object);
glBindBuffer(GL_ARRAY_BUFFER, global_buffer_names[0]);
glVertexAttribPointer(global_position_location, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(global_position_location);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(global_position_location);
glUseProgram(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
// Release the GL objects created in InitGL (buffer and program).
void FreeGL()
{
glDeleteBuffers(1, global_buffer_names);
glDeleteProgram(global_program_object);
}
void SetViewport(int width, int height)
{
glViewport(0, 0, width, height);
glUseProgram(global_program_object);
glUniform1f(global_aspect_ratio_location, static_cast<GLfloat>(width) / static_cast<GLfloat>(height));
}
// Create a GLFW window with a default (legacy) GL context, build the
// triangle resources, and render until the window is closed.
int main(void)
{
GLFWwindow* window;
if (!glfwInit())
return -1;
// no version/profile hints are given, so GLFW creates a legacy context -
// this is what makes the attribute-0 behaviour discussed below apply
window = glfwCreateWindow(640, 480, "Hello World", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
InitGL(); //NOTE(review): return value (1 on failure) is ignored
// Double the resolution to correctly draw with Retina display
SetViewport(1280, 960);
while (!glfwWindowShouldClose(window))
{
Render();
glfwSwapBuffers(window);
glfwPollEvents();
}
FreeGL();
glfwTerminate();
return 0;
}
Does this look like a bug to you? Can anyone reproduce it? If it's a bug where should I report it?
P.S.
I've also tried SDL instead of GLFW, the behavior is the same...
The behavior you see is actually correct as per the spec, and MacOSX has something to do with this, but only in a very indirect way.
To answer question 1) first: You are basically correct. With modern GLSL (>=3.30), you can also specifiy the desired index via the layout(location=...) qualifier directly in the shader code, instead of using glBindAttribLocation(), but that is only a side note.
The problem you are facing is that you are using a legacy GL context. You do not specify a desired version, so you will get maximum compatibility to the old way. Now on windows, you are very likely to get a compatibility profile of the highest version supported by the implementation (typically GL3.x or GL4.x on non-ancient GPUs).
However, on OSX, you are limited to at most GL2.1. And this is where the crux lies: your code is invalid in GL2.x. To explain this, I have to go back in GL history. In the beginning, there was the immediate mode, so you did draw by
glBegin(primType);
glColor3f(r,g,b);
glVertex3f(x,y,z);
[...]
glColor3f(r,g,b);
glVertex3f(x,y,z);
glEnd();
Note that the glVertex call is what actually creates a vertex. All other per-vertex attributes are basically some current vertex state which can be set any time, but calling glVertex will take all of those current attributes together with the position to form the vertex which is fed to the pipeline.
Now when vertex arrays were added, we got functions like glVertexPointer(), glColorPointer() and so on, and each attribute array could be enabled or disabled separately via glEnableClientState(). The array-based draw calls are actually defined in terms of the immediate mode in the OpenGL 2.1 specification as glDrawArrays(GLenum mode, GLint first, GLsizei count) being equivalent to
glBegin(mode);
for (i=0; i<count; i++)
ArrayElement(first + i);
glEnd();
with ArrayElement(i) being defined (this one is derived from the wording of the GL 1.5 spec):
if ( normal_array_enabled )
Normal3...( <i-th normal value> );
[...] // similiar for all other bultin attribs
if ( vertex_array_enabled)
Vertex...( <i-th vertex value> );
This definition has a subtle consequence: you must have the GL_VERTEX_ARRAY attribute array enabled, otherwise nothing will be drawn, since no equivalent of glVertex calls is generated.
Now when the generic attributes were added in GL2.0, a special guarantee was made: generic attribute 0 is aliasing the builtin glVertex attribute - so both can be used interchangeably, in immediate mode as well as in arrays. So glVertexAttrib3f(0,x,y,z) "creates" a vertex the same way glVertex3f(x,y,z) would have. And using an array with glEnableVertexAttribArray(0) is as good as glEnableClientState(GL_VERTEX_ARRAY).
In GL 2.1, the ArrayElement(i) function now looks as follows:
if ( normal_array_enabled )
Normal3...( <i-th normal value> );
[...] // similiar for all other bultin attribs
for (a=1; a<max_attribs; a++) {
if ( generic_attrib_a_enabled )
glVertexAttrib...(a, <i-th value of attrib a> );
}
if ( generic_attrib_0_enabled)
glVertexAttrib...(0, <i-th value of attrib 0> );
else if ( vertex_array_enabled)
Vertex...( <i-th vertex value> );
Now this is what happens to you. You absolutely need attribute 0 (or the old GL_VERTEX_ARRAY attribute) to be enabled for this to generate any vertices for the pipeline.
Note that it should be possible in theory to just enable attribute 0, no matter if it is used in the shader or not. You should just make sure that the corresponding attrib pointer points to valid memory, to be 100% safe. So you simply could check if your attribute index 0 is used, and if not, just set the same pointer as attribute 0 as you did for your real attribute, and the GL should be happy. But I haven't tried this.
In more modern GL, these requirements are not there anymore, and drawing without attribute 0 will work as intended, and that is what you saw on those other systems. Maybe you should consider switching to modern GL, say >= 3.2 core profile, where the issue will not be present (but you need to update your code a bit, including the shaders).
here is my code:
#include <GL/glew.h> // include GLEW and new version of GL on Windows
#include <GLFW/glfw3.h> // GLFW helper library
#include <stdio.h>
/*
 * Minimal GLFW/GLEW "hello triangle" for an OpenGL 4.1 core-profile,
 * forward-compatible context (the hints below are what OS X requires).
 * Renders a purple triangle from a 3-vertex VBO bound through a VAO.
 */
int main () {
    // start GL context and O/S window using the GLFW helper library
    if (!glfwInit ()) {
        fprintf (stderr, "ERROR: could not start GLFW3\n");
        return 1;
    }
    // uncomment these lines if on Apple OS X
    glfwWindowHint (GLFW_CONTEXT_VERSION_MAJOR, 4);
    glfwWindowHint (GLFW_CONTEXT_VERSION_MINOR, 1);
    glfwWindowHint (GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
    glfwWindowHint (GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    GLFWwindow* window = glfwCreateWindow (640, 480, "Hello Triangle", NULL, NULL);
    if (!window) {
        fprintf (stderr, "ERROR: could not open window with GLFW3\n");
        glfwTerminate();
        return 1;
    }
    glfwMakeContextCurrent (window);
    // start GLEW extension handler
    glewExperimental = GL_TRUE;
    glewInit ();
    // get version info
    const GLubyte* renderer = glGetString (GL_RENDERER); // get renderer string
    const GLubyte* version = glGetString (GL_VERSION); // version as a string
    printf ("Renderer: %s\n", renderer);
    printf ("OpenGL version supported %s\n", version);
    // tell GL to only draw onto a pixel if the shape is closer to the viewer
    glEnable (GL_DEPTH_TEST); // enable depth-testing
    glDepthFunc (GL_LESS); // depth-testing interprets a smaller value as "closer"
    /* OTHER STUFF GOES HERE NEXT */
    float points[] = {
        0.0f, 0.5f, 0.0f,
        0.5f, -0.5f, 0.0f,
        -0.5f, -0.5f, 0.0f
    };
    GLuint vbo = 0;
    glGenBuffers (1, &vbo);
    glBindBuffer (GL_ARRAY_BUFFER, vbo);
    glBufferData (GL_ARRAY_BUFFER, 9 * sizeof (float), points, GL_STATIC_DRAW);
    // a VAO is mandatory in a core profile; it records the attribute
    // format/binding set up below
    GLuint vao = 0;
    glGenVertexArrays (1, &vao);
    glBindVertexArray (vao);
    glEnableVertexAttribArray (0);
    glBindBuffer (GL_ARRAY_BUFFER, vbo);
    glVertexAttribPointer (0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
    const char* vertex_shader =
    "#version 410\n"
    "layout(location = 0) in vec4 vPosition;"
    "void main () {"
    " gl_Position = vPosition;"
    "}";
    const char* fragment_shader =
    "#version 410\n"
    "out vec4 frag_colour;"
    "void main () {"
    " frag_colour = vec4 (0.5, 0.0, 0.5, 1.0);"
    "}";
    GLuint vs = glCreateShader (GL_VERTEX_SHADER);
    glShaderSource (vs, 1, &vertex_shader, NULL);
    glCompileShader (vs);
    GLuint fs = glCreateShader (GL_FRAGMENT_SHADER);
    glShaderSource (fs, 1, &fragment_shader, NULL);
    glCompileShader (fs);
    GLuint shader_programme = glCreateProgram ();
    glAttachShader (shader_programme, fs);
    glAttachShader (shader_programme, vs);
    glLinkProgram (shader_programme);
    while (!glfwWindowShouldClose (window)) {
        // wipe the drawing surface clear.  BUGFIX: the colour buffer must
        // be cleared too - clearing only the depth buffer leaves the
        // window's colour contents undefined, so the triangle may never
        // reliably show up
        glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        const GLfloat color[]={0.0,0.2,0.0,1.0};
        //glClearBufferfv(GL_COLOR,0,color);
        glUseProgram (shader_programme);
        glBindVertexArray (vao);
        // draw points 0-3 from the currently bound VAO with current in-use shader
        glDrawArrays (GL_TRIANGLES, 0, 3);
        // update other events like input handling
        glfwPollEvents ();
        // put the stuff we've been drawing onto the display
        glfwSwapBuffers (window);
    } // close GL context and any other GLFW resources
    glfwTerminate();
    return 0;
}
when i comment line glClear(GL_DEPTH_BUFFER_BIT),the window showing up did not display anything,does this routine matter?
i am using Xcode and Mac OS X 10.1.2,please help me with this ,thanks
The depth buffer is used to decide if geometry you render is closer to the viewer than geometry you rendered previously. This allows the elimination of hidden geometry.
This test is executed per fragment (pixel). Any time a fragment is rendered, its depth is compared to the corresponding value in the depth buffer. If the new depth is bigger, the fragment is eliminated by the depth test. Otherwise, the fragment is written to the color buffer, and the value in the depth buffer is updated with the depth of the new fragment. The functionality is controlled by these calls you make during setup:
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
This way, if a fragment is covered by multiple triangles, the color of the final pixels will be given by the geometry that was closest to the viewer.
Now, if you don't clear the depth buffer at the start of each frame, the comparison to the value in the depth buffer described above will use whatever value happens to be in the depth buffer. This could be a value from a previous frame, or an uninitialized garbage value. Therefore, fragments can be eliminated by the depth test even though no fragments in the current frame were drawn at the same position before. In the extreme case, all fragments are eliminated by the depth test, and you see nothing at all.
Unless you are certain that you will render something to all pixels in your window, you will also want to clear the color buffer at the start of the frame. So your clear call should be:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// wipe the drawing surface clear
, glClear (GL_DEPTH_BUFFER_BIT);
What that comment means above the code means is that it clears the depth buffer. The depth buffer is the part of the frame buffer, that makes objects being obstructed by other objectsin front of them. Without clearing the depth buffer, you'd draw into the depth structure of the previous drawing.
I have some simple code that draws a 30x30 ball to the screen. It worked perfectly when it was all inside of main.cpp, but when I put it into its own separate class I got a very faint grey box taking up the top right of the screen instead. When I paste the exact code I have inside of my Ball class into main.cpp it works perfectly again.
When the code is in the Ball class:
When I paste all of the code from the Ball class into main (and how it was before the Ball class):
The code can be found here.
main.cpp
#include "Includes.hpp"
// default window dimensions passed to the camera/context setup
static unsigned int Width = 800;
static unsigned int Height = 600;
// Sets up the camera and a single VAO, then draws a Ball instance every
// frame until ESC is pressed or the window closes (GLFW 2-style API:
// glfwSwapBuffers/glfwGetWindowParam take no window handle).
int main()
{
bool CursorEnabled = true; //NOTE(review): set but never used here
bool PolygonMode = false; //NOTE(review): set but never used here
//Camera.CameraInit(45.0f , Width , Height , Model); use ortho instead \/
Camera.CameraInit (Width, Height, glm::mat4 (1.0f));
GLuint vao;
glGenVertexArrays (1 , &vao);
glBindVertexArray(vao);
// Ball's constructor issues GL calls (buffers, texture, shaders), so a
// current GL context must already exist here - presumably created inside
// Camera.CameraInit; TODO confirm
Ball ball;
float LastTime = glfwGetTime(); // (for deltatime)
while (true)
{
float DeltaTime = glfwGetTime() - LastTime; // Deltatime init (currently unused)
LastTime = glfwGetTime(); // update for deltatime
glClearColor (0.0f , 0.0f , 0.0f , 1.0f);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
ball.Draw();
glfwSwapBuffers();
if (glfwGetKey (GLFW_KEY_ESC) || !glfwGetWindowParam(GLFW_OPENED))
{
std::cout << "\nShutdown command. Exiting...\n" << std::flush;
return 0;
}
}
}
Ball.hpp
#ifndef BALL
#define BALL
// A 30x30-pixel textured quad (two triangles) that owns its own vertex
// and UV buffers, texture, and shader program.  GL setup happens in the
// constructor (Ball.cpp), drawing in Draw().
class Ball
{
private:
//Vertices and UV coordinates.
std::vector <glm::vec2> ballVert;
std::vector <glm::vec2> ballUV;
//Buffers
GLuint vertBuf;
GLuint uvBuf;
//Image
GLuint ballTex;
//IDs for the shader.
//NOTE(review): uniform/attribute locations are returned as GLint (-1 on
//failure); storing them in GLuint hides failed lookups
GLuint shader;
GLuint MVPid;
GLuint sampler;
GLuint coord;
GLuint uv;
public:
//Constructor
Ball();
void Draw();
};
#endif
Ball.cpp
#include "Includes.hpp"
// Builds the ball's GL resources: two triangles worth of vertices/UVs,
// the texture loaded from disk, the shader program, and the cached
// uniform/attribute locations.  Requires a current GL context.
Ball::Ball()
{
//Create ball vertices and UV coords
ballVert.push_back(glm::vec2(0 , 0)); /*left /\*/
ballVert.push_back(glm::vec2(0 , 30)); /*right / \*/
ballVert.push_back(glm::vec2(30 , 0)); /*top ------*/
ballVert.push_back(glm::vec2(0 , 30)); /*left /\*/
ballVert.push_back(glm::vec2(30 , 30)); /*right / \*/
ballVert.push_back(glm::vec2(30 , 0)); /*top ------*/
ballUV.push_back(glm::vec2(0 , 1));
ballUV.push_back(glm::vec2(0 , 0));
ballUV.push_back(glm::vec2(1 , 1));
ballUV.push_back(glm::vec2(0 , 0));
ballUV.push_back(glm::vec2(1 , 0));
ballUV.push_back(glm::vec2(1 , 1));
//Load the image
//NOTE(review): SOIL_load_OGL_texture returns 0 on failure, which is not
//checked here - a failed load would silently draw with texture 0
ballTex = SOIL_load_OGL_texture (
"img/ball.png",
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID, SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT);
//Generate buffers
glGenBuffers (1 , &vertBuf);
glBindBuffer (GL_ARRAY_BUFFER , vertBuf);
glBufferData (GL_ARRAY_BUFFER , ballVert.size() * sizeof(glm::vec2) , &ballVert[0] , GL_STATIC_DRAW);
glGenBuffers (1 , &uvBuf);
glBindBuffer (GL_ARRAY_BUFFER , uvBuf);
glBufferData (GL_ARRAY_BUFFER , ballUV.size() * sizeof (glm::vec2) , &ballUV[0] , GL_STATIC_DRAW);
shader = Game.LoadShaders ("src/glsl/ball.vert" , "src/glsl/ball.frag");
MVPid = glGetUniformLocation (shader , "MVP");
sampler = glGetUniformLocation (shader, "sampler");
coord = glGetAttribLocation(shader, "coord");
uv = glGetAttribLocation (shader, "uv");
}
// Draws the quad.  Because no VAO stores the attribute state, every call
// re-binds each buffer and re-issues glVertexAttribPointer; the pointer
// call captures whichever buffer is bound to GL_ARRAY_BUFFER at that
// moment, so each bind must directly precede its pointer call.
void Ball::Draw()
{
glUseProgram (shader);
glUniformMatrix4fv(MVPid, 1, GL_FALSE, &Camera.GetMVP()[0][0]); // Send MVP to shader
glActiveTexture (GL_TEXTURE0); // bind the ball texture to unit 0
glBindTexture (GL_TEXTURE_2D, ballTex);
glUniform1i (sampler , 0); // sampler reads from texture unit 0
glEnableVertexAttribArray (coord); // position attribute from vertBuf
glBindBuffer(GL_ARRAY_BUFFER, vertBuf);
glVertexAttribPointer(
coord, // The attribute we want to configure
2, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride (0 = tightly packed)
(void*)0 // array buffer offset
);
glEnableVertexAttribArray (uv); // UV attribute from uvBuf
glBindBuffer (GL_ARRAY_BUFFER , uvBuf);
glVertexAttribPointer (uv, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
// Draw the two triangles (6 vertices).
glDrawArrays(GL_TRIANGLES, 0, ballVert.size());
glDisableVertexAttribArray(coord);
glDisableVertexAttribArray(uv);
}
I got some time and I've read about VBO and that's what I got :
http://img64.imageshack.us/img64/5733/fps8.jpg
Ok, it's much better than before. It's compiled in Release mode. It uses VBOs (probably — if everything's OK) and glDrawArrays to draw.
Here's the drawing code. Please give me advice on how to optimize it. I was hoping for a few thousand FPS with the terrain — is that realistic?
void DrawVBO (void)
{
int i;
CVert* m_pVertices;
CTexCoord* m_pTexCoords;
unsigned int m_nTextureId;
m_pVertices = NULL;
m_pTexCoords = NULL;
m_nVertexCount = 0;
m_nVBOVertices = m_nVBOTexCoords = m_nTextureId = 0;
if( IsExtensionSupported( "GL_ARB_vertex_buffer_object" ) )
{
// Pobierz wskaźniki na funkcje OpenGL
glGenBuffersARB = (PFNGLGENBUFFERSARBPROC) wglGetProcAddress("glGenBuffersARB");
glBindBufferARB = (PFNGLBINDBUFFERARBPROC) wglGetProcAddress("glBindBufferARB");
glBufferDataARB = (PFNGLBUFFERDATAARBPROC) wglGetProcAddress("glBufferDataARB");
glDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC)
wglGetProcAddress("glDeleteBuffersARB");
}
todrawquads=0;
nIndex=0;
// my function counting how many quads I will draw
for (i=0;i<MAX_CHUNKS_LOADED;i++)
{
if (chunks_loaded[i].created==1)
{
countquads(i);
}
}
m_nVertexCount=4*todrawquads;
m_pVertices = new CVec[m_nVertexCount];
m_pTexCoords = new CTexCoord[m_nVertexCount];
// another my function adding every quad which i'm going to draw (its verticles) to array
for (i=0;i<MAX_CHUNKS_LOADED;i++)
{
if (chunks_loaded[i].created==1)
{
addchunktodraw(i,m_pVertices,m_pTexCoords);
}
}
glClearColor (1,1,1, 0.0);
glColor3f(1,1,1);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear Screen And Depth Buffer
glLoadIdentity (); // Reset The Modelview Matrix
fps++;
// Camera settings.
//gluLookAt (zom, zom, zom, 0.0, 0.0, 0.0, 0, 0, 1);
gluLookAt (zoom, zoom, zoom, 0.0, 0.0, 0.0, 0, 0, 1);
glRotatef((rot_x / 180 * 3.141592654f),1,0,0);
glRotatef((rot_y / 180 * 3.141592654f),0,1,0);
glRotatef((rot_z / 180 * 3.141592654f),0,0,1);
//m_nTextureId = t_terrain;
// Generate And Bind The Vertex Buffer
glGenBuffersARB( 1, &m_nVBOVertices ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*3*sizeof(float), m_pVertices, GL_STATIC_DRAW_ARB );
// Generate And Bind The Texture Coordinate Buffer
glGenBuffersARB( 1, &m_nVBOTexCoords ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*2*sizeof(float), m_pTexCoords, GL_STATIC_DRAW_ARB );
// Enable Pointers
glEnableClientState( GL_VERTEX_ARRAY ); // Enable Vertex Arrays
glEnableClientState( GL_TEXTURE_COORD_ARRAY ); // Enable Texture Coord Arrays
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
glVertexPointer( 3, GL_FLOAT, 0, (char *) NULL ); // Set The Vertex Pointer To The Vertex Buffer
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
glTexCoordPointer( 2, GL_FLOAT, 0, (char *) NULL ); // Set The TexCoord Pointer To The TexCoord Buffe
glDrawArrays( GL_QUADS, 0, m_nVertexCount); // Draw All Of The Triangles At Once
glDisableClientState( GL_VERTEX_ARRAY ); // Disable Vertex Arrays
glDisableClientState( GL_TEXTURE_COORD_ARRAY ); // Disable Texture Coord Arrays
liniergb();
glutSwapBuffers();
delete [] m_pVertices; m_pVertices = NULL;
delete [] m_pTexCoords; m_pTexCoords = NULL;
}
So what I can do with it ?
(Code above is main draw function)
edit
I've moved this :
if( IsExtensionSupported( "GL_ARB_vertex_buffer_object" ) )
{
// Pobierz wskaźniki na funkcje OpenGL
glGenBuffersARB = (PFNGLGENBUFFERSARBPROC) wglGetProcAddress("glGenBuffersARB");
glBindBufferARB = (PFNGLBINDBUFFERARBPROC) wglGetProcAddress("glBindBufferARB");
glBufferDataARB = (PFNGLBUFFERDATAARBPROC) wglGetProcAddress("glBufferDataARB");
glDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC)
wglGetProcAddress("glDeleteBuffersARB");
}
to my main function. Probably no improvement.
edit2
Now I can also see that it's eating memory. Every few seconds the program's memory usage rises more and more... What's wrong? What am I not deleting?
edit3
Ok , thanks sooooo much. I've moved some code outside draw function and ... much more fps !
Thanks so much !
http://img197.imageshack.us/img197/5193/fpsfinal.jpg
It's 640x640 blocks (so 40 times bigger) map with 650'000 quads (about 70 times more) and still ~170 fps. Great ! And no memory leaks. Thanks again !
You're reloading all your buffers and freeing them again in every single frame? Stop doing that and your frame rate will go up.
Note that your current code will eventually run out of VBO identifiers, since you never delete the VBOs you create.
Linking extension functions definitely doesn't need to be done every frame either.
Your DrawVBO function should contain only:
// Per-frame work only: clear, set the camera, bind the already-uploaded
// buffers and draw.  All buffer creation/upload happens elsewhere, once.
glClearColor (1,1,1, 0.0);
glColor3f(1,1,1);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear Screen And Depth Buffer
glLoadIdentity (); // Reset The Modelview Matrix
fps++;
// Camera settings.
//gluLookAt (zom, zom, zom, 0.0, 0.0, 0.0, 0, 0, 1);
gluLookAt (zoom, zoom, zoom, 0.0, 0.0, 0.0, 0, 0, 1);
glRotatef((rot_x / 180 * 3.141592654f),1,0,0);
glRotatef((rot_y / 180 * 3.141592654f),0,1,0);
glRotatef((rot_z / 180 * 3.141592654f),0,0,1);
// Enable Pointers
glEnableClientState( GL_VERTEX_ARRAY ); // Enable Vertex Arrays
glEnableClientState( GL_TEXTURE_COORD_ARRAY ); // Enable Texture Coord Arrays
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
glVertexPointer( 3, GL_FLOAT, 0, (char *) NULL ); // Set The Vertex Pointer To The Vertex Buffer
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
glTexCoordPointer( 2, GL_FLOAT, 0, (char *) NULL ); // Set The TexCoord Pointer To The TexCoord Buffer
glDrawArrays( GL_QUADS, 0, m_nVertexCount); // Draw All Of The Quads At Once
glDisableClientState( GL_VERTEX_ARRAY ); // Disable Vertex Arrays
glDisableClientState( GL_TEXTURE_COORD_ARRAY ); // Disable Texture Coord Arrays
liniergb();
glutSwapBuffers();
You need to move the rest to separate function called only once at start-up (or when the terrain changes).
A few things stand out:
In your drawing function you're allocating memory for your geometry data
m_pVertices = new CVec[m_nVertexCount];
m_pTexCoords = new CTexCoord[m_nVertexCount];
Memory allocation is an extremely expensive operation; this is one of those things that should be done only once. OpenGL is not meant to be "initialized" – but the data structures you're going to pass to it are!
Here you're copying the newly allocated buffers to OpenGL, again and again with each frame. This is exactly the opposite of what to do.
// Generate And Bind The Vertex Buffer
glGenBuffersARB( 1, &m_nVBOVertices ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*3*sizeof(float), m_pVertices, GL_STATIC_DRAW_ARB );
// Generate And Bind The Texture Coordinate Buffer
glGenBuffersARB( 1, &m_nVBOTexCoords ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*2*sizeof(float), m_pTexCoords, GL_STATIC_DRAW_ARB );
The whole idea of VBOs is, to load the data only once, copy it to OpenGL and then never reallocate it again. Note that this is not OpenGL initialization, it's data initialization, something totally reasonable. I see that you have named your variables m_pVertices and m_pTexCoords indicating that those are class member variables. Then the solution is simple: Move that whole initialization code into some loader function. Also instead of naked C++ arrays I strongly suggest using std::vector.
So let's fix this:
// Load Extensions only once. Well, once per context actually, however
// why don't you just use an extension wrapper and forget about those
// gritty details? Google GLEW or GLee
// Resolves the GL_ARB_vertex_buffer_object entry points.  Call once per
// GL context (wglGetProcAddress results are context-specific on Windows),
// before any glGenBuffersARB/glBindBufferARB/glBufferDataARB usage.
// NOTE(review): if the extension is unsupported the pointers stay unset
// and any later call through them will crash — consider signalling the
// failure to the caller.
void init_extensions()
{
if( IsExtensionSupported( "GL_ARB_vertex_buffer_object" ) )
{
// Fetch the OpenGL function pointers.
glGenBuffersARB = (PFNGLGENBUFFERSARBPROC) wglGetProcAddress("glGenBuffersARB");
glBindBufferARB = (PFNGLBINDBUFFERARBPROC) wglGetProcAddress("glBindBufferARB");
glBufferDataARB = (PFNGLBUFFERDATAARBPROC) wglGetProcAddress("glBufferDataARB");
glDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC) wglGetProcAddress("glDeleteBuffersARB");
}
}
// Terrain renderer: uploads all chunk quads into two VBOs once, then
// draws them each frame with a single glDrawArrays call.
// FIXES over the original snippet: the undeclared names `vertexcount` and
// `texcoords` are corrected to `m_nVertexCount`/`texCoords`; the second
// loop's `i` (which was out of scope) is declared in the for statement;
// the class body now ends with the required `;`; and the two methods are
// public so callers can actually invoke them.
class Block0r
{
protected:
    GLuint m_nTextureId;
    GLuint m_nVBOVertices;
    GLuint m_nVBOTexCoords;
    GLuint m_nVertexCount;

public:
    // Call this one time (at start-up, or whenever the terrain changes)
    // to build the quad arrays and upload them to the GPU.
    void LoadVBO()
    {
        std::vector<CVert> vertices;
        std::vector<CTexCoord> texCoords;
        // Count how many quads will be drawn.
        todrawquads = 0;
        for(int i=0; i < MAX_CHUNKS_LOADED; i++) {
            if( chunks_loaded[i].created == 1 ) {
                countquads(i);
            }
        }
        m_nVertexCount = 4*todrawquads;
        vertices.resize(m_nVertexCount);   // was `vertexcount` — undeclared
        texCoords.resize(m_nVertexCount);  // was `texcoords` — wrong spelling
        // Append every chunk's quads to the arrays.
        for (int i = 0; i < MAX_CHUNKS_LOADED; i++) {  // `i` must be declared here
            if (chunks_loaded[i].created == 1) {
                addchunktodraw(i, &vertices[0], &texCoords[0]);
            }
        }
        glGenBuffersARB( 1, &m_nVBOVertices );
        glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
        glBufferDataARB( GL_ARRAY_BUFFER_ARB, vertices.size()*sizeof(CVert), &vertices[0], GL_STATIC_DRAW_ARB );
        glGenBuffersARB( 1, &m_nVBOTexCoords );
        glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
        glBufferDataARB( GL_ARRAY_BUFFER_ARB, texCoords.size()*sizeof(CTexCoord), &texCoords[0], GL_STATIC_DRAW_ARB );
    }

    // Per-frame draw: binds the pre-uploaded buffers and issues one call.
    void DrawVBO()
    {
        glClearColor (1,1,1, 0.0);
        glColor3f(1,1,1);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glMatrixMode(GL_PROJECTION);
        setup_projection(); // this should really be done in the drawing handler
        glMatrixMode(GL_MODELVIEW); // don't assume a certain matrix being active!
        glLoadIdentity();
        fps++;
        gluLookAt (zoom, zoom, zoom, 0.0, 0.0, 0.0, 0, 0, 1);
        glRotatef((rot_x / 180 * 3.141592654f),1,0,0);
        glRotatef((rot_y / 180 * 3.141592654f),0,1,0);
        glRotatef((rot_z / 180 * 3.141592654f),0,0,1);
        glEnableClientState( GL_VERTEX_ARRAY );
        glEnableClientState( GL_TEXTURE_COORD_ARRAY );
        glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
        glVertexPointer( 3, GL_FLOAT, 0, (GLvoid *) 0 );
        glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
        glTexCoordPointer( 2, GL_FLOAT, 0, (GLvoid *) 0 );
        glDrawArrays( GL_QUADS, 0, m_nVertexCount);
        glDisableClientState( GL_VERTEX_ARRAY );
        glDisableClientState( GL_TEXTURE_COORD_ARRAY );
        liniergb();
        glutSwapBuffers();
    }
};  // class definition requires a trailing semicolon
On a side note: comments like // Generate And Bind The Vertex Buffer or // Set The Vertex Pointer To The Vertex Buffer are not very useful. Such comments just redundantly restate what one can already read from the code.
Comments should be added to code, which inner workings are not immediately understandably, or if you had to do some kind of hack to fix something and this hack would puzzle someone else, or yourself in a few months time.