I'm having a hard time getting multiple vertex array objects to render multiple primitives. Nearly all the OpenGL tutorials I've found online show using only a single VAO, so I'm not sure what I might be doing wrong.
I'm using Qt-OpenGL and trying to render a square and a cube (on multiple VAOs).
Using the following code, I'm only getting one primitive displayed on the screen (whichever one is initialized second). I can see either primitive when I turn off initialization of the other one, but not at the same time.
Data Struct:
// Thin wrapper over QOpenGLBuffer; adds nothing yet, exists so per-buffer
// state can be attached later.
struct VBO : public QOpenGLBuffer {
};
// A vertex array object that owns its vertex buffers (one slot per attribute).
struct VAO : public QOpenGLVertexArrayObject {
    VBO vbo[1];  // indexed by the VBO enum below (only POS for now)
};
// Indices into the global vao[] table: one VAO per primitive to draw.
enum { CIRCLE, RECT, NUM_VAOs };
// Indices into VAO::vbo[]: currently just the position buffer.
enum { POS, NUM_VBOs };
// Global table of vertex array objects, one per primitive.
VAO vao[NUM_VAOs];
Initialization:
static void init_objects() {
for(int i = 0; i < NUM_VAOs; ++i) {
vao[i].create();
}
vao[CIRCLE].bind();
for(int i = 0; i < NUM_VBOs; ++i) {
vao[CIRCLE].vbo[i].create();
vao[CIRCLE].vbo[i].setUsagePattern( QOpenGLBuffer::StaticDraw );
}
vao[CIRCLE].vbo[POS].bind();
vao[CIRCLE].vbo[POS].allocate(circle.getVertexData(), circle.getNumVertices()*3*sizeof(float));
vao[CIRCLE].release();
// repeat for RECTANGLE instead of CIRCLE
vao[RECT].bind();
for(int i = 0; i < NUM_VBOs; ++i) {
vao[RECT].vbo[i].create();
vao[RECT].vbo[i].setUsagePattern( QOpenGLBuffer::StaticDraw );
}
vao[RECT].vbo[POS].bind();
vao[RECT].vbo[POS].allocate(circle.getVertexData(), circle.getNumVertices()*3*sizeof(float));
}
Rendering Code:
// Per-frame paint: draws the rectangle, then the circle, each from its own VAO.
void game::paintGL() {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    QMatrix4x4 id;  // identity model-view-projection
    // --- rectangle (green) ---
    vao[RECT].bind();
    vao[RECT].vbo[POS].bind();
    Game.setMVP( id );
    Game.setColor( Colour(0.0, 1.0, 0.0) );
    // BUGFIX: glDrawElements() takes the NUMBER of indices, not a byte size.
    // Multiplying by sizeof(unsigned int) made GL read 4x past the end of
    // the index array (undefined behavior / garbage geometry).
    glDrawElements(GL_TRIANGLES, rect.getSolidIndices().size(),
                   GL_UNSIGNED_INT, &(rect.getSolidIndices()[0]));
    // (removed the mid-frame glFinish(): it stalls the CPU until the GPU
    // drains and is never needed between draw calls)
    vao[RECT].release();
    // --- circle (red) ---
    vao[CIRCLE].bind();
    vao[CIRCLE].vbo[POS].bind();
    Game.setMVP( id );
    Game.setColor( Colour(1.0, 0.0, 0.0) );
    glDrawElements(GL_TRIANGLES, circle.getSolidIndices().size(),
                   GL_UNSIGNED_INT, &(circle.getSolidIndices()[0]));
    glFlush();
}
I've tried reading the data from the buffers before rendering (they are distinct and what I would expect for each primitive), so I know the write occurred properly. I'm guessing now I might be binding the buffers wrong, or missing a step while rendering.
Related
I would like to create a renderer class that can switch between 2 or more shaders without adding more and more draw calls.
What I mean is to have 2 shaders - A and B - and method that takes shader, position, size to create for example a quad.
and I want to add that data (position, size) and pass it to shader A (that is the 1st draw call), then add another quad with shader B (the 2nd draw call), and then add data with shader A again (which should still leave only 2 draw calls, because a draw call for shader A already exists). And at the end, go through the draw calls and draw the scene.
I have a RenderData class that adds draw calls, vertex, element data, etc.
// One recorded draw command; consecutive Draw() calls that share a shader
// are meant to be merged into a single DrawCall.
struct DrawCall
{
    //it may have more data like texture, clip rect, camera, etc.
    Shader* shader = nullptr;
};
// A single 2D vertex; only a position for now.
struct Vertex
{
    Vector2 position;
};  // BUGFIX: the ';' after the struct definition was missing (compile error)
// CPU-side record of everything queued for one frame: vertex/index data plus
// the ordered draw-call list and the shader stack used while building it.
class RenderData
{
public:
    RenderData();
    ~RenderData();
    // Discards all per-frame data; called once per frame before queuing.
    void Free() {
        vertexBuffer.clear();
        shader.clear();
        drawCall.clear();
        elementBuffer.clear();
    }
    // Queues one quad (4 vertices / 6 indices) for the current draw call.
    void Draw(const Rect& dest);
    // Appends a new draw call bound to the shader on top of the stack.
    void AddDrawCall();
    // Topmost draw call, or nullptr when none has been added yet.
    inline DrawCall* getDrawCall() { return drawCall.size() > 0 ? &drawCall.back() : nullptr; }
    // Reconciles the draw-call list with the current top of the shader stack.
    void UpdateShader();
    void PushShader(Shader* shader);
    void PopShader();
    // Topmost shader, or nullptr when the stack is empty.
    inline Shader* getShader() { return shader.size() > 0 ? shader.back() : nullptr; }
    uint currentVertexIndex = 0;
    vector<Vertex> vertexBuffer; // Vertex data
    vector<Shader*> shader;      // shader stack: back() is the active shader
    vector<DrawCall> drawCall;   // ordered draw commands for this frame
    vector<uint> elementBuffer;  // Index data
};  // BUGFIX: the ';' after the class definition was missing (compile error)
// Record a new draw call bound to whichever shader is currently on top of
// the shader stack (nullptr when the stack is empty).
void RenderData::AddDrawCall()
{
    drawCall.emplace_back();
    drawCall.back().shader = getShader();
}
// Keeps the draw-call list in sync with the top of the shader stack after a
// push or pop: opens a new draw call when the active shader changed, and
// collapses a now-redundant draw call when a pop returns to the previous
// draw call's shader.
void RenderData::UpdateShader()
{
    Shader* currentShader = getShader();
    DrawCall* currentDraw = getDrawCall();
    // No draw call yet, or the active shader differs: open a new draw call.
    if (!currentDraw || currentDraw->shader != currentShader) {
        AddDrawCall();
        return;
    }
    DrawCall* prevDraw = drawCall.size() > 1 ? currentDraw - 1 : nullptr;
    // BUGFIX: prevDraw was dereferenced unconditionally; with exactly one
    // recorded draw call it is nullptr and this crashed.
    if (prevDraw && prevDraw->shader == currentShader) {
        // The previous draw call already uses this shader; merge into it.
        drawCall.pop_back();
    } else {
        currentDraw->shader = currentShader;
    }
}
// Makes 'shader' the active shader by pushing it on the stack, then lets
// UpdateShader() decide whether a new draw call is needed.
void RenderData::PushShader(Shader* shader)
{
    this->shader.push_back(shader);
    UpdateShader();
}
// Pops the active shader and lets UpdateShader() reconcile the draw-call
// list.  Asserts when there is nothing to pop.
void RenderData::PopShader()
{
    // BUGFIX(message): size() is unsigned and can never be < 0; the assert
    // actually guards against popping an empty stack.
    Custom_Assert(shader.size() > 0, "Cannot PopShader() on an empty shader stack!\n");
    shader.pop_back();
    UpdateShader();
}
// Queues one axis-aligned quad covering 'dest' (x, y, w, h): 4 vertices and
// 6 indices (two triangles).  The push_back bodies are elided ("...") in the
// original post, so this is illustrative pseudocode, not compilable code.
void RenderData::Draw(const Rect& dest)
{
    //dest -> x, y, w and h
    //setup vertices
    vertexBuffer.push_back(...);
    vertexBuffer.push_back(...);
    vertexBuffer.push_back(...);
    vertexBuffer.push_back(...);
    //setup elements
    elementBuffer.push_back(...);
    elementBuffer.push_back(...);
    elementBuffer.push_back(...);
    elementBuffer.push_back(...);
    elementBuffer.push_back(...);
    elementBuffer.push_back(...);
}
and Renderer2D class which has few objects:
vao, vbo, ebo, RenderData
and few methods:
Create() -> it creates the vbo and ebo
RenderClear() -> it Free() RenderData, sets up viewport
RenderPresent -> it creates and binds vao, binds vbo, adds vbo attributes and data, binds ebo and adds ebo data, and goes through DrawCall& drawCall : renderData.drawCall, uses shader program and draws elements;
// One-time setup: creates the GPU buffer objects reused every frame (the
// VAO itself is currently created per frame in RenderPresent()).
void Renderer2D::Create()
{
    // BUGFIX: both statements ended with a stray extra ')' and did not compile.
    //gens and binds
    vbo = vbo->Create(TYPE::ARRAY, USAGE::DYNAMIC_DRAW);
    //gens and binds
    ebo = ebo->Create(TYPE::ELEMENT, USAGE::DYNAMIC_DRAW);
}
// Begins a new frame: resets the viewport and throws away last frame's
// queued vertex/index/draw-call data.
void Renderer2D::RenderClear()
{
    // NOTE(review): viewport size is hard-coded — presumably the window is
    // 1280x720; confirm, and consider querying the window size instead.
    setRenderViewport(0, 0, 1280, 720);
    renderData.Free();
}
// Ends the frame: uploads all queued vertex/index data to the GPU, then
// issues one indexed draw per recorded draw call.
void Renderer2D::RenderPresent()
{
    // NOTE(review): the VAO is created here and destroyed at the end of every
    // frame; creating it once in Create() and rebinding here would avoid the
    // per-frame object churn.
    vao = vao->Create();
    vbo->BindBuffer();
    // attribute 0: two floats per vertex (Vertex::position)
    vbo->AddAttribute(0, 2, GL_FLOAT, false, sizeof(Vertex), (const void*)offsetof(Vertex, position));
    vbo->AddData(renderData.vertexBuffer.size() * sizeof(Vertex), renderData.vertexBuffer.data());
    ebo->BindBuffer();
    ebo->AddData(renderData.elementBuffer.size() * sizeof(uint), renderData.elementBuffer.data());
    for (auto& drawCall : renderData.drawCall) {
        drawCall.shader->UseProgram();
        // NOTE(review): 'elemCount' is not a member of the DrawCall struct
        // shown earlier, and every call draws from byte offset nullptr —
        // each draw call needs its own index count AND its own offset into
        // the element buffer, or all calls render the same range. TODO confirm.
        vao->DrawElements(drawCall.elemCount, GL_UNSIGNED_INT, nullptr);
    }
    //delete vertex array
    vao->Free();
}
how it works:
int main()
{
Renderer2D renderer2D;
renderer2D.Create();
Shader A("shader.vtx", "shader.frag");
Shader B("shader.vtx", "shader2.frag");
while(!quit) {
renderer2D.RenderClear();
//Push A shader = add 1st draw call
renderer2D->PushShader(&A);
renderer2D->Draw({ 100.0f, 100.0f, 50.0f, 50.0f });
renderer2D->PopShader();
//Push B shader = add 2nd draw call
renderer2D->PushShader(&B);
renderer2D->Draw({ 200.0f, 200.0f, 50.0f, 50.0f });
renderer2D->PopShader();
//Push A shader = do not add 3rd draw call, use already existing one
//This version adds 3rd draw call instead of using existing one
renderer2D->PushShader(&A);
renderer2D->Draw({ 400.0f, 400.0f, 50.0f, 50.0f });
renderer2D->PopShader();
renderer2D.RenderPresent();
}
return 0;
}
I would like to change it, somehow, to work as I described, but I do not know how (if it is even possible) to do it.
I'm following a tutorial on creating a Game Engine in Java using OpenGL.
I'm trying to render a triangle on the screen. Everything is running fine and I can change the background color but the triangle won't show. I've also tried running the code provided as part of the tutorial series and it still doesn't work.
Link to the tutorial: http://bit.ly/1EUnvz4
Link to the code used in the video: http://bit.ly/1z7XUlE
Setup
I've tried checking the OpenGL version and believe I have 2.1.
Mac OSX
Java - Eclipse
Mesh.java
import static org.lwjgl.opengl.GL11.*;
import static org.lwjgl.opengl.GL15.*;
import static org.lwjgl.opengl.GL20.*;
/**
 * A GPU-backed triangle mesh: owns one vertex buffer object (VBO) holding
 * packed vertex positions, and draws it via generic vertex attribute 0.
 */
public class Mesh
{
    private int vbo;  // OpenGL buffer object handle
    private int size; // number of vertices currently uploaded

    public Mesh ()
    {
        vbo = glGenBuffers();
        size = 0;
    }

    /** Uploads the given vertices into this mesh's VBO (GL_STATIC_DRAW). */
    public void addVertices (Vertex[] vertices)
    {
        size = vertices.length;
        // bind first so the upload targets our buffer
        glBindBuffer (GL_ARRAY_BUFFER, vbo);
        glBufferData (GL_ARRAY_BUFFER, Util.createFlippedBuffer(vertices), GL_STATIC_DRAW);
    }

    /** Draws the whole mesh as GL_TRIANGLES. */
    public void draw ()
    {
        glBindBuffer (GL_ARRAY_BUFFER, vbo);
        glEnableVertexAttribArray (0);
        // attribute 0: 3 floats per vertex, stride = Vertex.SIZE floats (bytes = *4)
        glVertexAttribPointer (0, 3, GL_FLOAT, false, Vertex.SIZE * 4, 0);
        glDrawArrays (GL_TRIANGLES, 0, size);
        glDisableVertexAttribArray (0);
    }
}
RenderUtil.java
import static org.lwjgl.opengl.GL11.*;
import static org.lwjgl.opengl.GL30.*;
/** Static helpers for global OpenGL render state. */
public class RenderUtil
{
    /** Clears the color and depth buffers. */
    public static void clearScreen ()
    {
        //TODO: Stencil Buffer
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    }

    //set everything to engine defaults
    public static void initGraphics ()
    {
        glClearColor(0.0f, 0.0f, 0.0f, 0.0f); // default color
        // NOTE(review): front faces are declared CLOCKWISE here while culling
        // is enabled below — a triangle specified counter-clockwise will be
        // culled and never appear; a common reason "nothing shows".
        glFrontFace(GL_CW); // direction for visible faces
        glCullFace(GL_BACK); // direction for back faces
        glEnable (GL_CULL_FACE); // don't draw back faces
        glEnable (GL_DEPTH_TEST); // determines draw order by pixel depth testing
        //TODO: Depth clamp for later
        glEnable (GL_FRAMEBUFFER_SRGB); // do exponential correction on gamma so we don't have to
    }
}
Util.java
import java.nio.FloatBuffer;
import org.lwjgl.BufferUtils;
/** Buffer helpers for handing Java vertex data to LWJGL/OpenGL. */
public class Util
{
    //create a float buffer (we need this because java is weird)
    public static FloatBuffer createFloatBuffer (int size)
    {
        return BufferUtils.createFloatBuffer(size);
    }

    /**
     * Packs the x/y/z of every vertex into a FloatBuffer and flips it so
     * OpenGL reads it from position zero.
     */
    public static FloatBuffer createFlippedBuffer (Vertex[] vertices)
    {
        FloatBuffer buffer = createFloatBuffer(vertices.length * Vertex.SIZE);
        for (Vertex v : vertices)
        {
            buffer.put(v.getPos().getX());
            buffer.put(v.getPos().getY());
            buffer.put(v.getPos().getZ());
        }
        buffer.flip();
        return buffer;
    }
}
You are using an invalid mix of legacy and modern OpenGL.
The glVertexAttribPointer() and glEnableVertexAttribArray() functions you are calling are used for setting up generic vertex attributes. This is the only way to set up vertex attribues in current versions of OpenGL (Core Profile of desktop OpenGL, or OpenGL ES 2.0 and later). They can be used in older versions of OpenGL as well, but only in combination with providing your own shaders implemented in GLSL.
If you are just getting started, your best option is probably to stick with what you have, and study how to start implementing your own shaders. If you wanted to get the code working with the legacy fixed pipeline (which is only supported in the Compatibility Profile of OpenGL), you would need to use the glVertexPointer() and glEnableClientState() functions instead.
Try a single import?
import static org.lwjgl.opengl.GL11.*
I only have one import on mine, also try importing the packages you need separately. One thing you are likely doing wrong is importing multiple versions of OpenGL
I am trying to integrate the Assimp loader to my framework. Everything is rendered fine, but in this spider model I'm rendering, its fangs are not being drawn as expected (see following picture).
Below is the relevant code snippet:
//Storing the Indices
// Copy each face's three indices into the flat faceArray.
// NOTE(review): assumes every face is a triangle — import the scene with
// aiProcess_Triangulate, or non-triangle faces will corrupt this copy
// (the likely cause of the broken fangs in the rendered model).
for (unsigned int t = 0; t < mesh->mNumFaces; ++t) {
    aiFace* face = &mesh->mFaces[t];
    memcpy(&faceArray[index], face->mIndices, 3*sizeof(unsigned int));
    index += 3;
}
//Storing the Vertices
// Copy each vertex position (3 floats) into the flat vertexArray.
// NOTE(review): as excerpted, 'index' is not reset to 0 between the two
// loops — presumably each snippet has its own counter in the full source;
// verify in context, or the vertex copy starts past the real data.
for (unsigned int t = 0; t < mesh->mNumVertices; ++t) {
    aiVector3D vertex ;
    if (mesh->HasPositions()) {
        vertex = mesh->mVertices[t];
        memcpy(&vertexArray[index], &vertex,3*sizeof(float));
    }
    index += 3;
}
//Render module
// Binds the given texture and vertex/index buffers, then draws the entire
// index buffer as triangles (count derived from the buffer's byte size).
void model::helperDraw(GLuint vertexBufferID, GLuint indexBufferID, GLuint textureID)
{
    glBindTexture( GL_TEXTURE_2D, textureID);
    glBindBuffer(GL_ARRAY_BUFFER,vertexBufferID);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,indexBufferID);
    // Ask GL how many bytes the bound element buffer holds.
    GLint indexBytes;
    glGetBufferParameteriv(GL_ELEMENT_ARRAY_BUFFER, GL_BUFFER_SIZE, &indexBytes);
    // byte size / sizeof(GLuint) = number of indices to draw
    glDrawElements(GL_TRIANGLES, indexBytes/sizeof(GLuint), GL_UNSIGNED_INT, 0);
}
What could be wrong with my code?
There is nothing obviously wrong with your code. One possible cause for these rendering artefacts is that the OBJ model you load has some faces that are triangles and some faces that are not. You are rendering everything as GL_TRIANGLES, but the OBJ format can specify faces as quads, triangle-strips, triangles and even other more exotic things like patches.
Assimp has a mesh triangulation facility that can make your life a lot easier when dealing with these multi-format mesh files, such as the OBJ. Try passing the flag aiProcess_Triangulate to the load method of the importer or even to the post-processing method if you do post-processing as a separate step. This is likely to fix the issue.
I am trying to incorporate openGL into my c++ code for the first time. As a start up, I made this very primitive code, which defines a class called polygon and should display a polygon with a method polygon.draw(). Right now, everything below resides in a single main.cpp file, though for easy reading I am separating into section here:
The problem is, the code below compiles and runs alright, but when the window named "simple" is created, it displays garbage (mostly fragments of my desktop background). :(
Firstly, the class polygon:
#include <GL/glut.h>
#include "utility.hpp"
#include <vector>
void init(void);
class nikPolygon{
public:
std::vector<nikPosition> m_vertices;
nikColor m_color;
double m_alpha;
// constructors
// without alpha (default is 1.0)
nikPolygon(std::vector<nikPosition> vList, nikColor c):
m_vertices(vList), m_color(c), m_alpha(1.0){
}
nikPolygon(std::vector<nikPosition> vList, nikColor c, double a):
m_vertices(vList), m_color(c), m_alpha(a){
}
// default constructor
nikPolygon(){
}
// member functions
// add vertex
void addVertex(nikPosition v) { m_vertices.push_back(v); }
// remove vertex
void removeVertex(nikPosition v);
// adjust vertex
void modifyVertex(unsigned int vIndex, nikPosition newPosition);
// fill color
void setColor(nikColor col) { m_color = col; }
// set alpha
void setAlpha(double a) { m_alpha = a; }
// display
void drawPolygon(void){
// color the objet
glColor4f(m_color.red,
m_color.green,
m_color.blue,
m_alpha);
// construct polygon
glBegin(GL_POLYGON);
for (std::vector<nikPosition>::iterator it = m_vertices.begin();
it != m_vertices.end(); it++)
glVertex2f(it->x, it->y);
glEnd();
// send to screen
glFlush();
}
void draw(void);
};
Then the c/c++ callback interface (trampoline/thunk):
// for c++/c callback
// GLUT's display callback is a plain C function pointer, so the polygon to
// draw is passed through this file-scope pointer (trampoline/thunk pattern).
nikPolygon* currentPolygon;
extern "C"
void drawCallback(void){
    currentPolygon->drawPolygon();
}
// Registers this polygon as the one GLUT redraws.  Only one polygon can be
// "current" at a time; a later draw() replaces the earlier registration.
void nikPolygon::draw(){
    currentPolygon = this;
    glutDisplayFunc(drawCallback);
}
And then the rest of it:
// initialize openGL etc
// One-time GL state setup.  Must run AFTER glutCreateWindow() so a GL
// context exists (main() calls it in that order).
void init(void){
    // set clear color to black
    glClearColor(0.0, 0.0, 0.0, 0.0);
    // set fill color to white
    glColor3f(1.0, 1.0, 1.0);
    // enable transperancy
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    // setup standard orthogonal view with clipping
    // box as cube of side 2 centered at origin
    // this is the default view
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(-1.0, 1.0, -1.0, 1.0);
}
int main(int argc, char** argv){
    // Build the polygon before any GL calls — safe, since no GL state is
    // touched until draw() registers the callback.
    nikPolygon poly;
    poly.addVertex(nikPosition(-0.5, -0.5));
    poly.addVertex(nikPosition(-0.5, 0.5));
    poly.addVertex(nikPosition(0.5, 0.5));
    poly.addVertex(nikPosition(0.5, -0.5));
    poly.setColor(nikColor(0.3, 0.5, 0.1));
    poly.setAlpha(0.4);
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);  // single-buffered RGB window
    glutInitWindowSize(500, 500);
    glutInitWindowPosition(0, 0);
    glutCreateWindow("simple");
    init();       // GL context exists now; safe to set GL state
    poly.draw();  // registers the display callback for this polygon
    glutMainLoop();
}
First and foremost, the original code is completely overengineered. This may be part of the original confusion. Also there's not really much you can do, to fix the code, without throwing out most of it. For example representing each polygon (triangle) with a own object instance is about as inefficient as it can get. You normally do not want to do this. The usual approach at representing a model is a Mesh, which consists of a list/array of vertex attributes, and a list of faces, which is in essence a list of 3-tuples defining the triangles, making up the surface of the mesh. In class form
// A mesh as flat per-vertex attribute arrays plus a triangle index list
// (three consecutive indices per face, indexing into the attribute arrays).
class Mesh
{
    std::vector<float> vert_position;        // x,y,z per vertex
    std::vector<float> vert_normal;          // nx,ny,nz per vertex
    std::vector<float> vert_texUV;           // u,v per vertex
    std::vector<unsigned int> faces_indices; // 3 indices per triangle
public:
    void draw();
};
Then to draw a mesh you use Vertex Arrays
void Mesh::draw()
{
// This is the API as used up to including OpenGL-2.1
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXCOORD_ARRAY);
// sizes of attributes depend on actual application
glVertexPointer(3, GL_FLOAT, 0, &vert_position[0]);
glNormalPointer(GL_FLOAT, 0, &vert_normal[0]);
glTexCoordPointer(2, GL_FLOAT, 0, &vert_texUV[0]);
glDrawElements(GL_TRIANGLES, faces_indices.size(), GL_UNSIGNED_INT, &faces_indices[0]);
}
You put references to these Mesh object instances into a list, or array, and iterate over that in the display function, calling the draw method, after setting the appropriate transformation.
std::list<Mesh> list_meshes;  // all meshes in the scene, drawn every frame

// Per-frame display function: clear, set projection, draw each mesh, present.
// (clear_framebuffer / set_viewport_and_projection / swap_buffers are
// placeholders in the original answer.)
void display()
{
    clear_framebuffer();
    set_viewport_and_projection();
    for(std::list<Mesh>::iterator mesh_iter = list_meshes.begin();
        mesh_iter != list_meshes.end();
        mesh_iter++) {
        mesh_iter->draw();  // BUGFIX: missing ';' after the call
    }
    swap_buffers();
}
At the beginning of your drawPolygon function you need to do a glClear(GL_COLOR_BUFFER_BIT);
I recently switched from intermediate mode and have a new rendering process. There must be something I am not understanding. I think it has something to do with the indices.
Here is my diagram: Region->Mesh->Polygon Array->3 vertex indices which references the master list of vertices.
Here my render code:
// Render the mesh
// Draws every mesh in the current region's PVS that survives frustum
// culling, one triangle per glDrawElements call (see note below).
void WLD::render(GLuint* textures, long curRegion, CFrustum cfrustum)
{
    int num = 0;
    // Set up rendering states
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    // PERF: texturing and the vertex/texcoord pointers never change inside
    // the loops below, so set them once per call instead of once per polygon.
    glEnable(GL_TEXTURE_2D);
    glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vertices[0].x);
    glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vertices[0].u);
    // Scratch index buffer for one triangle at a time
    GLuint indices[3];
    // Cycle through the PVS (regions potentially visible from curRegion)
    while(num < regions[curRegion].visibility.size())
    {
        int i = regions[curRegion].visibility[num];
        // Make sure the region is not "dead"
        if(!regions[i].dead && regions[i].meshptr != NULL)
        {
            // Check to see if the mesh is in the frustum
            if(cfrustum.BoxInFrustum(regions[i].meshptr->min[0], regions[i].meshptr->min[2], regions[i].meshptr->min[1], regions[i].meshptr->max[0], regions[i].meshptr->max[2], regions[i].meshptr->max[1]))
            {
                // NOTE(review): drawing 3 indices per call defeats the point
                // of array rendering — group polygons by texture and submit
                // all triangles sharing a texture with ONE glDrawElements.
                for(int j = 0; j < regions[i].meshptr->polygonCount; j++)
                {
                    indices[0] = regions[i].meshptr->poly[j].vertIndex[0];
                    indices[1] = regions[i].meshptr->poly[j].vertIndex[1];
                    indices[2] = regions[i].meshptr->poly[j].vertIndex[2];
                    // Bind this polygon's texture (texturing already enabled)
                    glBindTexture(GL_TEXTURE_2D, textures[regions[i].meshptr->poly[j].tex]);
                    // Draw
                    glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, indices);
                }
            }
        }
        num++;
    }
    // End of rendering - disable states
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
Sorry if I left anything out. And I really appreciate feedback and help with this. I would even consider paying someone who is good with OpenGL and optimization to help me with this.
There is no point in using array rendering if you're only rendering 3 vertices at a time. The idea is to send thousands through with a single call. That is, you render a single "Polygon Array" or "Mesh" with one call.