I'm working on a particle system and I want to use SSBOs to update the velocity and position of my particles with a compute shader. But it seems that on each update call the compute shader uses the same position values, even though it must be writing updated positions, because the particles do move in the draw call.
Load particles into SSBOs
// Load Positions
glGenBuffers(1, &m_SSBOpos);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, m_SSBOpos);
// Allocate the video memory
glBufferData(GL_SHADER_STORAGE_BUFFER, pb.size() * 4 * sizeof(float), NULL, GL_STATIC_DRAW);
GLint bufMask = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT; // the invalidate makes a big difference when re-writing
float *points = (float *) glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, pb.size() * 4 * sizeof(float), bufMask);
for (int i = 0; i < pb.size(); i++)
{
points[i * 4] = pb.at(i).m_Position.x;
points[i * 4 + 1] = pb.at(i).m_Position.y;
points[i * 4 + 2] = pb.at(i).m_Position.z;
points[i * 4 + 3] = 0;
}
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
// Load Velocities
glGenBuffers(1, &m_SSBOvel);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, m_SSBOvel);
// Allocate the video memory
glBufferData(GL_SHADER_STORAGE_BUFFER, pb.size() * 4 * sizeof(float), NULL, GL_STATIC_DRAW);
float *vels = (float *)glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, pb.size() * 4 * sizeof(float), bufMask);
for (int i = 0; i < pb.size(); i++)
{
vels[i * 4] = pb.at(i).m_Velocity.x;
vels[i * 4 + 1] = pb.at(i).m_Velocity.y;
vels[i * 4 + 2] = pb.at(i).m_Velocity.z;
vels[i * 4 + 3] = 0;
}
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
Update
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 4, shaderUtil.getSSBOpos());
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 5, shaderUtil.getSSBOvel());
// UPDATE THE PARTICLES
shaderUtil.UseCompute();
glUniform1i(shaderUtil.getDT(), fDeltaTime);
glDispatchCompute(NUM_PARTICLES / WORK_GROUP_SIZE, 1, 1);
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
shaderUtil.DeleteCompute();
Draw
shaderUtil.Use();
glUniformMatrix4fv(glGetUniformLocation(shaderUtil.getProgramID(), "projection"), 1, GL_FALSE, glm::value_ptr(projection));
glUniformMatrix4fv(glGetUniformLocation(shaderUtil.getProgramID(), "modelview"), 1, GL_FALSE, glm::value_ptr(View * Model));
glPointSize(10);
// Render
glBindBuffer(GL_ARRAY_BUFFER, shaderUtil.getSSBOpos());
glVertexPointer(4, GL_FLOAT, 0, (void *)0);
glEnableClientState(GL_VERTEX_ARRAY);
glDrawArrays(GL_POINTS, 0, NUM_PARTICLES);
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
shaderUtil.Delete();
Compute shader
#version 430 compatibility
#extension GL_ARB_compute_shader : enable
#extension GL_ARB_shader_storage_buffer_object : enable
layout(std140, binding = 4) buffer Pos
{
vec4 Positions[]; // array of structures
};
layout(std140, binding = 5) buffer Vel
{
vec4 Velocities[]; // array of structures
};
uniform float dt;
layout(local_size_x = 128, local_size_y = 1, local_size_z = 1) in;
void main()
{
uint numParticule = gl_GlobalInvocationID.x;
vec4 v = Velocities[numParticule];
vec4 p = Positions[numParticule];
vec4 tmp = vec4(0, -9.81, 0,0) + v * (0.001 / (7. / 1000.));
v += tmp ;
Velocities[numParticule] = v;
p += v ;
Positions[numParticule] = p;
}
Do you know why this happens?
I've defined a block of vertex data like so:
struct vertex {
float x, y, u, v;
};
struct vertex_group {
vertex tl, bl, br, tr;
glm::vec4 color;
vertex_group(float x, float y, float width, float height, glm::vec4 c) {
tl.x = x; tl.y = y + height; tl.u = 0; tl.v = 0;
bl.x = x; bl.y = y; bl.u = 0; bl.v = 1;
br.x = x + width; br.y = y; br.u = 1; br.v = 1;
tr.x = x + width; tr.y = y + height; tr.u = 1; tr.v = 0;
color = c;
}
vertex_group(positioned_letter const& l) :
vertex_group(l.x, l.y, l.width, l.height, l.l.color) {
}
const float * data() const {
return &tl.x;
}
};
The attribute pointers are set like this:
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)(4 * 4 * sizeof(GLfloat)));
And the draw code is invoked like so:
vertex_group vertices(l);
glBindTexture(GL_TEXTURE_2D, g.texture);
glBindBuffer(GL_ARRAY_BUFFER, objects.rect_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices.data(), GL_STREAM_DRAW);
glDrawArrays(GL_QUADS, 0, 4);
The basic idea is that all four vertices of the quad should all be using the same data for color, even as they require different values for position and texture data. However, when I set the color to red (1,0,0,1), the results on screen are.... not quite right.
Just for reference's sake, if the *only* changes I make are to the first two sections of code, changing them to the following:
struct vertex {
float x, y, u, v;
};
struct vertex_group {
vertex tl;
glm::vec4 color1;
vertex bl;
glm::vec4 color2;
vertex br;
glm::vec4 color3;
vertex tr;
glm::vec4 color4;
vertex_group(float x, float y, float width, float height, glm::vec4 c) {
tl.x = x; tl.y = y + height; tl.u = 0; tl.v = 0;
bl.x = x; bl.y = y; bl.u = 0; bl.v = 1;
br.x = x + width; br.y = y; br.u = 1; br.v = 1;
tr.x = x + width; tr.y = y + height; tr.u = 1; tr.v = 0;
color1 = color2 = color3 = color4 = c;
}
vertex_group(positioned_letter const& l) :
vertex_group(l.x, l.y, l.width, l.height, l.l.color) {
}
const float * data() const {
return &tl.x;
}
};
(other part)
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (void*)(4 * sizeof(GLfloat)));
It renders correctly:
So in a nutshell, my question is: I'd like to structure my data (and render with it) like xyuvxyuvxyuvxyuvrgba but the only way I can get it to work is by doing xyuvrgbaxyuvrgbaxyuvrgbaxyuvrgba. How do I set my pointers/call the draw function so that I can use the first method?
You can't do that. But you can achieve this layout using instanced rendering:
xyuvxyuvxyuvxyuv // <- only once
whrgbawhrgbawhrgbawhrgba... // <- repeated per glyph
where w and h are the size of each quad, to be applied in the vertex shader (a sketch of such a shader follows the setup code below).
Here I split it into two buffers, but you can technically load it all into one buffer. Also, I use the OpenGL 4.5 DSA (direct state access) API here because I think it is easier to use. If you don't have it yet, you can change it to the older calls accordingly.
float quad[] = {
0, 1, 0, 0,
0, 0, 0, 1,
1, 1, 1, 0,
1, 0, 1, 1,
};
struct Instance {
vec2 size;
// TODO: add index of the glyph you want to render
vec4 color;
};
Instance inst[] = { ... };
int ninst = sizeof(inst)/sizeof(inst[0]);
GLuint quad_buf = ... create buffer from quad[] ...;
GLuint inst_buf = ... create buffer from inst[] ...;
GLuint vao;
glCreateVertexArrays(1, &vao);
glEnableVertexArrayAttrib(vao, 0);
glVertexArrayAttribFormat(vao, 0, 4, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribBinding(vao, 0, 0); // from 0th buffer
glEnableVertexArrayAttrib(vao, 1);
glVertexArrayAttribFormat(vao, 1, 2, GL_FLOAT, GL_FALSE, offsetof(Instance, size));
glVertexArrayAttribBinding(vao, 1, 1); // from 1st buffer
glEnableVertexArrayAttrib(vao, 2);
glVertexArrayAttribFormat(vao, 2, 4, GL_FLOAT, GL_FALSE, offsetof(Instance, color));
glVertexArrayAttribBinding(vao, 2, 1); // from 1st buffer
glVertexArrayVertexBuffer(vao, 0, quad_buf, 0, sizeof(float)*4); // 0th buffer is the quad
glVertexArrayVertexBuffer(vao, 1, inst_buf, 0, sizeof(Instance)); // 1st buffer for instances
glVertexArrayBindingDivisor(vao, 1, 1); // 1st buffer advances once per instance
// to draw:
glBindTexture(...);
glBindVertexArray(vao);
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ninst);
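A vertex shader to go with this setup could look roughly like the following. The attribute locations match the VAO configuration above; the projection uniform and the output names are placeholders for whatever your glyph pipeline already uses:
#version 450 core
layout(location = 0) in vec4 quad;       // x, y, u, v of the unit quad (binding 0)
layout(location = 1) in vec2 inst_size;  // per-instance w, h (binding 1)
layout(location = 2) in vec4 inst_color; // per-instance rgba (binding 1)
uniform mat4 projection; // placeholder: whatever transform you already use
out vec2 uv;
out vec4 color;
void main()
{
    // Scale the unit quad by the per-instance size; a per-instance
    // position offset would be added here in the same way.
    gl_Position = projection * vec4(quad.xy * inst_size, 0.0, 1.0);
    uv = quad.zw;
    color = inst_color;
}
A per-glyph position (and the glyph index from the TODO) would go into the same Instance struct as additional per-instance attributes.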
I'm having problems using Transform Feedback buffers with OpenGL version 4.4.
I'm using geometry shader output for capturing and drawing triangles.
The triangles will be culled by some algorithm in the geometry shader and I want to capture the resulting triangles.
So the result triangle count should be less than the input count.
When I specify the in-shader layout specifiers (xfb_buffer, etc.), no primitive is captured.
I changed it back to glTransformFeedbackVaryings, and now primitives are being captured, but instead of 3 vertices per triangle (9 floats) it captures fewer: the resulting buffer's size is not divisible by 3.
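For reference, the glTransformFeedbackVaryings path that does capture something looks roughly like this (the program handle name is just illustrative):
// Must be called before linking the program; registers xPosition for capture.
const GLchar* varyings[] = { "xPosition" };
glTransformFeedbackVaryings(programId_, 1, varyings, GL_INTERLEAVED_ATTRIBS);
glLinkProgram(programId_);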
Initialization:
// ---------------
// VERTEX BUFFER
// ---------------
glGenBuffers(1, &vertexBufferObjectId_);
// Init vertex data
unsigned int numVerts = width * height;
size_t vertexSize = (3 + 2) * sizeof(GLfloat); // position, texcoord
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObjectId_);
glBufferData(GL_ARRAY_BUFFER, numVerts * vertexSize, 0, GL_STATIC_DRAW);
uint8_t* vertices = (uint8_t*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int y = 0; y < height; ++y)
{
for (unsigned int x = 0; x < width; ++x)
{
*(float*)(vertices+0) = x; // x
*(float*)(vertices+4) = y; // y
*(float*)(vertices+8) = 0; // z
*(float*)(vertices+12) = (float)x / (width - 1); // u
*(float*)(vertices+16) = (float)y / (height - 1); // v
vertices += vertexSize;
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// ---------------
// INDEX BUFFER
// ---------------
glGenBuffers(1, &indexBufferObjectId_);
glEnable(GL_PRIMITIVE_RESTART);
glPrimitiveRestartIndex(-1); // accepts GLuint, so this will be the largest possible value
// Init index data
unsigned int numPolys = 2 * (width - 1) * (height - 1) * 2;
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObjectId_);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, numPolys * 3 * sizeof(GLuint), 0, GL_STATIC_DRAW);
GLuint* indices = (GLuint*)glMapBuffer(GL_ELEMENT_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int y = 0; y < height-1; ++y)
{
for (unsigned int x = 0; x < width-1; ++x)
{
// i11 is at (x,y)
// i20
// i11 i21 i31
// i02 i12 i22
// i13
uint32_t i20 = get_index(width,height,x+1,y-1);
uint32_t i11 = get_index(width,height,x,y);
uint32_t i21 = get_index(width,height,x+1,y);
uint32_t i31 = get_index(width,height,x+2,y);
uint32_t i02 = get_index(width,height,x-1,y+1);
uint32_t i12 = get_index(width,height,x,y+1);
uint32_t i22 = get_index(width,height,x+1,y+1);
uint32_t i13 = get_index(width,height,x,y+2);
// first triangle: i12,i22,i21
// i21
// i12 i22
// with adjacency: i12,i13,i22,i31,i21,i11
// i11 i21 i31
// i12 i22
// i13
*(indices++) = i12;
*(indices++) = i13;
*(indices++) = i22;
*(indices++) = i31;
*(indices++) = i21;
*(indices++) = i11;
// second triangle: i12,i21,i11
// i11 i21
// i12
// with adjacency: i12,i22,i21,i20,i11,i02
// i20
// i11 i21
// i02 i12 i22
*(indices++) = i12;
*(indices++) = i22;
*(indices++) = i21;
*(indices++) = i20;
*(indices++) = i11;
*(indices++) = i02;
}
}
glUnmapBuffer(GL_ELEMENT_ARRAY_BUFFER);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// ---------------
// TRANSFORM FEEDBACK BUFFER
// ---------------
if (feedbackVertexBufferObjectId_)
glDeleteBuffers(1, &feedbackVertexBufferObjectId_);
glGenBuffers(1, &feedbackVertexBufferObjectId_);
glBindBuffer(GL_ARRAY_BUFFER, feedbackVertexBufferObjectId_);
glBufferData(GL_ARRAY_BUFFER, numPolys * 3 * 3 * sizeof(GLfloat), 0, GL_STREAM_READ);
Rendering:
meshRenderShader_.enable();
// Set to write to the framebuffer.
glBindFramebuffer(GL_FRAMEBUFFER, camData.framebuffer_.framebufferId_);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// ----------------------------------
// Render projected mesh
// Binding vertex buffer
size_t vertexSize = (3 + 2) * sizeof(GLfloat);
glBindBuffer(GL_ARRAY_BUFFER, camData.mesh_.vertexBufferObjectId_);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, vertexSize, (const GLvoid*)0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, vertexSize, (const GLvoid*)(sizeof(GLfloat) * 3));
// Binding index buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, camData.mesh_.indexBufferObjectId_);
// Bind transform feedback and target buffer
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, camData.mesh_.feedbackVertexBufferObjectId_);
GLuint query;
glGenQueries(1, &query);
// glEnable(GL_RASTERIZER_DISCARD);
// Begin transform feedback
glBeginQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN, query);
glBeginTransformFeedback(GL_TRIANGLES);
// Set shader uniforms
meshRenderShader_.setUniformMat4("ModelViewProjection", (viewProjMatrix * camData.transform_).data());
// Binding texture
meshRenderShader_.setUniformTexture("colorTexture", camData.mesh_.textureId_, 0);
meshRenderShader_.setUniformTexture("depthTexture", camData.mesh_.depthTextureId_, 1);
meshRenderShader_.setUniform2f("depthSize", camData.intrinsic_.width, camData.intrinsic_.height);
meshRenderShader_.setUniform2f("intrinsic_f", camData.intrinsic_.fx, camData.intrinsic_.fy);
meshRenderShader_.setUniform2f("intrinsic_c", camData.intrinsic_.cx, camData.intrinsic_.cy);
// Drawing elements to textures
glDrawElements(GL_TRIANGLES_ADJACENCY, camData.mesh_.numPolys_ * 3, GL_UNSIGNED_INT, (const GLvoid*) 0);
// End transform feedback
glEndTransformFeedback();
glEndQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN);
// glDisable(GL_RASTERIZER_DISCARD);
// glFlush();
glGetQueryObjectuiv(query, GL_QUERY_RESULT, &camData.mesh_.primitivesWritten_);
// glDrawTransformFeedback(GL_TRIANGLES_ADJACENCY, camData.mesh_.feedbackbufferId_);
glDeleteQueries(1, &query);
// Unbinding buffers
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER,0);
// Disable shader
meshRenderShader_.disable();
Relevant part of GLSL geometry shader:
layout(xfb_buffer = 0) out vec3 xPosition;
if (cull)
{
gl_Position = ModelViewProjection * gl_in[0].gl_Position;
fTexCoord = gTexCoord[0];
xPosition = gl_Position.xyz;
EmitVertex();
gl_Position = ModelViewProjection * gl_in[2].gl_Position;
fTexCoord = gTexCoord[2];
xPosition = gl_Position.xyz;
EmitVertex();
gl_Position = ModelViewProjection * gl_in[4].gl_Position;
fTexCoord = gTexCoord[4];
xPosition = gl_Position.xyz;
EmitVertex();
EndPrimitive();
}
What could the solution be?
As a project, I have to generate a random NxN rough terrain in modern OpenGL. For this, I use a height map, rendering each 2xN row with a triangle strip.
Shaders are basic, specifying a shade of yellow corresponding to the height (so I can see the bends; I have a top-down camera). Interpolation is on, but for some reason weird sharp triangular shapes get rendered.
1) They always appear on the right side of the screen.
2) They are bigger than the unit triangle I render.
E.g. (I don't have the reputation to post images, so here are links):
8x8 http://imgbox.com/flC187WW
128x128 http://i.imgbox.com/f1ebrk0V.png
And here's the code:
void drawMeshRow(int rno, float oy) {
GLfloat meshVert[MESHSIZE * 2 * 3];
for(int i = 0; i < 2 * MESHSIZE; ++i) {
meshVert[3*i] = (i/2)*(2.0/(MESHSIZE-1)) - 1;
if(i & 1) {
meshVert[3*i + 1] = oy;
meshVert[3*i + 2] = heightMap[rno][i/2];
}
else {
meshVert[3*i + 1] = oy + (2.0/(MESHSIZE-1));
meshVert[3*i + 2] = heightMap[rno + 1][i/2];
}
}
glBufferData(GL_ARRAY_BUFFER, 2 * 3 * MESHSIZE * sizeof(GLfloat), meshVert, GL_STREAM_DRAW);
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2 * 3);
}
void drawMesh() {
glUseProgram(shader);
glBindBuffer(GL_ARRAY_BUFFER, meshBuff);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
for(int i = 0; i < MESHSIZE - 1; ++i)
drawMeshRow(i, (2.0 / (MESHSIZE - 1)) * i - 1);
glDisableVertexAttribArray(0);
}
drawMesh is called each iteration of the main loop.
Shaders:
Vertex shader
#version 330 core
layout(location = 0) in vec3 pos;
smooth out float height;
void main() {
gl_Position.xyz = pos;
height = pos.z;
gl_Position.w = 1.0;
}
Fragment Shader
#version 330 core
out vec3 pcolor;
smooth in float height;
void main() {
pcolor = vec3(1.0, 1.0, height);
}
You're passing the wrong count to glDrawArrays():
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2 * 3);
The last argument is the vertex count, while the value you pass is the total number of coordinates. The correct call is:
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2);
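If you prefer, the count can also be derived from the array itself instead of being hard-coded; a small sketch based on the meshVert array above:
// meshVert holds 3 floats (x, y, z) per vertex
GLsizei vertexCount = sizeof(meshVert) / (3 * sizeof(GLfloat)); // == MESHSIZE * 2
glDrawArrays(GL_TRIANGLE_STRIP, 0, vertexCount);
Requesting three times as many vertices as you uploaded makes OpenGL read past the end of the buffer data, which is most likely where the stray oversized triangles come from.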
EDIT: I'm thinking the problem might be when I'm loading the vertices and indices. Maybe focus on that section :)
I'm trying to load a heightmap from a bmp file and displaying it in OpenGL. As with most things I try, everything compiles and runs without errors but nothing is drawn on the screen. I can't seem to isolate the issue that much, since all the code works on its own, but when combined to draw terrain, nothing works.
Terrain class
I have a terrain class. It has 2 VBOs, 1 IBO and 1 VAO. It also stores the vertices, indices, colours of the vertices and the heights. It is loaded from a bmp file.
Loading terrain:
Terrain* Terrain::loadTerrain(const std::string& filename, float height)
{
BitMap* bmp = BitMap::load(filename);
Terrain* t = new Terrain(bmp->width, bmp->length);
for(unsigned y = 0; y < bmp->length; y++)
{
for(unsigned x = 0; x < bmp->width; x++)
{
unsigned char color =
(unsigned char)bmp->pixels[3 * (y * bmp->width + x)];
float h = height * ((color / 255.0f) - 0.5f);
t->setHeight(x, y, h);
}
}
delete bmp;
t->initGL();
return t;
}
Initializing the buffers:
void Terrain::initGL()
{
// load vertices from heights data
vertices = new Vector4f[w * l];
int vertIndex = 0;
for(unsigned y = 0; y < l; y++)
{
for(unsigned x = 0; x < w; x++)
{
vertices[vertIndex++] = Vector4f((float)x, (float)y, heights[y][x], 1.0f);
}
}
// generate indices for indexed drawing
indices = new GLshort[(w - 1) * (l - 1) * 6]; // patch count * 6 (2 triangles per terrain patch)
int indicesIndex = 0;
for(unsigned y = 0; y < (l - 1); ++y)
{
for(unsigned x = 0; x < (w - 1); ++x)
{
int start = y * w + x;
indices[indicesIndex++] = (GLshort)start;
indices[indicesIndex++] = (GLshort)(start + 1);
indices[indicesIndex++] = (GLshort)(start + w);
indices[indicesIndex++] = (GLshort)(start + 1);
indices[indicesIndex++] = (GLshort)(start + 1 + w);
indices[indicesIndex++] = (GLshort)(start + w);
}
}
// generate colours for the vertices
colours = new Vector4f[w * l];
for(unsigned i = 0; i < w * l; i++)
{
colours[i] = Vector4f(0.0f, 1.0f, 0.0f, 1.0f); // let's make the entire terrain green
}
// THIS CODE WORKS FOR CUBES (BEGIN)
// vertex buffer object
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// index buffer object
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// colours vertex buffer object
glGenBuffers(1, &colour_vbo);
glBindBuffer(GL_ARRAY_BUFFER, colour_vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(colours), colours, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// create vertex array object
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, colour_vbo);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBindVertexArray(0);
// THIS CODE WORKS FOR CUBES (END)
}
The part where I create the VBOs, IBO and VAO works fine for cubes, they are drawn nicely.
Rendering terrain:
void Terrain::render()
{
glUseProgram(shaderProgram);
glBindVertexArray(vao);
int indices_length = (w - 1) * (l - 1) * 6;
glDrawElements(GL_TRIANGLES, indices_length, GL_UNSIGNED_SHORT, 0);
}
Shaders
These are the vertex and fragment shaders.
Vertex:
#version 330
layout (location = 0) in vec4 position;
layout (location = 1) in vec4 vertexColour;
out vec4 fragmentColour;
uniform vec3 offset;
uniform mat4 perspectiveMatrix;
void main()
{
vec4 cameraPos = position + vec4(offset.x, offset.y, offset.z, 0.0);
gl_Position = perspectiveMatrix * cameraPos;
fragmentColour = vertexColour;
}
Fragment:
#version 330
in vec4 fragmentColour;
out vec4 outputColour;
void main()
{
outputColour = fragmentColour;
}
Perspective matrix
Here are the settings for the "camera":
struct CameraSettings
{
static const float FRUSTUM_SCALE = 1.0f;
static const float Z_NEAR = 0.5f;
static const float Z_FAR = 3.0f;
static float perspective_matrix[16];
};
float CameraSettings::perspective_matrix[16] = {
FRUSTUM_SCALE, 0, 0, 0,
0, FRUSTUM_SCALE, 0, 0,
0, 0, (Z_FAR + Z_NEAR) / (Z_NEAR - Z_FAR), -1.0f,
0, 0, (2 * Z_FAR * Z_NEAR) / (Z_NEAR - Z_FAR), 0
};
The uniforms get filled in after initGL() is called:
// get offset uniform
offsetUniform = ShaderManager::getUniformLocation(shaderProgram, "offset");
perspectiveMatrixUniform = ShaderManager::getUniformLocation(shaderProgram, "perspectiveMatrix");
// set standard uniform data
glUseProgram(shaderProgram);
glUniform3f(offsetUniform, xOffset, yOffset, zOffset);
glUniformMatrix4fv(perspectiveMatrixUniform, 1, GL_FALSE, CameraSettings::perspective_matrix);
glUseProgram(0);
Could someone check out my code and give suggestions?
I'm pretty sure that when you say:
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
you actually want to say:
glBufferData(GL_ARRAY_BUFFER, sizeof (Vector4f) * w * l, vertices, GL_STATIC_DRAW);
(same for the colour buffer, etc.)
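Since vertices, indices and colours are heap-allocated pointers, sizeof gives you the size of the pointer, not of the array. A sketch of all three upload calls with explicit sizes, using the counts from your own initGL() (each with the corresponding buffer bound, as in your existing code):
// positions (vbo bound)
glBufferData(GL_ARRAY_BUFFER, sizeof(Vector4f) * w * l, vertices, GL_STATIC_DRAW);
// indices (ibo bound)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLshort) * (w - 1) * (l - 1) * 6, indices, GL_STATIC_DRAW);
// colours (colour_vbo bound)
glBufferData(GL_ARRAY_BUFFER, sizeof(Vector4f) * w * l, colours, GL_STATIC_DRAW);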
I want to render a quad via a VAO, IBO and VBO, but nothing is drawn. I'm using glDrawRangeElements in an OS X OpenGL 3.2 core context. The screen is completely black without any error. GLFW3 is used to create the window and context.
Window opening/Context creation code
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
_mainWindow = glfwCreateWindow(width, height, title, monitor, NULL);
if(_mainWindow == NULL)
{
return false;
}
_mainWindowWidth = width;
_mainWindowHeight = height;
glfwSetKeyCallback(_mainWindow, _onKeyEvent);
glfwMakeContextCurrent(_mainWindow);
glewExperimental = GL_TRUE;
glewInit();
_openGLVersion = reinterpret_cast<const char*>(glGetString(GL_VERSION));
Shader sources (they compile successfully). The custom fragment output (FColor) is bound.
_vertexShaderSource =
"#version 150 core\n"
"in vec3 in_Position;\n"
"in vec3 in_Normal;\n"
"in vec2 in_TexCoord;\n"
"uniform mat4 ModelViewProjection;\n"
"void main()\n"
"{\n"
" gl_Position = ModelViewProjection * vec4(in_Position, 1.0);\n"
"}\n";
_fragmentShaderSource =
"#version 150 core\n"
"out vec4 FColor;"
"void main()\n"
"{\n"
" FColor = vec4(1.0, 1.0, 1.0, 1.0);\n"
"}\n";
Vertices
Vertex* vertices = new Vertex[6];
vertices[0].nz = 1.0f;
vertices[1].nz = 1.0f;
vertices[2].nz = 1.0f;
vertices[3].nz = 1.0f;
vertices[4].nz = 1.0f;
vertices[5].nz = 1.0f;
vertices[1].y = height;
vertices[1].v0 = 1.0f;
vertices[2].x = width;
vertices[2].y = height;
vertices[2].u0 = 1.0f;
vertices[2].v0 = 1.0f;
vertices[4].x = width;
vertices[4].u0 = 1.0f;
vertices[5].x = width;
vertices[5].y = height;
vertices[5].u0 = 1.0f;
vertices[5].v0 = 1.0f;
_mesh->setVertices(vertices, 6);
vertices = NULL;
Uint16* indices = new Uint16[6];
indices[0] = 0;
indices[1] = 2;
indices[2] = 3;
indices[3] = 0;
indices[4] = 1;
indices[5] = 3;
_mesh->setIndices(indices);
indices = NULL;
Buffer update (checked the data, it seems to be correct)
// Update VBO
if(_vertexBufferObjectID == 0)
{
glGenBuffers(1, &_vertexBufferObjectID);
}
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
float* data = new float[_vertexCount * sizeof(Vertex) / sizeof(float)];
Uint64 begin = 0;
for(Uint32 i = 0; i < _vertexCount; i++)
{
begin = i * 8;
data[begin] = _vertices[i].x;
data[begin + 1] = _vertices[i].y;
data[begin + 2] = _vertices[i].z;
data[begin + 3] = _vertices[i].nx;
data[begin + 4] = _vertices[i].ny;
data[begin + 5] = _vertices[i].nz;
data[begin + 6] = _vertices[i].u0;
data[begin + 7] = _vertices[i].v0;
}
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * _vertexCount, &data[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
delete[] data;
data = NULL;
// Update IBO
if(_indexBufferObjectID == 0)
{
glGenBuffers(1, &_indexBufferObjectID);
}
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indexBufferObjectID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Uint16) * _vertexCount, &_indices[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// Update VAO
if(_vertexArrayObjectID == 0)
{
glGenVertexArrays(1, &_vertexArrayObjectID);
}
glBindVertexArray(_vertexArrayObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
// Vertices
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), ((char*)NULL + (0)));
// Normals
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), ((char*)NULL + (12)));
// TexCoords
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), ((char*)NULL + (24)));
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indexBufferObjectID);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
Rendering code
glUseProgram(material->_programID);
GLuint mvp = glGetUniformLocation(material->_programID, "ModelViewProjection");
glUniformMatrix4fv(mvp, 1, false, glm::value_ptr(camera_mvp));
glBindVertexArray(node->_mesh->_vertexArrayObjectID);
// Draw
glDrawRangeElements(GL_TRIANGLES, 0, 3, node->_mesh->_vertexCount, GL_UNSIGNED_SHORT, NULL);
glBindVertexArray(0);
Note:
camera_mvp is an orthographic projection matrix:
0.00333333  0           0   0
0           0.00333333  0   0
0           0          -1   0
599         599         0   1
Program Linking
_programID = glCreateProgram();
glAttachShader(_programID, vertex_shader);
glAttachShader(_programID, fragment_shader);
glLinkProgram(_programID);
glGetProgramiv(_programID, GL_LINK_STATUS, &result);
glGetProgramiv(_programID, GL_INFO_LOG_LENGTH, &loglen);
if(loglen > 0)
{
char* log = new char[loglen];
glGetProgramInfoLog(_programID, loglen, 0, log);
_lastInfoLog = log;
delete[] log;
log = NULL;
}
if(result == GL_FALSE)
{
glDeleteProgram(_programID);
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
return false;
}
glUseProgram(_programID);
glBindAttribLocation(_programID, 0, "in_Position");
glBindAttribLocation(_programID, 1, "in_Normal");
glBindAttribLocation(_programID, 2, "in_TexCoord");
glBindFragDataLocation(_programID, 0, "FColor");
glUseProgram(0);
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
You don't have layout(location = ...) qualifiers in #version 150, so you have to use glGetAttribLocation() to ask OpenGL where it put your attributes, and adjust the first parameter of your glVertexAttribPointer() calls accordingly.
Make sure your program has been linked before you call glGetAttribLocation().
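A minimal sketch of what that could look like with your attribute names, done in the same VAO/VBO setup block you already have (the local variable names are illustrative):
// Locations are only valid after a successful glLinkProgram();
// a result of -1 means the attribute was not found or was optimized away.
GLint posLoc = glGetAttribLocation(_programID, "in_Position");
GLint nrmLoc = glGetAttribLocation(_programID, "in_Normal");
GLint uvLoc  = glGetAttribLocation(_programID, "in_TexCoord");
glEnableVertexAttribArray(posLoc);
glVertexAttribPointer(posLoc, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), ((char*)NULL + (0)));
glEnableVertexAttribArray(nrmLoc);
glVertexAttribPointer(nrmLoc, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), ((char*)NULL + (12)));
glEnableVertexAttribArray(uvLoc);
glVertexAttribPointer(uvLoc, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), ((char*)NULL + (24)));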