I am working on an OpenGL engine and my textures are rendering oddly. The textures are mostly full and working, but they have small, strange interruptions. Here's what it looks like.
The bottom right corner shows what the textures are supposed to look like; there are also randomly colored squares of blue peppered in. The solid squares (not textured) do not have these interruptions.
I can provide code, but I'm not sure what to show, because I've checked everywhere and I don't know where the problem comes from.
I am working on a Java and a C++ version. Here is the renderer in Java (if you want to see something else, just ask):
public class BatchRenderer2D extends Renderer2D {
private static final int MAX_SPRITES = 60000;
private static final int VERTEX_SIZE = Float.BYTES * 3 + Float.BYTES * 2 + Float.BYTES * 1 + Float.BYTES * 1;
private static final int SPRITE_SIZE = VERTEX_SIZE * 4;
private static final int BUFFER_SIZE = SPRITE_SIZE * MAX_SPRITES;
private static final int INDICES_SIZE = MAX_SPRITES * 6;
private static final int SHADER_VERTEX_INDEX = 0;
private static final int SHADER_UV_INDEX = 1;
private static final int SHADER_TID_INDEX = 2;
private static final int SHADER_COLOR_INDEX = 3;
private int VAO;
private int VBO;
private IndexBuffer IBO;
private int indexCount;
private FloatBuffer buffer;
private List<Integer> textureSlots = new ArrayList<Integer>();
public BatchRenderer2D() {
init();
}
public void destroy() {
IBO.delete();
glDeleteBuffers(VBO);
glDeleteVertexArrays(VAO);
}
public void init() {
VAO = glGenVertexArrays();
VBO = glGenBuffers();
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, BUFFER_SIZE, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(SHADER_VERTEX_INDEX);
glEnableVertexAttribArray(SHADER_UV_INDEX);
glEnableVertexAttribArray(SHADER_TID_INDEX);
glEnableVertexAttribArray(SHADER_COLOR_INDEX);
glVertexAttribPointer(SHADER_VERTEX_INDEX, 3, GL_FLOAT, false, VERTEX_SIZE, 0);
glVertexAttribPointer(SHADER_UV_INDEX, 2, GL_FLOAT, false, VERTEX_SIZE, 3 * 4);
glVertexAttribPointer(SHADER_TID_INDEX, 1, GL_FLOAT, false, VERTEX_SIZE, 3 * 4 + 2 * 4);
glVertexAttribPointer(SHADER_COLOR_INDEX, 4, GL_UNSIGNED_BYTE, true, VERTEX_SIZE, 3 * 4 + 2 * 4 + 1 * 4);
glBindBuffer(GL_ARRAY_BUFFER, 0);
int[] indices = new int[INDICES_SIZE];
int offset = 0;
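// Two triangles per sprite: (0, 1, 2) and (2, 3, 0), advancing four vertices per quad.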
for (int i = 0; i < INDICES_SIZE; i += 6) {
indices[i + 0] = offset + 0;
indices[i + 1] = offset + 1;
indices[i + 2] = offset + 2;
indices[i + 3] = offset + 2;
indices[i + 4] = offset + 3;
indices[i + 5] = offset + 0;
offset += 4;
}
IBO = new IndexBuffer(indices, INDICES_SIZE);
glBindVertexArray(0);
}
@Override
public void begin() {
glBindBuffer(GL_ARRAY_BUFFER, VBO);
buffer = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY).asFloatBuffer();
}
@Override
public void submit(Renderable2D renderable) {
Vector3f position = renderable.getPosition();
Vector2f size = renderable.getSize();
Vector4f color = renderable.getColor();
List<Vector2f> uv = renderable.getUV();
float tid = renderable.getTID();
float c = 0;
float ts = 0.0f;
if (tid > 0) {
boolean found = false;
for(int i = 0; i < textureSlots.size(); i++) {
if(textureSlots.get(i) == tid) {
ts = (float)(i + 1);
found = true;
break;
}
}
if(!found) {
if(textureSlots.size() >= 32) {
end();
flush();
begin();
}
textureSlots.add((int)tid);
ts = (float)textureSlots.size();
}
} else {
int r = (int) (color.x * 255);
int g = (int) (color.y * 255);
int b = (int) (color.z * 255);
int a = (int) (color.w * 255);
c = Float.intBitsToFloat((r << 0) | (g << 8) | (b << 16) | (a << 24));
}
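// Write the quad's four vertices; each carries position, UV, texture slot (ts) and packed color (c).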
transformationBack.multiply(position).store(buffer);
uv.get(0).store(buffer);
buffer.put(ts);
buffer.put(c);
transformationBack.multiply(new Vector3f(position.x, position.y + size.y, position.z)).store(buffer);
uv.get(1).store(buffer);
buffer.put(ts);
buffer.put(c);
transformationBack.multiply(new Vector3f(position.x + size.x, position.y + size.y, position.z)).store(buffer);
uv.get(2).store(buffer);
buffer.put(ts);
buffer.put(c);
transformationBack.multiply(new Vector3f(position.x + size.x, position.y, position.z)).store(buffer);
uv.get(3).store(buffer);
buffer.put(ts);
buffer.put(c);
indexCount += 6;
}
@Override
public void end() {
glUnmapBuffer(GL_ARRAY_BUFFER);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
@Override
public void flush() {
for(int i = 0; i < textureSlots.size(); i++) {
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, textureSlots.get(i));
}
glBindVertexArray(VAO);
IBO.bind();
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_INT, 0);
IBO.unbind();
glBindVertexArray(0);
indexCount = 0;
}
}
You didn't provide your fragment shader, but I'm pretty sure I know the reason (I had the same problem; following The Cherno's tutorial? ;)). Just for information, what is your GPU? (It seems AMD has more problems with this.) Linking my thread as the source.
Important part:
Fragment Shader:
#version 330 core
if(fs_in.tid > 0.0){
int tid = int(fs_in.tid - 0.5);
texColor = texture(textures[tid], fs_in.uv);
}
What you are trying to do here is not allowed, as per the GLSL 3.30 specification, which states:
Samplers aggregated into arrays within a shader (using square brackets [ ]) can only be indexed with integral constant expressions (see section 4.3.3 “Constant Expressions”).
Your tid is not a constant expression, so this will not work.
In GL 4 this constraint has been somewhat relaxed (the quote is from the GLSL 4.50 spec):
When aggregated into arrays within a shader, samplers can only be indexed with a dynamically uniform integral expression, otherwise results are undefined.
Your input isn't dynamically uniform either, so you will get undefined results there too.
(Thanks derhass)
One "simple" solution(but not pretty and I believe with a small impact on performance):
switch(tid){
case 0: texColor = texture(textures[0], fs_in.uv); break;
...
case 31: texColor = texture(textures[31], fs_in.uv); break;
}
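For reference, here is a minimal sketch of how the whole fragment shader could look with the switch in place. The interface block layout is an assumption on my part (I'm guessing at an fs_in block like yours and a textures[32] uniform array), so adapt the names to your code:
#version 330 core

layout(location = 0) out vec4 color;

// Assumed vertex-to-fragment interface block; match it to your vertex shader.
in DATA
{
    vec2 uv;
    float tid;
    vec4 color;
} fs_in;

uniform sampler2D textures[32];

void main()
{
    vec4 texColor = fs_in.color;
    if (fs_in.tid > 0.0)
    {
        int tid = int(fs_in.tid - 0.5);
        // Every case indexes the sampler array with a literal constant,
        // which is valid under the GLSL 3.30 rule quoted above.
        switch (tid)
        {
        case 0: texColor = fs_in.color * texture(textures[0], fs_in.uv); break;
        case 1: texColor = fs_in.color * texture(textures[1], fs_in.uv); break;
        // ... cases 2 through 30 follow the same pattern ...
        case 31: texColor = fs_in.color * texture(textures[31], fs_in.uv); break;
        }
    }
    color = texColor;
}
It's ugly, but every texture() call now uses a constant index, so the lookup is well defined on all drivers.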
Also, as a small note: you're doing a lot of matrix multiplication there for squares. You could simply transform the first vertex and then add the offsets; it boosted my performance by around 200 FPS (in your example: multiply, then add y, then add x, then subtract y again).
Edit:
Clearly my algebra is not where it should be; what I said you could do (now struck through) is completely wrong, sorry.
Related
This is what happens when I draw, switching from the black texture to the lime green one in a simple for loop. It seems to pick up bits of the previously drawn texture.
Here's a simplified version of how my renderer works:
Init(): Create my VAO and attrib pointers, and generate the element buffer and indices.
Begin(): Bind my vertex buffer and map the buffer pointer.
Draw(): Submit a renderable to draw, which writes 4 vertices into the vertex buffer; each gets a position, color, texCoords, and a texture slot.
End(): I delete the buffer pointer, bind my VAO, IBO, and textures to their active texture slots, and draw the elements.
I do this every frame (except Init). What I don't understand is that if I draw PER TEXTURE, with only one texture active, this doesn't happen. It only happens when multiple textures are active and bound.
Here's my renderer
void Renderer2D::Init()
{
m_Textures.reserve(32);
m_VertexBuffer.Create(nullptr, VERTEX_BUFFER_SIZE);
m_Layout.PushFloat(2); //Position
m_Layout.PushUChar(4); //Color
m_Layout.PushFloat(2); //TexCoords
m_Layout.PushFloat(1); //Texture ID
//VA is bound and VB is unbound
m_VertexArray.AddBuffer(m_VertexBuffer, m_Layout);
unsigned int* indices = new unsigned int[INDEX_COUNT];
int offset = 0;
for (int i = 0; i < INDEX_COUNT; i += 6)
{
indices[i + 0] = offset + 0;
indices[i + 1] = offset + 1;
indices[i + 2] = offset + 2;
indices[i + 3] = offset + 2;
indices[i + 4] = offset + 3;
indices[i + 5] = offset + 0;
offset += 4;
}
m_IndexBuffer.Create(indices, INDEX_COUNT);
m_VertexArray.Unbind();
}
void Renderer2D::Begin()
{
m_VertexBuffer.Bind();
m_Buffer = (VertexData*)m_VertexBuffer.GetBufferPointer();
}
void Renderer2D::Draw(Renderable2D& renderable)
{
const glm::vec2& position = renderable.GetPosition();
const glm::vec2& size = renderable.GetSize();
const Color& color = renderable.GetColor();
const glm::vec4& texCoords = renderable.GetTextureRect();
const float tid = AddTexture(renderable.GetTexture());
DT_CORE_ASSERT(tid != 0, "TID IS EQUAL TO ZERO");
m_Buffer->position = glm::vec2(position.x, position.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.x, texCoords.y);
m_Buffer->tid = tid;
m_Buffer++;
m_Buffer->position = glm::vec2(position.x + size.x, position.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.z, texCoords.y);
m_Buffer->tid = tid;
m_Buffer++;
m_Buffer->position = glm::vec2(position.x + size.x, position.y + size.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.z, texCoords.w);
m_Buffer->tid = tid;
m_Buffer++;
m_Buffer->position = glm::vec2(position.x, position.y + size.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.x, texCoords.w);
m_Buffer->tid = tid;
m_Buffer++;
m_IndexCount += 6;
}
void Renderer2D::End()
{
Flush();
}
const float Renderer2D::AddTexture(const Texture2D* texture)
{
for (int i = 0; i < m_Textures.size(); i++) {
if (texture == m_Textures[i]) // Compares memory addresses
return i + 1; // Returns the texture id plus one since 0 is null texture id
}
// If the texture count is already at or greater than max textures
if (m_Textures.size() >= MAX_TEXTURES)
{
End();
Begin();
}
m_Textures.push_back((Texture2D*)texture);
return m_Textures.size();
}
void Renderer2D::Flush()
{
m_VertexBuffer.DeleteBufferPointer();
m_VertexArray.Bind();
m_IndexBuffer.Bind();
for (int i = 0; i < m_Textures.size(); i++) {
glActiveTexture(GL_TEXTURE0 + i);
m_Textures[i]->Bind();
}
glDrawElements(GL_TRIANGLES, m_IndexCount, GL_UNSIGNED_INT, NULL);
m_IndexBuffer.Unbind();
m_VertexArray.Unbind();
m_IndexCount = 0;
m_Textures.clear();
}
Here's my fragment shader
#version 330 core
out vec4 FragColor;
in vec4 ourColor;
in vec2 ourTexCoord;
in float ourTid;
uniform sampler2D textures[32];
void main()
{
vec4 texColor = ourColor;
if(ourTid > 0.0)
{
int tid = int(ourTid - 0.5);
texColor = ourColor * texture(textures[tid], ourTexCoord);
}
FragColor = texColor;
}
I appreciate any help; let me know if you need to see more code.
I don't know if you still need this, but for the record:
you have a logic problem in your fragment shader code.
Think about it: if your "ourTid" is bigger than 0, take 1.0f for example. You subtract 0.5f and cast to int: int(0.5) is 0, for sure. Now say we need texture number 2 and do the same process: 2 - 0.5 = 1.5, cast to int = 1.
So you will definitely get the previous texture every time.
The solution is easy: you should add 0.5 instead of subtracting it, to be sure the interpolation error is avoided and you get the correct texture.
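In shader terms, the suggested change to the lookup above is a one-line fix (assuming ourTid carries the slot you actually want to index):
// Round to the nearest integer instead of truncating downward,
// so an interpolated 1.9999 maps to 2 rather than 1.
int tid = int(ourTid + 0.5);
texColor = ourColor * texture(textures[tid], ourTexCoord);
(Note that dynamically indexing a sampler array is still out of spec in GLSL 3.30, as discussed above; this only fixes the rounding.)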
I have just begun learning OpenGL, and I think there is a problem with my index array formula.
I'm trying to render a square terrain using an IBO. When I draw with glDrawElements, the result appears only on the bottom half of the screen, tightly packed into a rectangular shape, while when I use glDrawArrays it works out perfectly, with a square, centered mesh.
I load my vertex height values from a grayscale image. Here is how I load the vertices and create the indices:
For vertices (right to left, bottom to top):
int numVertices = image.width() * image.height() * 3;
float rowResize = image.width() / 2;
float colResize = image.height() / 2;
GLfloat* vertexData;
vertexData = new GLfloat[numVertices];
int counter = 0;
for (float j = 0; j < col; j++){
for (float i = 0; i < row; i++){
vertexData[counter++] = (i - rowResize) / rowResize;
vertexData[counter++] = (j - colResize) / colResize;
vertexData[counter++] = image.getColor(i,j) / 255.0f;
}
}
For indices, trying to follow the order {0, 1, 2, 1, 3, 2, ...}:
2 3
-------
|\ |
| \ |
| \ |
| \ |
| \|
-------
0 1
int numIndices = (row - 1) * (col - 1) * 2 * 3;
unsigned short* indexData = new unsigned short[numIndices];
counter = 0;
for (short y = 0; y < col - 1; y++){
for (short x = 0; x < row - 1; x++){
// lower triangle
short L_first = y*row + x;
short L_second = L_first + 1;
short L_third = L_first + row;
//upper triangle
short U_first = L_first + 1;
short U_second = U_first + row;
short U_third = U_second - 1;
indexData[counter++] = L_first;
indexData[counter++] = L_second;
indexData[counter++] = L_third;
indexData[counter++] = U_first;
indexData[counter++] = U_second;
indexData[counter++] = U_third;
}
}
I initialized the VAO, VBO and IBO; for each buffer object I gen, bind, and link the data, then unbind them all.
In the game loop I have:
glBindVertexArray(VAO);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 3, 0);
//glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glDrawArrays(GL_POINTS, 0, numVertices);
//glDrawElements(GL_TRIANGLE_STRIP, numIndices, GL_UNSIGNED_SHORT, 0);
glBindVertexArray(0);
glfwSwapBuffers(window);
Since drawing from vertices works and drawing from indices doesn't, what could be wrong with my index generation?
Thank you for your help!
(Weird thing: I just tried another grayscale image, and it worked well both when drawing from vertices (GL_POINTS) and from indices (GL_TRIANGLE_STRIP)... welp)
Pictures
Using glDrawArrays
Using glDrawElements
As a project, I have to generate random NxN rough terrain in modern OpenGL. For this I use a height map, rendering each 2xN row as a triangle strip.
The shaders are basic, outputting a shade of yellow corresponding to the height (so I can see the bends; I have a top-down camera). Interpolation is on, but for some reason weird sharp triangular shapes get rendered.
1) They always appear on the right side of the screen.
2) They are bigger than the unit triangles I render.
E.g. (I don't have the reputation to post images, so...):
8x8 http://imgbox.com/flC187WW
128x128 http://i.imgbox.com/f1ebrk0V.png
And here's the code:
void drawMeshRow(int rno, float oy) {
GLfloat meshVert[MESHSIZE * 2 * 3];
for(int i = 0; i < 2 * MESHSIZE; ++i) {
meshVert[3*i] = (i/2)*(2.0/(MESHSIZE-1)) - 1;
if(i & 1) {
meshVert[3*i + 1] = oy;
meshVert[3*i + 2] = heightMap[rno][i/2];
}
else {
meshVert[3*i + 1] = oy + (2.0/(MESHSIZE-1));
meshVert[3*i + 2] = heightMap[rno + 1][i/2];
}
}
glBufferData(GL_ARRAY_BUFFER, 2 * 3 * MESHSIZE * sizeof(GLfloat), meshVert, GL_STREAM_DRAW);
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2 * 3);
}
void drawMesh() {
glUseProgram(shader);
glBindBuffer(GL_ARRAY_BUFFER, meshBuff);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
for(int i = 0; i < MESHSIZE - 1; ++i)
drawMeshRow(i, (2.0 / (MESHSIZE - 1)) * i - 1);
glDisableVertexAttribArray(0);
}
drawMesh is called each iteration of the main loop.
Shaders:
Vertex shader
#version 330 core
layout(location = 0) in vec3 pos;
smooth out float height;
void main() {
gl_Position.xyz = pos;
height = pos.z;
gl_Position.w = 1.0;
}
Fragment Shader
#version 330 core
out vec3 pcolor;
smooth in float height;
void main() {
pcolor = vec3(1.0, 1.0, height);
}
You're passing the wrong count to glDrawArrays():
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2 * 3);
The last argument is the vertex count, while the value you pass is the total number of coordinates. The correct call is:
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2);
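One way to avoid this class of mistake is to derive the count from the array itself instead of repeating the arithmetic. A small sketch against the meshVert array above (only valid where meshVert is a true array, not a pointer):
// 3 floats per vertex, so divide the array size by the per-vertex footprint.
const GLsizei vertexCount = sizeof(meshVert) / (3 * sizeof(GLfloat));
glDrawArrays(GL_TRIANGLE_STRIP, 0, vertexCount);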
So I'm putting together a height map renderer that will do most of the work in the vertex shader, but first, of course, I generate a mesh to render. At the moment I'm playing around with the upper limits of OpenGL and C++ to see how dense a mesh I can render (so I later have something to go by in terms of LoD mesh dividing).
ANYWAY! To cut to the issue:
I noticed the issue after testing mesh resolutions of 32, 64 and 128; at 128 I experienced runtime crashes. I stopped them by using a self-made class, "indexFace", which holds 6 indices, to lower the array length. The problem is that at 128 resolution only a third of the mesh actually displays. I was wondering if there is a limit to how many indices OpenGL can render or hold with one set of buffer objects, or if it's an issue with my handling on the C++ side of things.
I'm generating the mesh via the following:
void HeightMapMesh::GenerateMesh(GLfloat meshScale, GLushort meshResolution)
{
GLushort vertexCount = (meshResolution + 1) * (meshResolution + 1);
Vertex_Texture* vertexData = new Vertex_Texture[vertexCount];
GLushort indexCount = (meshResolution * meshResolution) * 6;
//indexFace holds 6 GLushort's in an attempt to overcome the array size limit
indexFace* indexData = new indexFace[meshResolution * meshResolution];
GLfloat scalar = meshScale / ((GLfloat)meshResolution);
GLfloat posX = 0;
GLfloat posY = 0;
for (int x = 0; x <= meshResolution; x++)
{
posX = ((GLfloat)x) * scalar;
for (int y = 0; y <= meshResolution; y++)
{
posY = ((GLfloat)y) * scalar;
vertexData[y + (x * (meshResolution + 1))] = Vertex_Texture(posX, posY, 0.0f, x, y);
}
}
GLint indexPosition;
GLint TL, TR, BL, BR;
for (int x = 0; x < meshResolution; x++)
{
for (int y = 0; y < meshResolution; y++)
{
indexPosition = (y + (x * (meshResolution)));
BL = y + (x * (meshResolution + 1));
TL = y + 1 + (x * (meshResolution + 1));
BR = y + ((x + 1) * (meshResolution + 1));
TR = y + 1 + ((x + 1) * (meshResolution + 1));
indexData[indexPosition] = indexFace(
BL, TR, TL,
BL, BR, TR
);
}
}
mesh.Fill(vertexData, vertexCount, (void *)indexData, indexCount, GL_STATIC_DRAW, GL_STATIC_DRAW);
delete [] vertexData;
delete [] indexData;
}
//This is for mesh.Fill()
void Fill(T* vertData, GLushort vertCount, void* indData, GLushort indCount, GLenum vertUsage, GLenum indUsage)
{
indexCount = indCount;
vertexCount = vertCount;
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObjectID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObjectID);
glBufferData(GL_ARRAY_BUFFER, sizeof(T) * vertexCount, vertData, vertUsage);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLushort) * indexCount, indData, indUsage);
}
It's because you made your indices shorts.
For example, this: GLushort indexCount = (meshResolution * meshResolution) * 6; hits USHRT_MAX at a meshResolution of 105 (105 * 105 * 6 = 66150 > 65535).
Use ints as indices. Change your indices everywhere to unsigned ints and do the final draw call like this:
glDrawElements( GL_QUADS, indCount, GL_UNSIGNED_INT, indices); //do this
//glDrawElements( GL_QUADS, indCount, GL_UNSIGNED_SHORT, indices); //instead of this
//also GL_QUADS is deprecated but it seems your data is in that format so I left it that way
You could save a bunch of indices by drawing GL_TRIANGLE_STRIPs instead, or better yet do tessellation on the GPU, since this is practically the perfect use case for it.
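A minimal sketch of the widened generation code (same structure as your snippet, with only the index-related types changed; indexFace is assumed to now hold 6 GLuints):
// Widened from GLushort so meshResolution > 104 can no longer overflow the counts.
GLuint vertexCount = (meshResolution + 1) * (meshResolution + 1);
GLuint indexCount = (GLuint)meshResolution * (GLuint)meshResolution * 6u;
indexFace* indexData = new indexFace[meshResolution * meshResolution];
// ... vertex and index generation unchanged ...
// In Fill(), size the element buffer with the wider index type:
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint) * indexCount, indData, indUsage);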
I have implemented a simple (slow) method that imitates OpenGL immediate mode for drawing lines.
Each frame I push pairs of vertices that define lines into one vector, and a specified or default color for each vertex into another vector.
void WindowsGraphicsManager::vertex(float x, float y, float z) {
vertices_.push_back(x);
vertices_.push_back(y);
vertices_.push_back(z);
colors_.push_back(vertexColor_.getR());
colors_.push_back(vertexColor_.getG());
colors_.push_back(vertexColor_.getB());
colors_.push_back(vertexColor_.getA());
}
And at the end of each frame I clear these vectors.
My render code looks like this:
void WindowsGraphicsManager::renderVertices(Mat4 mat) {
if (vertices_.size() == 0) {
return;
}
static Shader* shader = (Shader*) services_->getRM()->get(
Resource::SHADER, "openglimmediate");
glUseProgram(shader->getId());
shader->setMatrix4(Shader::WVP, mat);
glEnableVertexAttribArray(shader->getHandle(Shader::POS));
glVertexAttribPointer(shader->getHandle(Shader::POS),
3, GL_FLOAT, GL_FALSE, 0, &vertices_[0]);
glEnableVertexAttribArray(shader->getHandle(Shader::COL));
glVertexAttribPointer(shader->getHandle(Shader::COL),
4, GL_FLOAT, GL_FALSE, 0, &colors_[0]);
//LOGI("Before crash.");
//LOGI("Vertices size: %d", vertices_.size());
//LOGI("Colors size: %d", colors_.size());
//INFO: Vertices size: 607590
//INFO: Colors size: 810120
glDrawArrays(GL_LINES, 0, vertices_.size() / 3);
CHECK_GL_ERROR("Rendering lines.");
//LOGI("After crash.");
glDisableVertexAttribArray(shader->getHandle(Shader::COL));
glDisableVertexAttribArray(shader->getHandle(Shader::POS));
vertices_.clear();
colors_.clear();
}
When I add 607590 floats (divide by 3 for the vertex count) to the vertices vector, rendering crashes on the glDrawArrays line. Strangely, when I first maximize the window and then render, it works fine for the model with 607590 floats, though it still crashes for a model with ~800k floats.
What might be causing this?
[Edit] Before rendering the vertices I call one other method. After removing it, rendering stopped crashing, so I guess I'm doing something wrong in there.
inline void WindowsGraphicsManager::renderNode(
Node* node, Mat4 mat, bool ortho)
{
if (!node->getState(Node::RENDERABLE)) {
return;
}
// Retrieve model data.
Renderable* renderable = 0;
Resource* resource = 0;
if (node->hasResource(Resource::SPRITE)) {
resource = node->getResource(Resource::SPRITE);
renderable = dynamic_cast<Renderable*>(resource);
}
else if (node->hasResource(Resource::STATIC_OBJECT)) {
resource = node->getResource(Resource::STATIC_OBJECT);
renderable = dynamic_cast<Renderable*>(resource);
StaticObject* so = static_cast<StaticObject*>(resource);
// Check for frustum culling.
if (so->getBoundingVolume() != 0
&& so->getBoundingVolume()->isInFrustum(
services_->getCamera(), node->getPos())
== BoundingVolume::OUTSIDE)
{
return;
}
}
else if (node->hasResource(Resource::DYNAMIC_OBJECT)) {
resource = node->getResource(Resource::DYNAMIC_OBJECT);
renderable = dynamic_cast<Renderable*>(resource);
}
if (renderable == 0) {
LOGW("Renderable with name \"%s\" is null.",
node->getName().c_str());
return;
}
// Retrieve node shader or use default.
Shader* shader = static_cast<Shader*>(
node->getResource(Resource::SHADER));
if (shader == 0) {
LOGW("Unable to retrieve shader for node: %s.",
node->getName().c_str());
return;
}
int shaderId = shader->getId();
// Select shader program to use.
glUseProgram(shaderId);
CHECK_GL_ERROR("glUseProgram");
Mat4 res;
if (!ortho) {
Matrix::multiply(mat, node->getMatrix(), res);
}
else {
Mat4 tmp;
Mat4 pos;
Mat4 rot;
Mat4 scale;
Vec3 p = node->getPos();
Vec3 r = node->getRot();
Vec3 s = node->getScale();
float width = s.getX();
float height = s.getY();
float x = p.getX();
float y = p.getY();
Matrix::translate(pos, x, y, p.getZ());
Matrix::rotateXYZ(rot, r.getX(), r.getY(), r.getZ());
Matrix::scale(scale, width, height, 1.0f);
Matrix::multiply(mat, pos, res);
Matrix::multiply(res, rot, tmp);
Matrix::multiply(tmp, scale, res);
}
// World * View * Projection matrix.
shader->setMatrix4(Shader::WVP, res);
// World matrix.
shader->setMatrix4(Shader::W, node->getMatrix());
// Normal matrix.
if (shader->hasHandle(Shader::N)) {
Mat3 normalMatrix;
Matrix::toMat3(node->getMatrix(), normalMatrix);
shader->setMatrix3(Shader::N, normalMatrix);
}
// Light position.
float* lightPos = new float[lights_.size() * 3];
if (lights_.size() > 0 && shader->hasHandle(Shader::LIGHT_POS)) {
for (UINT i = 0; i < lights_.size(); i++) {
Vec3& pos = lights_[i]->getPos();
lightPos[i * 3 + 0] = pos.getX();
lightPos[i * 3 + 1] = pos.getY();
lightPos[i * 3 + 2] = pos.getZ();
}
shader->setVector3(Shader::LIGHT_POS, lightPos, lights_.size());
}
delete [] lightPos; // allocated with new[], so it must be released with delete[]
// Light count.
shader->setInt(Shader::LIGHT_COUNT, lights_.size());
//shader->setVector3(Shader::LIGHT_POS,
// services_->getEnv()->getSunPos()->toArray());
// Eye position.
shader->setVector3(Shader::EYE_POS,
services_->getCamera()->getPos().toArray());
// Fog color.
if (shader->hasHandle(Shader::FOG_COLOR)) {
shader->setVector3(Shader::FOG_COLOR,
services_->getEnv()->getFogColor());
}
// Fog density.
shader->setFloat(Shader::FOG_DENSITY, services_->getEnv()->getFogDensity());
// Timer.
shader->setFloat(Shader::TIMER,
(float) services_->getSystem()->getTimeElapsed());
// Bind combined buffer object.
if (renderable->getCBO() > 0) {
int stride = renderable->getVertexStride();
glBindBuffer(GL_ARRAY_BUFFER, renderable->getCBO());
if (shader->hasHandle(Shader::POS)) {
glEnableVertexAttribArray(shader->getHandle(Shader::POS));
glVertexAttribPointer(
shader->getHandle(Shader::POS), 3, GL_FLOAT, GL_FALSE,
stride, ((char*) 0) + renderable->getPosOffset());
}
if (renderable->getNormalOffset() != -1
&& shader->hasHandle(Shader::NORMAL)) {
glEnableVertexAttribArray(shader->getHandle(Shader::NORMAL));
glVertexAttribPointer(
shader->getHandle(Shader::NORMAL), 3, GL_FLOAT, GL_FALSE,
stride, ((char*) 0) + renderable->getNormalOffset());
}
if (renderable->getUVOffset() != -1 && shader->hasHandle(Shader::UV)) {
glEnableVertexAttribArray(shader->getHandle(Shader::UV));
glVertexAttribPointer(
shader->getHandle(Shader::UV), 2, GL_FLOAT, GL_FALSE,
stride, ((char*) 0) + renderable->getUVOffset());
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
else {
return;
}
// Bind cube map.
if (node->hasResource(Resource::CUBE_MAP)
&& shader->hasHandle(Shader::CUBE_MAP)) {
glActiveTexture(GL_TEXTURE0);
CHECK_GL_ERROR("glActiveTexture");
CubeMap* t = static_cast<CubeMap*>(
node->getResource(Resource::CUBE_MAP));
glBindTexture(GL_TEXTURE_CUBE_MAP, t->getId());
CHECK_GL_ERROR("glBindTexture");
glUniform1i(shader->getHandle(Shader::CUBE_MAP), 0);
CHECK_GL_ERROR("glUniform1i");
}
int hTextures[8];
hTextures[0] = glGetUniformLocation(shader->getId(),
SHADER_MAIN_TEXTURE);
// Bind the texture.
vector<Resource*> textures = node->getResources(Resource::TEXTURE_2D);
UINT size = textures.size() < 8 ? textures.size() : 7;
UINT texture = 0;
for (UINT i = 0; i < size; i++) {
texture = i + 1;
const string& name = textures[i]->getName();
Texture2D* tex = static_cast<Texture2D*>(textures[i]);
string textName = name.substr(0, name.length() - 4);
hTextures[texture] = glGetUniformLocation(shader->getId(),
textName.c_str());
if (hTextures[texture] == -1) {
continue;
}
glActiveTexture(GL_TEXTURE0 + i + 1);
CHECK_GL_ERROR("glActiveTexture");
glBindTexture(GL_TEXTURE_2D, tex->getId());
CHECK_GL_ERROR("glBindTexture");
glUniform1i(hTextures[texture], texture);
CHECK_GL_ERROR("glUniform1i");
}
// Render node.
// BoundingVolume* volume = (*model->getBoundingVolumes())[i];
// if (model->hasBoundingVolumes()) {
// if (volume->isInFrustum(services_->getCamera(), node)
// == BoundingVolume::OUTSIDE) {
// continue;
// }
// }
int renderType;
switch (renderable->getRenderType()) {
case Renderable::RENDER_TYPE_POINTS:
renderType = GL_POINTS;
//glPointSize(renderable->getPointSize());
break;
case Renderable::RENDER_TYPE_LINES:
renderType = GL_LINES;
glLineWidth(renderable->getLineWidth());
break;
case Renderable::RENDER_TYPE_TRIANGLE_FAN:
renderType = GL_TRIANGLE_FAN;
break;
case Renderable::RENDER_TYPE_TRIANGLE_STRIP:
renderType = GL_TRIANGLE_STRIP;
break;
default:
renderType = GL_TRIANGLES;
break;
}
if (renderable->getWindingType() == Renderable::WINDING_TYPE_CCW) {
glFrontFace(GL_CCW);
}
else {
glFrontFace(GL_CW);
}
if (renderable->getCullFace()) {
glEnable(GL_CULL_FACE);
}
else {
glDisable(GL_CULL_FACE);
}
UINT renderCount = renderable->getRenderCount();
int lastTexture = 0;
for (UINT i = 0; i < renderable->getRenderCount(); i++) {
renderable->setRenderable(i);
// Ambient material color.
if (shader->hasHandle(Shader::AMBIENT)) {
shader->setVector3(Shader::AMBIENT,
renderable->getAmbient().toArray());
}
// Diffuse material color.
if (shader->hasHandle(Shader::DIFFUSE)) {
shader->setVector3(Shader::DIFFUSE,
renderable->getDiffuse().toArray());
}
// Specular material color.
if (shader->hasHandle(Shader::SPECULAR)) {
shader->setVector3(Shader::SPECULAR,
renderable->getSpecular().toArray());
}
// Specular material color intensity.
shader->setFloat(Shader::SPECULARITY, renderable->getSpecularity());
// Model transparency.
shader->setFloat(Shader::TRANSPARENCY, renderable->getTransparency());
// Bind main texture.
if (renderable->getTexture() != lastTexture
&& hTextures[0] != -1) {
lastTexture = renderable->getTexture();
if (shader->hasHandle(Shader::MAIN_TEXTURE)) {
if (lastTexture == 0) {
shader->setFloat(Shader::MAIN_TEXTURE, 0.0f);
}
else {
shader->setFloat(Shader::MAIN_TEXTURE, 1.0f);
}
}
glActiveTexture(GL_TEXTURE0);
CHECK_GL_ERROR("glActiveTexture");
glBindTexture(GL_TEXTURE_2D, renderable->getTexture());
CHECK_GL_ERROR("glBindTexture");
glUniform1i(hTextures[0], 0);
CHECK_GL_ERROR("glUniform1i");
}
if (renderable->getIBO() > 0) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
renderable->getIBO());
if (renderable->getIndexType() ==
Renderable::INDEX_TYPE_USHORT) {
glDrawElements(renderType,
renderable->getIndexCount(),
GL_UNSIGNED_SHORT,
0);
CHECK_GL_ERROR("glDrawElements");
}
else {
glDrawElements(renderType,
renderable->getIndexCount(),
GL_UNSIGNED_INT,
0);
CHECK_GL_ERROR("glDrawElementsInt");
}
}
else {
glDrawArrays(renderType, 0, renderable->getVertexCount() / 3);
CHECK_GL_ERROR("glDrawArrays");
}
}
//// Unbind the cube map.
//if (node->hasResource(Resource::CUBE_MAP)) {
// glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
//}
//// Unbind the textures.
//for (UINT i = 0; i < 8; i++) {
// glActiveTexture(GL_TEXTURE0 + i);
// CHECK_GL_ERROR("glActiveTexture");
// glBindTexture(GL_TEXTURE_2D, 0);
//}
}
So the problem was the glBindBuffer() call after this part of the code:
// Bind combined buffer object.
if (renderable->getCBO() > 0) {
int stride = renderable->getVertexStride();
glBindBuffer(GL_ARRAY_BUFFER, renderable->getCBO());
if (shader->hasHandle(Shader::POS)) {
glEnableVertexAttribArray(shader->getHandle(Shader::POS));
glVertexAttribPointer(
shader->getHandle(Shader::POS), 3, GL_FLOAT, GL_FALSE,
stride, ((char*) 0) + renderable->getPosOffset());
}
if (renderable->getNormalOffset() != -1
&& shader->hasHandle(Shader::NORMAL)) {
glEnableVertexAttribArray(shader->getHandle(Shader::NORMAL));
glVertexAttribPointer(
shader->getHandle(Shader::NORMAL), 3, GL_FLOAT, GL_FALSE,
stride, ((char*) 0) + renderable->getNormalOffset());
}
if (renderable->getUVOffset() != -1 && shader->hasHandle(Shader::UV)) {
glEnableVertexAttribArray(shader->getHandle(Shader::UV));
glVertexAttribPointer(
shader->getHandle(Shader::UV), 2, GL_FLOAT, GL_FALSE,
stride, ((char*) 0) + renderable->getUVOffset());
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
I had to move the glBindBuffer() call to the end of the same method, and I also added glDisableVertexAttribArray calls for the position, normal and UV attributes. This solved the problem, but I'm not sure why; I thought there was no need to call glDisableVertexAttribArray when using a VBO.
I think this problem is specific to NVIDIA drivers; it raises a first-chance exception in nvoglv32.dll.
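For anyone hitting the same thing, a likely mechanism: renderVertices() sources its attributes from client memory, while attribute arrays left enabled by renderNode() still reference whatever pointers were set for the previous draw, so the next draw call can read out of bounds. A sketch of the cleanup described above, reusing the Shader handle API from the code (placed at the end of renderNode()):
// Unbind the combined buffer only after all draw calls have been issued,
// then disable the attribute arrays so they cannot leak into the next draw.
glBindBuffer(GL_ARRAY_BUFFER, 0);
if (shader->hasHandle(Shader::POS))
    glDisableVertexAttribArray(shader->getHandle(Shader::POS));
if (renderable->getNormalOffset() != -1 && shader->hasHandle(Shader::NORMAL))
    glDisableVertexAttribArray(shader->getHandle(Shader::NORMAL));
if (renderable->getUVOffset() != -1 && shader->hasHandle(Shader::UV))
    glDisableVertexAttribArray(shader->getHandle(Shader::UV));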