GLSL (1.2) + VBOs + Textures - C++

Once again I'm asking for your considerable help with a problem. This is my code:
float ctex[] = {0.0,0.0,
0.0,1.0,
1.0,1.0,
1.0,0.0};
float data[] = {1.0, 1.0,-5.0,
-1.0,-1.0,-5.0,
1.0,-1.0,-5.0,
-1.0, 1.0,-5.0};
GLuint ind[] = {0,1,2,0,3,1};
LoadTexture();
glGenBuffers(1,&triangleVBO);
glBindBuffer(GL_ARRAY_BUFFER,triangleVBO);
glBufferData(GL_ARRAY_BUFFER,sizeof(data),data,GL_STATIC_DRAW);
glGenBuffers(1,&triangleIND);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,triangleIND);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,sizeof(ind),ind,GL_STATIC_DRAW);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,0,0);
glGenBuffers(1,&triangleT[0]);
glBindBuffer(GL_ARRAY_BUFFER,triangleT[0]);
glBufferData(GL_ARRAY_BUFFER,sizeof(ctex),ctex,GL_STATIC_DRAW);
glVertexAttribPointer(1,2,GL_FLOAT,GL_FALSE,0,0);
GLuint v,f,p;
v = glCreateShader(GL_VERTEX_SHADER);
f = glCreateShader(GL_FRAGMENT_SHADER);
p = glCreateProgram();
char *vsFuente = LeeShader("shaders/shader.vert");
char *fsFuente = LeeShader("shaders/shader.frag");
const char *vs = vsFuente;
const char *fs = fsFuente;
glShaderSource(v,1,&vs,NULL);
glShaderSource(f,1,&fs,NULL);
free(vsFuente);free(fsFuente);
glCompileShader(v);
glCompileShader(f);
glAttachShader(p,v);
glAttachShader(p,f);
glLinkProgram(p);
//main loop
while(1){
...etc.
glUseProgram(p);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER,triangleVBO);
glDrawElements(GL_TRIANGLES,6,GL_UNSIGNED_INT,0);
glDisableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER,triangleT[0]);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D,idtextere);
glEnable(GL_TEXTURE_2D);
glDisableVertexAttribArray(1);
glUseProgram(0);
...etc.
}
This is my Vertex Shader:
void main(){
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = ftransform();
}
And this is my Fragment Shader:
uniform sampler2D tex;
void main(){
vec4 color = texture2D(tex,gl_TexCoord[0].st);
gl_FragColor = color;
}
The problem is that the texture does not appear, nor anything else. Could someone tell me what the problem is?
Thank you very much in advance.

You are using the old fixed-function attributes in your shader (like gl_MultiTexCoord0, or gl_Vertex as used by ftransform), but in your application code you try to feed them through the generic attribute interface (glVertexAttribPointer and glEnableVertexAttribArray). This won't work (it might work for attribute 0, which is an alias for gl_Vertex, but relying on that is counter-intuitive anyway).
There are two ways to fix this. Either drop the generic attribute API in favour of the old fixed-function attributes: replace glVertexAttribPointer(0, ...) with glVertexPointer, glVertexAttribPointer(1, ...) with glTexCoordPointer, and likewise glEnableVertexAttribArray with glEnableClientState(GL_VERTEX_ARRAY) and glEnableClientState(GL_TEXTURE_COORD_ARRAY).
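A minimal sketch of that first option, reusing the buffer names from the question:
glEnableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, triangleVBO);
glVertexPointer(3, GL_FLOAT, 0, 0); // instead of glVertexAttribPointer(0, ...)
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, triangleT[0]);
glTexCoordPointer(2, GL_FLOAT, 0, 0); // instead of glVertexAttribPointer(1, ...)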
Or, the more modern and future-proof approach: drop the old fixed-function inputs from your shaders and declare your inputs as generic attributes:
attribute vec4 position;
attribute vec2 texCoord;
void main() {
gl_TexCoord[0] = vec4(texCoord, 0.0, 1.0);
gl_Position = gl_ModelViewProjectionMatrix * position;
}
And don't forget to call glBindAttribLocation(p, 0, "position") and glBindAttribLocation(p, 1, "texCoord") before linking the program in order to assign the attribute indices to the correct attributes.
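The order matters here; a sketch with the handles from above:
glBindAttribLocation(p, 0, "position");
glBindAttribLocation(p, 1, "texCoord");
glLinkProgram(p); // the bindings only take effect when the program is (re)linked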
But the second approach, even if preferred, might be too heavy a change for you right now, since it should really be accompanied by dropping all other fixed-function use from your shaders as well: the modelview-projection matrix should become a custom uniform, and the gl_TexCoord[0] varying a custom varying.
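For reference, a fully generic version might look roughly like this (the mvpMatrix uniform and vTexCoord varying are invented names for illustration):
// vertex shader (sketch)
attribute vec4 position;
attribute vec2 texCoord;
uniform mat4 mvpMatrix; // replaces gl_ModelViewProjectionMatrix
varying vec2 vTexCoord; // replaces the gl_TexCoord[0] varying
void main() {
vTexCoord = texCoord;
gl_Position = mvpMatrix * position;
}
// fragment shader (sketch)
uniform sampler2D tex;
varying vec2 vTexCoord;
void main() {
gl_FragColor = texture2D(tex, vTexCoord);
}
The application would then compute the matrix itself and upload it with glUniformMatrix4fv.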

Related

Simple GL fragment shader behaves strangely on newer GPU

I am tearing my hair out at this problem! I have a simple vertex and fragment shader that worked perfectly (and still does) on an old Vaio laptop. It's for a particle system, and uses point sprites and a single texture to render particles.
The problem starts when I run the program on my desktop, with a much newer graphics card (Nvidia GTX 660). I'm pretty sure I've narrowed it down to the fragment shader, as if I ignore the texture and simply pass inColor out again, everything works as expected.
When I include the texture in the shader calculations like you can see below, all points drawn while that shader is in use appear in the center of the screen, regardless of camera position.
You can see a whole mess of particles dead center using the suspect shader, and untextured particles rendering correctly to the right.
The vertex shader, to be safe:
#version 150 core
in vec3 position;
in vec4 color;
out vec4 Color;
uniform mat4 view;
uniform mat4 proj;
uniform float pointSize;
void main() {
Color = color;
gl_Position = proj * view * vec4(position, 1.0);
gl_PointSize = pointSize;
}
And the fragment shader I suspect to be the issue, but really can't see why:
#version 150 core
in vec4 Color;
out vec4 outColor;
uniform sampler2D tex;
void main() {
vec4 t = texture(tex, gl_PointCoord);
outColor = vec4(Color.r * t.r, Color.g * t.g, Color.b * t.b, Color.a * t.a);
}
Untextured particles use the same vertex shader, but the following fragment shader:
#version 150 core
in vec4 Color;
out vec4 outColor;
void main() {
outColor = Color;
}
The main program has a loop that processes SFML window events and calls two functions, draw and update. update doesn't touch GL at any point; draw looks like this:
void draw(sf::Window* window)
{
glClearColor(0.3f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
sf::Texture::bind(&particleTexture);
for (ParticleEmitter* emitter : emitters)
{
emitter->useShader();
camera.applyMatrix(shaderProgram, window);
emitter->draw();
}
}
emitter->useShader() is just a call to glUseProgram() with a GLuint handle to a shader program that is stored in the emitter object on creation.
camera.applyMatrix():
GLint projUniform = glGetUniformLocation(program, "proj");
glUniformMatrix4fv(projUniform, 1, GL_FALSE, glm::value_ptr(projectionMatrix));
...
GLint viewUniform = glGetUniformLocation(program, "view");
glUniformMatrix4fv(viewUniform, 1, GL_FALSE, glm::value_ptr(viewMatrix));
emitter->draw() in its entirety:
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// Build a new vertex buffer object
int vboSize = particles.size() * vboEntriesPerParticle;
std::vector<float> vertices;
vertices.reserve(vboSize);
for (unsigned int particleIndex = 0; particleIndex < particles.size(); particleIndex++)
{
Particle* particle = particles[particleIndex];
particle->enterVertexInfo(&vertices);
}
// Bind this emitter's Vertex Buffer
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// Send vertex data to GPU
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * vertices.size(), &vertices[0], GL_STREAM_DRAW);
GLint positionAttribute = glGetAttribLocation(shaderProgram, "position");
glEnableVertexAttribArray(positionAttribute);
glVertexAttribPointer(positionAttribute,
3,
GL_FLOAT,
GL_FALSE,
7 * sizeof(float),
0);
GLint colorAttribute = glGetAttribLocation(shaderProgram, "color");
glEnableVertexAttribArray(colorAttribute);
glVertexAttribPointer(colorAttribute,
4,
GL_FLOAT,
GL_FALSE,
7 * sizeof(float),
(void*)(3 * sizeof(float)));
GLint sizePointer = glGetUniformLocation(shaderProgram, "pointSize");
glUniform1fv(sizePointer, 1, &pointSize);
// Draw
glDrawArrays(GL_POINTS, 0, particles.size());
And finally, particle->enterVertexInfo():
vertices->push_back(x);
vertices->push_back(y);
vertices->push_back(z);
vertices->push_back(r);
vertices->push_back(g);
vertices->push_back(b);
vertices->push_back(a);
I'm pretty sure this isn't an efficient way to do all this, but this was a piece of coursework I wrote a semester ago. I'm only revisiting it to record a video of it in action.
All shaders compile and link without error. By playing with the fragment shader, I've confirmed that I can use gl_PointCoord to vary a solid color across particles, so that is working as expected. When particles draw in the center of the screen, the texture is drawn correctly, albeit in the wrong place, so that is loaded and bound correctly as well. I'm by no means a GL expert, so that's about as much debugging as I could think to do myself.
This wouldn't be annoying me so much if it didn't work perfectly on an old laptop!
Edit: Included a ton of code
As it turned out in the comments, the shaderProgram variable that was used for setting the camera-related uniforms did not correspond to the program actually in use. As a result, the uniform locations were queried from a different program when drawing the textured particles.
Uniform location assignment is entirely implementation-specific. NVIDIA, for example, tends to assign locations in alphabetical order of the uniform names, so view's location changes depending on whether tex is present (and actively used) or not. If another implementation assigns them in order of appearance in the code, or by some other scheme, things might work by accident.
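A sketch of the fix, assuming each emitter can hand back its own program handle (the getProgram() accessor is hypothetical):
// Query uniform locations from the program actually in use, per emitter
GLuint prog = emitter->getProgram(); // hypothetical accessor
glUseProgram(prog);
glUniformMatrix4fv(glGetUniformLocation(prog, "proj"), 1, GL_FALSE, glm::value_ptr(projectionMatrix));
glUniformMatrix4fv(glGetUniformLocation(prog, "view"), 1, GL_FALSE, glm::value_ptr(viewMatrix));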

OpenGL: changing texture coordinates on the fly

I am currently trying to render the value of an integer using a bitmap (think scoreboard for invaders) but I'm having trouble changing texture coordinates while the game is running.
I link the shader and data like so:
GLint texAttrib = glGetAttribLocation(shaderProgram, "texcoord");
glEnableVertexAttribArray(texAttrib);
glVertexAttribPointer(texAttrib, 2, GL_FLOAT, GL_FALSE,
4 * sizeof(float), (void*)(2 * sizeof(float)));
And in my shaders I do the following:
Vertex Shader:
#version 150
uniform mat4 mvp;
in vec2 position;
in vec2 texcoord;
out vec2 Texcoord;
void main() {
Texcoord = texcoord;
gl_Position = mvp * vec4(position, 0.0, 1.0) ;
}
FragmentShader:
#version 150 core
in vec2 Texcoord;
out vec4 outColor;
uniform sampler2D tex;
void main() {
outColor = texture2D(tex, Texcoord);
}
How would I change this code/implement a function to be able to change the texcoord variable?
If you need to modify the texture coordinates frequently, but the other vertex attributes remain unchanged, it can be beneficial to keep the texture coordinates in a separate VBO. While it's generally preferable to use interleaved attributes, this is one case where that's not necessarily the most efficient solution.
So you would have two VBOs, one for the positions, and one for the texture coordinates. Your setup code will look something like this:
GLuint vboIds[2];
glGenBuffers(2, vboIds);
// Load positions.
glBindBuffer(GL_ARRAY_BUFFER, vboIds[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(positions), positions, GL_STATIC_DRAW);
// Load texture coordinates.
glBindBuffer(GL_ARRAY_BUFFER, vboIds[1]);
glBufferData(GL_ARRAY_BUFFER, sizeof(texCoords), texCoords, GL_DYNAMIC_DRAW);
Note the different last argument to glBufferData(), which is a usage hint. GL_STATIC_DRAW suggests to the OpenGL implementation that the data will not be modified on a regular basis, while GL_DYNAMIC_DRAW suggests that it will be modified frequently.
Then, any time your texture coordinates change, you can modify them with glBufferSubData():
glBindBuffer(GL_ARRAY_BUFFER, vboIds[1]);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(texCoords), texCoords);
Of course if only part of them change, you would only make the call for the part that changes.
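For instance, replacing just the four coordinate pairs of one quad might look like this (quadIndex and newQuadTexCoords are hypothetical names for illustration):
// 4 corners per quad, 2 floats per corner, tightly packed
GLintptr offset = quadIndex * 4 * 2 * sizeof(float);
glBindBuffer(GL_ARRAY_BUFFER, vboIds[1]);
glBufferSubData(GL_ARRAY_BUFFER, offset, 4 * 2 * sizeof(float), newQuadTexCoords);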
You did not specify how exactly the texture coordinates change. If it's just something like a simple transformation, it would be much more efficient to apply that transformation in the shader code, instead of modifying the original texture coordinates.
For example, say you only wanted to shift the texture coordinates. You could have a uniform variable for the shift in your vertex shader, and then add it to the incoming texture coordinate attribute:
uniform vec2 TexCoordShift;
in vec2 TexCoord;
out vec2 FragTexCoord;
...
FragTexCoord = TexCoord + TexCoordShift;
and then in your C++ code:
// Once during setup, after linking program.
TexCoordShiftLoc = glGetUniformLocation(program, "TexCoordShift");
// To change transformation, after glUseProgram(), before glDraw*().
glUniform2f(TexCoordShiftLoc, xShift, yShift);
So I make no promises on the efficiency of this technique, but it's what I do and I'll be damned if text rendering is what slows down my program.
I have a dedicated class to store a mesh, which consists of a few vectors of data and a few GLuints holding handles to my uploaded data. I upload data to OpenGL like this:
glBindBuffer(GL_ARRAY_BUFFER, position);
glBufferData(GL_ARRAY_BUFFER, sizeof(vec3) * data.position.size(), &data.position[0], GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, normal);
glBufferData(GL_ARRAY_BUFFER, sizeof(vec3) * data.normal.size(), &data.normal[0], GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, uv);
glBufferData(GL_ARRAY_BUFFER, sizeof(vec2) * data.uv.size(), &data.uv[0], GL_DYNAMIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * data.index.size(), &data.index[0], GL_DYNAMIC_DRAW);
Then, to draw it I go like this:
glEnableVertexAttribArray(positionBinding);
glBindBuffer(GL_ARRAY_BUFFER, position);
glVertexAttribPointer(positionBinding, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(normalBinding);
glBindBuffer(GL_ARRAY_BUFFER, normal);
glVertexAttribPointer(normalBinding, 3, GL_FLOAT, GL_TRUE, 0, NULL);
glEnableVertexAttribArray(uvBinding);
glBindBuffer(GL_ARRAY_BUFFER, uv);
glVertexAttribPointer(uvBinding, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index);
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_INT, NULL);
glDisableVertexAttribArray(positionBinding);
glDisableVertexAttribArray(normalBinding);
glDisableVertexAttribArray(uvBinding);
This setup is designed for a full-fledged 3D engine, so you can definitely tone it down a little. Basically, I have four buffers: position, uv, normal, and index. You probably only need the first two, so just ignore the others.
Anyway, each time I want to draw some text, I upload my data using the first code chunk I showed, then draw it using the second chunk. It works pretty well, and it's very elegant. This is my code to draw text using it:
vbo(genTextMesh("some string")).draw(); // vbo is my mesh-containing class
I hope this helps, if you have any questions feel free to ask.
I use a uniform vec2 to pass the texture offset into the vertex shader.
I am not sure how efficient that is, but if your texture coordinates are the same shape, and just moved around, then this is an option.
#version 150
uniform mat4 mvp;
uniform vec2 texOffset;
in vec2 position;
in vec2 texcoord;
out vec2 Texcoord;
void main() {
Texcoord = texcoord + texOffset;
gl_Position = mvp * vec4(position, 0.0, 1.0) ;
}
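On the C++ side, the offset is then just a uniform update (a sketch; shaderProgram and the shift values are placeholders):
// once, after linking the program
GLint texOffsetLoc = glGetUniformLocation(shaderProgram, "texOffset");
// per draw, after glUseProgram(shaderProgram)
glUniform2f(texOffsetLoc, uShift, vShift);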

OpenGL: drawing multiple point lights

I'm having an issue drawing multiple point lights in my scene. I am working on a simple maze-style game in OpenGL, where the maze is randomly generated. Each "room" in the maze is represented by a Room struct, like so:
struct Room
{
int x, z;
bool already_visited, n, s, e, w;
GLuint vertex_buffer, texture, uv_buffer, normal_buffer;
std::vector<glm::vec3>vertices, normals;
std::vector<glm::vec2>uvs;
glm::vec3 light_pos; //Stores the position of a light in the room
};
Each room has a light in it, the position of this light is stored in light_pos. This light is used in a simple per-vertex shader, like so:
layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 uv_coords;
layout(location = 2) in vec3 normal;
uniform mat4 mvpMatrix;
uniform mat4 mvMatrix;
uniform vec3 lightpos;
out vec2 vs_uv;
out vec3 vs_normal;
out vec3 color;
void main()
{
gl_Position = mvpMatrix * vec4(pos,1.0);
vs_normal = normal;
vs_uv = uv_coords;
vec3 lightVector = normalize(lightpos - pos);
float diffuse = clamp(dot(normal,lightVector),0.0,1.0);
color = vec3(diffuse,diffuse,diffuse);
}
My fragment shader looks like this (ignore the "vs_normal", it is unused for now):
in vec2 vs_uv;
in vec3 vs_normal;
in vec3 color;
uniform sampler2D tex;
out vec4 frag_color;
void main()
{
frag_color = vec4(color,1.0) * texture(tex,vs_uv).rgba;
}
And my drawing code looks like this:
mat4 mvMatrix = view_matrix*model_matrix;
mat4 mvpMatrix = projection_matrix * mvMatrix;
glBindVertexArray(vertexBufferObjectID);
glUseProgram(shaderProgram);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
for (int x = 0; x < NUM_ROOMS_X; x++)
{
for (int z = 0; z < NUM_ROOMS_Z; z++)
{
//int x = int(std::round(position.x / ROOM_SIZE_X_MAX));
//int z = int(std::round(position.z / ROOM_SIZE_Z_MAX));
Room rm = room_array[x][z];
glBindBuffer(GL_ARRAY_BUFFER, rm.vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.uv_buffer);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.normal_buffer);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glUniformMatrix4fv(mvpMatrixID, 1, GL_FALSE, &mvpMatrix[0][0]);
glUniformMatrix4fv(mvMatrixID, 1, GL_FALSE, &mvMatrix[0][0]);
glUniform3fv(light_ID, 3, &rm.light_pos[0]); //Here is where I'm setting the new light position. It looks like this is ignored
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, rm.texture);
glUniform1i(texture_ID, 0);
glDrawArrays(GL_QUADS, 0, rm.vertices.size());
}
}
glUseProgram(0);
glBindVertexArray(0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glBindTexture(GL_TEXTURE_2D, 0);
However, here is what the result looks like (I've modified my drawing code to draw a box where each light is located, and I've circled the room at position (0,0)):
http://imgur.com/w4uPMD6
As you can see, it looks like only the light at position (0,0) affects any of the rooms on the map, the other lights are simply ignored. I know that the lights are positioned correctly, because the boxes I use to show the positions are correct. I think even though I'm setting the new light_pos, it isn't going through for some reason. Any ideas?
One thing that you are doing, which is not very common, is passing the light position as a vertex attribute. Ideally you should pass it to the shader as a uniform variable, just as you do with the matrices. But I doubt that is the problem here.
I believe your problem is that you are doing the lighting calculations in different spaces. The vertices of the surfaces you draw are in object/model space, while, I'm guessing, your light is located at a point defined in world space. Try multiplying your light position by the inverse of the model matrix you are applying to the vertices. I'm not familiar with GLM, but I figure there must be an inverse() function in it:
glm::vec3 light_pos_object_space = glm::vec3(glm::inverse(model_matrix) * glm::vec4(rm.light_pos, 1.0));
glVertexAttrib3fv(light_ID, &light_pos_object_space[0]);
Figured out my problem. I was calling this function:
glUniform3fv(light_ID, 3, &rm.light_pos[0]);
When I should have been calling this:
glUniform3fv(light_ID, 1, &rm.light_pos[0]);
The second argument of glUniform3fv is the number of array elements to upload, not the number of components. Passing 3 for a uniform that is a single vec3 makes the call invalid, so the light position was silently never updated.

OpenGL/GLSL - Change viewing/model matrices for Glm Manipulation

I can't seem to get my square into the correct viewing matrix in order to manipulate it using glm functions.
This is basically my main.cpp which consists of init() which loads a texture, glsl vert/frag files and constructs the square from the ground class. There is also a reshape() and display() function which calls drawGround() that renders the actual square.
Inside drawGround() I've added the model/view matrices and applied a small translation, but it doesn't work. I've been playing with it for hours and can't seem to get it working.
void display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
drawGround();
glUseProgram(0);
}
void drawGround(){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(myShader.handle());
GLuint matLocation = glGetUniformLocation(myShader.handle(), "ProjectionMatrix");
glUniformMatrix4fv(matLocation, 1, GL_FALSE, &ProjectionMatrix[0][0]);
glm::mat4 viewingMatrix = glm::translate(glm::mat4(1.0),glm::vec3(0,0,-1));
ModelViewMatrix = glm::translate(viewingMatrix,glm::vec3(15.0,0.0,0));
glUniformMatrix4fv(glGetUniformLocation(myShader.handle(), "ModelViewMatrix"), 1, GL_FALSE, &ModelViewMatrix[0][0]);
ground.render(texName, &myShader);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glUseProgram(0);
}
Yet in my other program I have the following function, renderSky(), which works just fine.
Please help me figure out where I'm going wrong.
If you need to see the Ground class, let me know.
void renderSky(){
glUseProgram(mySkyShader.handle());
GLuint matLocation = glGetUniformLocation(mySkyShader.handle(), "ProjectionMatrix");
glUniformMatrix4fv(matLocation, 1, GL_FALSE, &ProjectionMatrix[0][0]);
glm::mat4 viewingMatrix = glm::translate(glm::mat4(1.0),glm::vec3(0,0,-1));
ModelViewMatrix = glm::translate(viewSkyMatrix,glm::vec3(15,0,0));
glUniformMatrix4fv(glGetUniformLocation(mySkyShader.handle(), "ModelViewMatrix"), 1, GL_FALSE, &ModelViewMatrix[0][0]);
skyBox.render();
glUseProgram(0);
}
This is the vertex shader:
#version 150
in vec3 in_Position;
in vec4 in_Color;
out vec4 ex_Color;
in vec2 in_TexCoord;
out vec2 ex_TexCoord;
void main(void)
{
gl_Position = vec4(in_Position, 1.0);
ex_Color = in_Color;
ex_TexCoord = in_TexCoord;
}
In order to get the object into the correct model/view space, the matrices have to be multiplied with in_Position in the vertex shader.
gl_Position = vec4(in_Position, 1.0); becomes this:
gl_Position = ProjectionMatrix * ModelViewMatrix * vec4(in_Position, 1.0);
Also, uniforms for the model-view and projection matrices need to be declared in the shader:
uniform mat4 ModelViewMatrix;
uniform mat4 ProjectionMatrix;
With these additions the vertex shader works, and the matrices can be manipulated by any subsequent glm calls.
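Putting those pieces together, the corrected vertex shader reads:
#version 150
uniform mat4 ModelViewMatrix;
uniform mat4 ProjectionMatrix;
in vec3 in_Position;
in vec4 in_Color;
in vec2 in_TexCoord;
out vec4 ex_Color;
out vec2 ex_TexCoord;
void main(void)
{
gl_Position = ProjectionMatrix * ModelViewMatrix * vec4(in_Position, 1.0);
ex_Color = in_Color;
ex_TexCoord = in_TexCoord;
}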

OpenGL ES 2.0 Texture loading visual glitch

I have been successful in rendering primitives with a colour component via the shader, and also translating them. However, upon attempting to load a texture and render it for the primitives via the shader, the primitives glitch; they should be squares:
As you can see, it successfully loads and applies the texture with the colour component to the single primitive in the scene.
If I then remove the colour component, I again have primitives, but oddly they are scaled when I change the UVs; this should not be the case, as only the texture coordinates should change! (Their origin is also offset.)
My shader init code:
void renderer::initRendererGfx()
{
shader->compilerShaders();
shader->loadAttribute(@"Position");
shader->loadAttribute(@"SourceColor");
shader->loadAttribute(@"TexCoordIn");
}
Here is my object handler rendering function code:
void renderer::drawRender(glm::mat4 &view, glm::mat4 &projection)
{
//Loop through all objects of base type OBJECT
for(int i=0;i<SceneObjects.size();i++){
if(SceneObjects.size()>0){
shader->bind();//Bind the shader for the rendering of this object
SceneObjects[i]->mv = view * SceneObjects[i]->model;
shader->setUniform(@"modelViewMatrix", SceneObjects[i]->mv);//Calculate object model view
shader->setUniform(@"MVP", projection * SceneObjects[i]->mv);//apply projection transforms to object
glActiveTexture(GL_TEXTURE0); // unnecessary in practice
glBindTexture(GL_TEXTURE_2D, SceneObjects[i]->_texture);
shader->setUniform(@"Texture", 0);//Apply the uniform for this instance
SceneObjects[i]->draw();//Draw this object
shader->unbind();//Release the shader for the next object
}
}
}
Here is my sprite buffer initialisation and draw code:
void spriteObject::draw()
{
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, sizeof(SpriteVertex), NULL);
glVertexAttribPointer((GLuint)1, 4, GL_FLOAT, GL_FALSE, sizeof(SpriteVertex) , (GLvoid*) (sizeof(GL_FLOAT) * 3));
glVertexAttribPointer((GLuint)2, 2, GL_FLOAT, GL_FALSE, sizeof(SpriteVertex) , (GLvoid*)(sizeof(GL_FLOAT) * 7));
glDrawElements(GL_TRIANGLE_STRIP, sizeof(SpriteIndices)/sizeof(SpriteIndices[0]), GL_UNSIGNED_BYTE, 0);
}
void spriteObject::initBuffers()
{
glGenBuffers(1, &vertexBufferID);
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(SpriteVertices), SpriteVertices, GL_STATIC_DRAW);
glGenBuffers(1, &indexBufferID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(SpriteIndices), SpriteIndices, GL_STATIC_DRAW);
}
Here is the vertex shader:
attribute vec3 Position;
attribute vec4 SourceColor;
varying vec4 DestinationColor;
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
uniform mat4 MVP;
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;
void main(void) {
DestinationColor = SourceColor;
gl_Position = MVP * vec4(Position,1.0);
TexCoordOut = TexCoordIn;
}
And finally the fragment shader:
varying lowp vec4 DestinationColor;
varying lowp vec2 TexCoordOut;
uniform sampler2D Texture;
void main(void) {
gl_FragColor = DestinationColor * texture2D(Texture, TexCoordOut);
}
If you want to see any more specifics of certain elements, just ask.
Many thanks.
Are you sure your triangles have the same winding? The winding is the order in which the triangle's points are listed (either clockwise or counter-clockwise). Face culling uses the winding to determine whether a triangle is front-facing or back-facing.
You can easily check whether your triangles are wound incorrectly by disabling face culling:
glDisable( GL_CULL_FACE );
More information here: http://db-in.com/blog/2011/02/all-about-opengl-es-2-x-part-23/#face_culling
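If the squares show up once culling is disabled, the triangles' windings are inconsistent; either fix the index order, or declare which winding counts as front-facing. A sketch of the relevant state (all values shown are the GL defaults):
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK); // cull back faces
glFrontFace(GL_CCW); // counter-clockwise vertex order is front-facing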