OpenGL shader bitwise operation not working on Windows - C++

I use an integer as a "filter", pass it to a geometry shader, and use a bitwise operation to read the bit values. It works on macOS, but not on Windows. To illustrate the problem, I modified the geometry shader part of the tutorial code from learnopengl.com, found at
https://learnopengl.com/Advanced-OpenGL/Geometry-Shader
Based on the tutorial code, I added the following to main.cpp. (I added enabledFaces[] and VFO and passed them to the shaders.) For simplicity, I set only one bit in each enabledFaces[] integer.
int enabledFaces[] = {
1 << 0, 1 << 1, 1 << 2, 1 << 3
};
unsigned int VBO, VAO, VFO;
glGenBuffers(1, &VBO);
glGenBuffers(1, &VFO);
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(points), &points, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(2 * sizeof(float)));
glBindVertexArray(VFO);
glBindBuffer(GL_ARRAY_BUFFER, VFO);
glBufferData(GL_ARRAY_BUFFER, sizeof(enabledFaces), &enabledFaces, GL_STATIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 1, GL_INT, GL_FALSE, sizeof(int), 0);
glBindVertexArray(0);
The vertex shader is a pass-through:
#version 330 core
layout (location = 0) in vec2 aPos;
layout (location = 1) in vec3 aColor;
layout (location = 2) in int vEnabledFaces;
out int gEnabledFaces;
out VS_OUT {
vec3 color;
} vs_out;
void main() {
vs_out.color = aColor;
gl_Position = vec4(aPos.x, aPos.y, 0.0, 1.0);
gEnabledFaces = vEnabledFaces;
}
And the geometry shader (I added the if statement that tests gEnabledFaces):
#version 330 core
layout (points) in;
layout (triangle_strip, max_vertices = 5) out;
in int gEnabledFaces[];
in VS_OUT {
vec3 color;
} gs_in[];
out vec3 fColor;
void build_house(vec4 position)
{
fColor = gs_in[0].color; // gs_in[0] since there's only one input vertex
gl_Position = position + vec4(-0.2, -0.2, 0.0, 0.0); // 1:bottom-left
EmitVertex();
gl_Position = position + vec4( 0.2, -0.2, 0.0, 0.0); // 2:bottom-right
EmitVertex();
gl_Position = position + vec4(-0.2, 0.2, 0.0, 0.0); // 3:top-left
EmitVertex();
gl_Position = position + vec4( 0.2, 0.2, 0.0, 0.0); // 4:top-right
EmitVertex();
gl_Position = position + vec4( 0.0, 0.4, 0.0, 0.0); // 5:top
fColor = vec3(1.0, 1.0, 1.0);
EmitVertex();
EndPrimitive();
}
void main() {
if ( (gEnabledFaces[0] & 0x01) != 0 || (gEnabledFaces[0] & 0x04) != 0)
build_house(gl_in[0].gl_Position);
}
No change in the fragment shader:
#version 330 core
out vec4 FragColor;
in vec3 fColor;
void main()
{
FragColor = vec4(fColor, 1.0);
}
Because of the if statement in main() in the geometry shader, two of the four houses (the first and the third) should be displayed. It works correctly on macOS, but nothing is displayed on Windows. If I remove the if statement, all four display correctly on Windows. Would someone please explain why this does not work on Windows and how to fix it? Thank you.

As suggested by G.M., using glVertexAttribIPointer solves the problem.
However, I use Qt, and unfortunately glVertexAttribIPointer does not seem to be available there. So I changed the glVertexAttribPointer call to a float type instead of an integer type, changing
glVertexAttribPointer(2, 1, GL_INT, GL_FALSE, sizeof(int), 0);
to
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, sizeof(float), 0);
Then it works on Windows, even though the variables being passed (in the C++ code and in the shaders) are all integer types. Strange, but it works.
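For reference, here is a minimal sketch of the glVertexAttribIPointer route (an assumption on my part that the context exposes it, e.g. desktop OpenGL 3.0+; Qt's versioned core-profile function wrappers should expose it too, but I have not verified that here). glVertexAttribPointer converts attribute data to floating point on its way to the shader, so feeding an "in int" attribute through it is undefined behavior - which is why it can appear to work on one driver and fail on another - whereas glVertexAttribIPointer keeps the values as integers:
// Sketch: upload and describe the integer attribute without any float conversion.
// Uses the same VFO buffer and attribute location 2 as in the question.
glBindBuffer(GL_ARRAY_BUFFER, VFO);
glBufferData(GL_ARRAY_BUFFER, sizeof(enabledFaces), enabledFaces, GL_STATIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribIPointer(2, 1, GL_INT, sizeof(int), 0); // note: no "normalized" argument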

Related

How to Draw OpenMesh using OpenGL

I'm working on a project where I'm using OpenMesh to read STL and OBJ files and draw them on the screen using OpenGL.
I've been doing the following:
#include <OpenMesh/Core/Mesh/TriMesh_ArrayKernelT.hh>
#include <OpenMesh/Core/IO/MeshIO.hh>
OpenMesh::TriMesh_ArrayKernelT<> mesh;
std::vector<point> vertices;
std::vector<point> normals;
void readMesh(std::string file)
{
OpenMesh::IO::read_mesh(mesh, file);
mesh.request_face_normals();
mesh.request_vertex_normals();
mesh.update_normals();
vertices.clear();
normals.clear();
for (auto face : mesh.faces())
{
for (auto vertex : mesh.fv_range(face))
{
auto point = mesh.point(vertex);
auto normal = mesh.normal(face);
vertices.push_back(point);
normals.push_back(normal);
}
}
mesh.release_face_normals();
mesh.release_vertex_normals();
}
and when drawing, I just pass the vertices and normals vectors to the vertex shader like this:
void paint()
{
glSetAttributeArray(0, vertices.data());
glSetAttributeArray(1, normals.data());
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
}
where the vertex shader looks like this:
attribute vec3 position;
attribute vec3 normal;
uniform mat4 modelViewMatrix;
void main(void)
{
vec4 color = vec4(0.25, 0.25, 0.25, 0.0);
vec4 P = vec4(position, 0);
vec4 N = vec4(normal, 0);
vec3 L = vec3(20, 20, 20) - position;
vec3 V = -position;
N = normalize(N);
L = normalize(L);
V = normalize(V);
vec3 R = reflect(-L, vec3(N));
vec3 diffuse = max(dot(vec3(N), L), 0.0) * color.rgb;
vec3 specular = pow(max(dot(R, V), 0.0), 0.2) * vec3(0.1, 0.1, 0.1);
color = vec4(color.a * (ambient + diffuse + specular), color.a);
color = clamp(color, 0.0, 1.0);
gl_Color = color;
gl_Position = modelViewMatrix * P;
}
and the fragment shader is:
void main(void)
{
gl_FragColor = gl_Color;
}
This produces pretty good results, but the idea of keeping another copy of the vertices and normals in separate containers (the vertices and normals vectors) just to be able to draw the mesh seems very counter-intuitive.
I was wondering if I can use OpenGL buffers with OpenMesh to optimize this. I've been searching for anything on this topic for a while but found nothing.
See Vertex Specification. You can create two Vertex Buffer Objects, one for the vertex coordinates and one for the normal vectors:
GLuint vbos[2];
glGenBuffers(2, vbos);
glBindBuffer(GL_ARRAY_BUFFER, vbos[0]);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(vertices[0]), vertices.data(), GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vbos[1]);
glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(normals[0]), normals.data(), GL_STATIC_DRAW);
If you use OpenGL 3.0 or later, you can create a Vertex Array Object and store the vertex specification state in it:
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbos[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glBindBuffer(GL_ARRAY_BUFFER, vbos[1]);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
When you want to draw the mesh, it is sufficient to bind the VAO:
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
If you use OpenGL 2.0, you cannot create a VAO, so you have to specify the arrays of generic vertex attribute data before drawing the mesh:
glBindBuffer(GL_ARRAY_BUFFER, vbos[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glBindBuffer(GL_ARRAY_BUFFER, vbos[1]);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
Furthermore, note that the attribute indices are not guaranteed to be 0 and 1. The attribute indices can be arbitrary numbers.
If you used GLSL version 3.30, it would be possible to set the attribute indices in the shader code with a Layout Qualifier.
In any case, you can define the attribute indices with glBindAttribLocation before linking the program, or retrieve the attribute indices with glGetAttribLocation after linking the program.
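For example, a minimal sketch using the attribute names from the shader above ("position" and "normal"); program stands for the shader program object id, and the layout-qualifier variant assumes the shader is moved to #version 330:
// Option 1: assign the indices yourself, before linking.
glBindAttribLocation(program, 0, "position");
glBindAttribLocation(program, 1, "normal");
glLinkProgram(program);
// Option 2: ask the linker which indices it chose, after linking.
GLint posLoc = glGetAttribLocation(program, "position");
GLint normalLoc = glGetAttribLocation(program, "normal");
// Option 3 (GLSL 3.30): fix the indices in the vertex shader instead:
//   layout (location = 0) in vec3 position;
//   layout (location = 1) in vec3 normal;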

OpenGL camera movement program vertex shader issue

So, I'm a beginner learning graphics programming. I'm working on a program for camera movement. I think there's something wrong with the vertex shader. The program runs with no errors, but the screen is completely blank. Here is the vertex shader I'm using:
#version 330
in vec4 vPosition;
out vec4 vColor;
uniform mat4 model_view;
uniform mat4 projection;
void main()
{
vec4 pos = projection * model_view * vPosition / vPosition.w;
gl_Position = pos;
vColor = vPosition;
}
If I switch the shader back to a basic version:
#version 330
in vec4 vPosition;
out vec4 vColor;
void
main()
{
gl_Position = vPosition;
vColor = vPosition;
}
The program runs and renders a triangle successfully. So, I'm pretty sure the error is with the shader.
The shaders are loaded in the initialize function:
void initialize(void)
{
glClearColor(1.0, 1.0, 1.0, 1.0); // white background
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// Create and initialize a buffer object
GLuint buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(points), points, GL_STATIC_DRAW);
// Load shaders and use the resulting shader program
GLuint program = InitShader("res/shaders/vshader21.glsl", "res/shaders/fshader21.glsl");
model_view = glGetUniformLocation(program, "model_view");
projection = glGetUniformLocation(program, "projection");
glUseProgram(program);
// Initialize the vertex position attribute from the vertex shader
GLuint loc = glGetAttribLocation(program, "vPosition");
glEnableVertexAttribArray(loc);
glVertexAttribPointer(loc, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
}
The 'points' array used in glBufferData is as follows:
const int WIDTH = 500, HEIGHT = 500;
/* Positions */
vec4 points[] = {
vec4(0.5,0.5, 1, 1),
vec4(-0.5,0.5, 1, 1),
vec4(0.5,-0.5, 1, 1) ,
vec4(-0.5,-0.5, 1, 1)
};
model_view and projection are global GLuint variables in the main application.
I set the uniform variables (projection and model_view) in the display function:
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT); // clear the window
glPointSize(20.0);
// Projection transformation parameters
GLfloat left = -1.0, right = 1.0;
GLfloat bottom = -1.0, top = 1.0;
GLfloat zNear = 0, zFar = 3.0;
mat4 p = Ortho(left, right, bottom, top, zNear, zFar);
glUniformMatrix4fv(projection, 1, GL_TRUE, p);
vec4 eye(0.0, 0.0, -1.0, 1.0);
vec4 at(0.0, 0.0, 0.0, 1.0);
vec4 up(0.0, 1.0, 0.0, 0.0);
mat4 mv = LookAt(eye, at, up);
glUniformMatrix4fv(model_view, 1, GL_TRUE, mv);
glDrawArrays(GL_TRIANGLES, 0, 3); // draw the points
glFlush();
}
What could possibly be going wrong?
The explicit division by the .w component is superfluous. Change
vec4 pos = projection * model_view * vPosition / vPosition.w;
to
vec4 pos = projection * model_view * vPosition;
Note that the perspective divide is performed automatically after clipping.
Since the vector is multiplied by the matrix from the right, you do not have to transpose the matrices. Change
glUniformMatrix4fv(projection, 1, GL_TRUE, p);
glUniformMatrix4fv(model_view, 1, GL_TRUE, mv);
to
glUniformMatrix4fv(projection, 1, GL_FALSE, p);
glUniformMatrix4fv(model_view, 1, GL_FALSE, mv);
See GLSL Programming/Vector and Matrix Operations
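Putting both changes together, the vertex shader would look like this (a sketch; the matching change in display() is simply passing GL_FALSE as the transpose argument to both glUniformMatrix4fv calls):
#version 330
in vec4 vPosition;
out vec4 vColor;
uniform mat4 model_view;
uniform mat4 projection;
void main()
{
    // No manual division by w; the perspective divide happens automatically after clipping.
    gl_Position = projection * model_view * vPosition;
    vColor = vPosition;
}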

Why is vertex transformation outside the GPU not the same as vertex transformation inside the GPU?

As we know, if we want to draw something in world space, we need to multiply our data by the view and projection matrices. For my task, I now need to do the vertex transformation outside the shaders and send the transformed vertices directly, like this:
Matrix initialization
glm::mat4 view, projection;
view = glm::lookAt(this->cameraPos, this->cameraPivot, this->Up);
projection = glm::perspective(glm::radians(60.0f), (float)screenWidth / (float)screenHeight, 0.1f, 100.0f);
Code
vector<glm::vec3> data;
glm::mat4 projection, view;
...
data.push_back(glm::vec3(projection * view * glm::vec4(0.0, 0.0, 0.5, 0.0)));
data.push_back(glm::vec3(projection * view * glm::vec4(0.0, 0.5, 0.5, 0.0)));
data.push_back(glm::vec3(projection * view * glm::vec4(0.5, 0.5, 0.5, 0.0)));
data.push_back(glm::vec3(projection * view * glm::vec4(0.5, 0.0, 0.5, 0.0)));
...
shader.set();
GLuint VAO,VBO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, data.size() * sizeof(vector<glm::vec3>), &data.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
...
glBindVertexArray(VAO);
glDrawArrays(GL_LINE_LOOP, 0, data.size());
glBindVertexArray(0);
Shader
#version 330 core
layout (location = 0) in vec3 position;
void main()
{
gl_Position = vec4(position, 1.0f);
}
But I get the wrong transformation - it looks like my data takes only the view matrix into account, not the projection matrix. But if I do it the usual way:
Code
...
data.push_back(glm::vec3(glm::vec4(0.0, 0.0, 0.5, 0.0)));
...
shader.set();
glUniformMatrix4fv(glGetUniformLocation(shader.Program, "matrices"), 1,
GL_FALSE, glm::value_ptr(projection * view));
Shader
#version 330 core
layout (location = 0) in vec3 position;
uniform mat4 matrices;
void main()
{
gl_Position = matrices * vec4(position, 1.0f);
}
it works as expected. What is my mistake? For my task I need to know the position of every vertex in world space before it is sent to the shader. How can I do that?
The homogeneous coordinate "w" of your vertices should be 1.0f, or else the translation part of the transform will not be applied to the vertex.
I got it! Thanks Daniel for the idea!
In my case it is not the "w" in the initialization of the vertices that matters; the important part is elsewhere.
Look at the shader code in example #2:
#version 330 core
layout (location = 0) in --->vec3<--- position;
uniform mat4 matrices;
void main()
{
gl_Position = matrices * vec4(position, --->1.0f<---);
}
In the shader input I take a vec3, and that is wrong! We need to keep the "w" component produced by the matrix multiplication:
Shader
layout (location = 0) in vec4 position;
...
void main()
{
gl_Position = matrices * position;
}
Code
vector<glm::vec4> data;
glm::mat4 projection, view;
...
data.push_back(projection * view * glm::vec4(0.0, 0.0, 0.5, 1.0));
...
glBufferData(GL_ARRAY_BUFFER, data.size() * sizeof(glm::vec4), &data.data(), GL_STATIC_DRAW);
....
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);
That's it! Thanks everyone!
Careful with the glBufferData calls: &data.data() passes the address of the pointer returned by data(), not the vertex data itself (it should just be data.data()), and the size in the question is computed with sizeof(vector<glm::vec3>) instead of sizeof(glm::vec3). At the very least the size is wrong, and I suspect the data too. That applies to the question, the answer, and the comments.
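For what it is worth, a minimal sketch of an upload that matches the final vec4 approach (same data vector and attribute location 0 as above):
// data already holds projection * view * vertex as glm::vec4 values
glBufferData(GL_ARRAY_BUFFER, data.size() * sizeof(glm::vec4), data.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);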

OpenGL VBO data seems to get corrupted

I've uploaded vertices, colors, normals, and texture coordinates into a single VBO, which is associated with a VAO. I also have an EBO associated with the same VAO that stores indices. I am also using SDL and OpenGL 3.3 (Core Profile context, which is set using SDL).
At first, my model seems to render fine. Then after maybe 8 or so seconds, it looks like the data gets corrupted.
Here is a video: https://youtu.be/eEiH3EFTPFk
Every frame I am pulling the data out of OpenGL (using glGetNamedBufferSubData) and comparing to what it should be, and everything seems to check out.
Does anyone have any idea what might be happening here? I appreciate any insight you guys might be able to provide.
Here is my code for loading the model data:
struct Vbo
{
GLuint id;
};
struct Ebo
{
GLuint id;
GLenum mode;
GLsizei count;
GLenum type;
};
struct Vao
{
GLuint id;
Vbo vbo[4];
Ebo ebo;
};
// ...
MeshId GraphicsEngine::createStaticMesh(
std::vector<glm::vec3> vertices,
std::vector<glm::detail::uint32> indices,
std::vector<glm::vec4> colors,
std::vector<glm::vec3> normals,
std::vector<glm::vec2> textureCoordinates
)
{
Vao vao;
glGenVertexArrays(1, &vao.id);
glGenBuffers(1, &vao.vbo[0].id);
glGenBuffers(1, &vao.ebo.id);
auto size = vertices.size() * sizeof(glm::vec3);
size += colors.size() * sizeof(glm::vec4);
size += normals.size() * sizeof(glm::vec3);
size += textureCoordinates.size() * sizeof(glm::vec2);
glBindVertexArray(vao.id);
glBindBuffer(GL_ARRAY_BUFFER, vao.vbo[0].id);
glBufferData(GL_ARRAY_BUFFER, size, nullptr, GL_STATIC_DRAW);
auto offset = 0;
glBufferSubData(GL_ARRAY_BUFFER, offset, vertices.size() * sizeof(glm::vec3), &vertices[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
offset += vertices.size() * sizeof(glm::vec3);
glBufferSubData(GL_ARRAY_BUFFER, offset, colors.size() * sizeof(glm::vec4), &colors[0]);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (GLvoid*)(offset));
glEnableVertexAttribArray(1);
offset += colors.size() * sizeof(glm::vec4);
glBufferSubData(GL_ARRAY_BUFFER, offset, normals.size() * sizeof(glm::vec3), &normals[0]);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)(offset));
glEnableVertexAttribArray(2);
offset += normals.size() * sizeof(glm::vec3);
glBufferSubData(GL_ARRAY_BUFFER, offset, textureCoordinates.size() * sizeof(glm::vec2), &textureCoordinates[0]);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid*)(offset));
glEnableVertexAttribArray(3);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vao.ebo.id);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(glm::detail::uint32), &indices[0], GL_STATIC_DRAW);
glBindVertexArray(0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
vao.ebo.count = indices.size();
vao.ebo.mode = GL_TRIANGLES;
vao.ebo.type = GL_UNSIGNED_INT;
vertexArrayObjects_.push_back(vao);
auto index = vertexArrayObjects_.size() - 1;
return MeshId(index);
}
Here is my code that does the rendering:
// Setup camera
const glm::quat temp = glm::conjugate(camera_.orientation);
view_ = glm::mat4_cast(temp);
view_ = glm::translate(view_, glm::vec3(-camera_.position.x, -camera_.position.y, -camera_.position.z));
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(shaderProgram_);
const int modelMatrixLocation = glGetUniformLocation(shaderProgram_, "modelMatrix");
const int pvmMatrixLocation = glGetUniformLocation(shaderProgram_, "pvmMatrix");
const int normalMatrixLocation = glGetUniformLocation(shaderProgram_, "normalMatrix");
glm::detail::uint32 i = 0;
for ( const auto& r : renderables_ )
{
const auto& graphicsData = graphicsData_[i];
glm::mat4 newModel = glm::translate(model_, graphicsData.position);
newModel = newModel * glm::mat4_cast( graphicsData.orientation );
newModel = glm::scale(newModel, graphicsData.scale);
// Send uniform variable values to the shader
const glm::mat4 pvmMatrix(projection_ * view_ * newModel);
glUniformMatrix4fv(pvmMatrixLocation, 1, GL_FALSE, &pvmMatrix[0][0]);
glm::mat3 normalMatrix = glm::inverse(glm::transpose(glm::mat3(view_ * newModel)));
glUniformMatrix3fv(normalMatrixLocation, 1, GL_FALSE, &normalMatrix[0][0]);
glUniformMatrix4fv(modelMatrixLocation, 1, GL_FALSE, &newModel[0][0]);
glBindTexture(GL_TEXTURE_2D, r.texture.id);
glBindVertexArray(r.vao.id);
glDrawElements(r.vao.ebo.mode, r.vao.ebo.count, r.vao.ebo.type, 0);
glBindVertexArray(0);
i++;
}
Fragment shader:
#version 330 core
in vec4 ourColor;
in vec2 texCoord;
out vec4 color;
uniform sampler2D ourTexture;
void main()
{
color = texture(ourTexture, texCoord);
}
Vertex shader:
#version 330 core
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform mat4 modelMatrix;
uniform mat4 pvmMatrix;
uniform mat3 normalMatrix;
layout (location = 0) in vec3 position;
layout (location = 1) in vec4 color;
layout (location = 2) in vec3 normal;
layout (location = 3) in vec2 textureCoordinate;
out vec4 ourColor;
out vec2 texCoord;
void main()
{
//gl_Position = vec4(position, 1.0);
gl_Position = pvmMatrix * vec4(position, 1.0);
ourColor = color;
texCoord = textureCoordinate;
}
As per MichaelNastenko's comment above, I added glEnable(GL_DEPTH_TEST); before my rendering code and it seems to fix it.
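For context, a minimal sketch of where that call goes (the depth-buffer clear was already present in the render code above; glDepthFunc(GL_LESS) only makes the default explicit):
// Once, after the OpenGL context is created:
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS); // default comparison, shown for clarity
// Every frame, clear both color and depth before drawing:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);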

Getting textures to work in OpenGL 3.2

I have been staring at this code for a while now with no luck. I'm working on integrating librocket into my own project (the library isn't that important to the question) and part of that requires writing a renderer class. I've been trying to do just that but can't get the textures to display. The vertex color and position work fine.
I'm using OpenGL 3.2.
I've temporarily modified the code to try to draw a single quad. The only parameter being used is the texture parameter, which is just a GLuint cast to another type.
There's a good chance that I'm missing something stupid, but I can't see it. Hopefully another set of eyes will help. Feel free to ask for more code/info.
// Called by Rocket when it wants to render geometry that it does not wish to optimise.
void SDLRenderInterface::RenderGeometry(Rocket::Core::Vertex* vertices, int num_vertices, int* indices, int num_indices, const Rocket::Core::TextureHandle texture, const Rocket::Core::Vector2f& translation)
{
GLuint program;
GLuint vertexBuffer;
GLuint indexBuffer;
GLuint vertexPosLoc = 0;
GLuint vertexColorLoc = 0;
GLuint vertexTexCoordLoc = 0;
GLuint texSamplerLoc = 0;
GLuint translationLoc = 0;
GLuint viewDimLoc = 0;
int offset = 8;
int vertexCount = 4;
float vertexData[] = {-0.5, -0.5, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0,
0.5, -0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
0.5, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-0.5, 0.5, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0};
int indexData[] = {0,1,2,0,2,3};
int indexCount = 6;
// Populate vertex buffer
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float)*offset*vertexCount,
vertexData, GL_STATIC_DRAW);
// Populate index buffer
glGenBuffers(1, &indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(int) * indexCount,
indexData, GL_STATIC_DRAW);
program = shaderManager->getProgram(2, "rocketTex.vert",
"rocketTex.frag");
glUseProgram(program);
// Set up the texture
texSamplerLoc = glGetUniformLocation(program, "texSampler");
vertexTexCoordLoc = glGetAttribLocation(program, "vertexTexCoord");
if(texSamplerLoc == -1)
{
std::cerr << "Error: cannot find texture location." << std::endl;
return;
}
if(vertexTexCoordLoc == -1)
{
std::cerr << "Error: cannot find texture coord location."
<< std::endl;
return;
}
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, (GLuint) texture);
glUniform1i(texSamplerLoc, 0);
// Set up the per vertex texture coords
glEnableVertexAttribArray(vertexTexCoordLoc);
glVertexAttribPointer(vertexTexCoordLoc, 2, GL_FLOAT, GL_FALSE,
offset * sizeof(float),
(void*) (sizeof(float) * 6));
// Set up uniforms
translationLoc = glGetUniformLocation(program, "translation");
viewDimLoc = glGetUniformLocation(program, "viewDimensions");
if(translationLoc == -1)
{
std::cerr << "Error: cannot find translation location."
<< std::endl;
return;
}
if(viewDimLoc == -1)
{
std::cerr << "Error: cannot find viewDim location."
<< std::endl;
return;
}
glUniform2f(translationLoc, 0,0);
glUniform2f(viewDimLoc, 1,1);
// Set up per-vertex attributes
vertexPosLoc = glGetAttribLocation(program, "vertexPosition");
vertexColorLoc = glGetAttribLocation(program, "vertexColor");
if(vertexPosLoc == -1)
{
std::cerr << "Error: cannot find vertex position location."
<< std::endl;
return;
}
if(vertexColorLoc == -1)
{
std::cerr << "Error: cannot find vertex color location."
<< std::endl;
return;
}
glEnableVertexAttribArray(vertexPosLoc);
glEnableVertexAttribArray(vertexColorLoc);
glVertexAttribPointer(vertexPosLoc, 2, GL_FLOAT, GL_FALSE,
offset * sizeof(float), 0);
glVertexAttribPointer(vertexColorLoc, 4, GL_FLOAT, GL_TRUE,
offset * sizeof(float),
(void*) (sizeof(float) * 2));
// Draw the geometry
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_INT, 0);
glDisableVertexAttribArray(vertexPosLoc);
glDisableVertexAttribArray(vertexColorLoc);
glDisableVertexAttribArray(vertexTexCoordLoc);
glDeleteBuffers(1, &vertexBuffer);
glDeleteBuffers(1, &indexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glUseProgram(0);
}
Vertex Shader:
#version 120
uniform vec2 translation;
uniform vec2 viewDimensions;
attribute vec2 vertexPosition;
attribute vec4 vertexColor;
attribute vec2 vertexTexCoord;
varying vec2 texCoord;
varying vec4 fragColor;
void main(void)
{
vec2 ndcPos = ((vertexPosition + translation)/(viewDimensions));
texCoord = vertexTexCoord;
fragColor = vertexColor;
gl_Position = vec4(ndcPos, 0.0, 1.0);
}
Fragment Shader:
#version 120
uniform sampler2D texSampler;
varying vec2 texCoord;
varying vec4 fragColor;
void main(void)
{
vec4 objectColor = texture2D(texSampler, texCoord);
gl_FragColor = vec4((objectColor * fragColor).xyz, 1.0);
}
So, I finally figured it out. jozxyqk's advice for testing the texture coords confirmed my suspicions that the texture coordinates were off (every vertex was getting the same coordinate). The problem ended up being that I was calling glVertexAttribDivisor(attributeLoc, 1) in another part of my code and never setting it back to per vertex, so it was affecting my other shaders. Thinking about the design of OpenGL, it makes sense that this would be necessary.
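For reference, a sketch of that reset (attributeLoc is a placeholder for whichever attribute index had its divisor changed). glVertexAttribDivisor applies per attribute index, so an index left at divisor 1 keeps stepping per instance in every later draw until it is set back to 0:
// Instanced pass: advance this attribute once per instance.
glVertexAttribDivisor(attributeLoc, 1);
// ... instanced draw calls ...
// Restore per-vertex stepping so unrelated draws (like this renderer) are unaffected.
glVertexAttribDivisor(attributeLoc, 0);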
Glad that's settled!