I am trying to wrap my head around the various types of GLSL shaders in OpenGL.
At the moment I am struggling with a 2d layered-tile implementation. For some reason the int values that get passed into my shader are always 0 (or more likely, null).
I currently have a 2048x2048px 2d texture composed of 20x20 tiles. I am trying to texture one quad with it and change the index of the tile based upon the block of ints I pass into the vertex shader.
I am passing in a vec2 of floats for the position of the quad (really a TRIANGLE_STRIP). I am also attempting to pass in 6 ints that will represent the 6 layers of tiles.
My input:
// Build and compile our shader program
Shader ourShader("b_vertex.vertexShader", "b_fragment.fragmentShader");
const int floatsPerPosition = 2;
const int intsPerTriangle = 6;
const int numVertices = 4;
const int sizeOfPositions = sizeof(float) * numVertices * floatsPerPosition;
const int sizeOfColors = sizeof(int) * numVertices * intsPerTriangle;
const int numIndices = 4;
const int sizeOfIndices = sizeof(int) * numIndices;
float positions[numVertices][floatsPerPosition] =
{
{ -1, 1 },
{ -1, -1 },
{ 1, 1 },
{ 1, -1 },
};
// ints indicating Tile Index
int colors[numVertices][intsPerTriangle] =
{
{ 1, 2, 3, 4, 5, 6 },
{ 1, 2, 3, 4, 5, 6 },
{ 1, 2, 3, 4, 5, 6 },
{ 1, 2, 3, 4, 5, 6 },
};
// Indexes on CPU
int indices[numVertices] =
{
0, 1, 2, 3,
};
My setup:
GLuint vao, vbo1, vbo2, ebo; // Identifiers of OpenGL objects
glGenVertexArrays(1, &vao); // Create new VAO
// Bound VAO will store connections between VBOs and attributes
glBindVertexArray(vao);
glGenBuffers(1, &vbo1); // Create new VBO
glBindBuffer(GL_ARRAY_BUFFER, vbo1); // Bind vbo1 as current vertex buffer
// initialize vertex buffer, allocate memory, fill it with data
glBufferData(GL_ARRAY_BUFFER, sizeOfPositions, positions, GL_STATIC_DRAW);
// indicate that current VBO should be used with vertex attribute with index 0
glEnableVertexAttribArray(0);
// indicate how vertex attribute 0 should interpret data in connected VBO
glVertexAttribPointer(0, floatsPerPosition, GL_FLOAT, GL_FALSE, 0, 0);
glGenBuffers(1, &vbo2); // Create new VBO
glBindBuffer(GL_ARRAY_BUFFER, vbo2); // Bind vbo2 as current vertex buffer
// initialize vertex buffer, allocate memory, fill it with data
glBufferData(GL_ARRAY_BUFFER, sizeOfColors, colors, GL_STATIC_DRAW);
// indicate that current VBO should be used with vertex attribute with index 1
glEnableVertexAttribArray(1);
// indicate how vertex attribute 1 should interpret data in connected VBO
glVertexAttribPointer(1, intsPerTriangle, GL_INT, GL_FALSE, 0, 0);
// Create new buffer that will be used to store indices
glGenBuffers(1, &ebo);
// Bind index buffer to corresponding target
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
// initialize index buffer, allocate memory, fill it with data
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeOfIndices, indices, GL_STATIC_DRAW);
// reset bindings for VAO, VBO and EBO
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// Load and create a texture
GLuint texture1 = loadBMP_custom("uvtemplate3.bmp");
GLuint texture2 = loadBMP_custom("texture1.bmp");
My draw:
// Game loop
while (!glfwWindowShouldClose(window))
{
// Check if any events have been activated (key pressed, mouse moved etc.) and call corresponding response functions
glfwPollEvents();
// Render
// Clear the colorbuffer
glClearColor(1.f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Activate shader
ourShader.Use();
// Bind Textures using texture units
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture1);
//add some cool params
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
float borderColor[] = { 0.45f, 0.25f, 0.25f, 0.25f };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture1"), 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture2);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture2"), 1);
// Draw container
//glBindVertexArray(VAO);
//glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(vao);
//glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glDrawElements(GL_TRIANGLE_STRIP, numIndices, GL_UNSIGNED_INT, NULL);
glBindVertexArray(0);
// Swap the screen buffers
glfwSwapBuffers(window);
}
My shader most definitely works, as I can adjust the output by hard-coding the values from within the vertex shader. My suspicion is that I am not passing the values correctly, or in the correct format, or that I am not indicating somewhere that the int[6] needs to be included per vertex.
I cannot read anything from my layout (location = 1) in int Base[6]; I've tried just about everything I can think of: declaring each int individually, reading two ivec3s, uint, and whatever else I could think of, but everything comes back as 0.
The following are my vertex and fragment shader for completeness:
#version 330 core
layout (location = 0) in vec2 position;
layout (location = 1) in int Base[6];
out vec2 TexCoord;
out vec2 TexCoord2;
out vec2 TexCoord3;
out vec2 TexCoord4;
out vec2 TexCoord5;
out vec2 TexCoord6;
// 0.5f, 0.5f,// 0.0f, 118.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Top Right
// 0.5f, -0.5f,// 0.0f, 118.0f, 1.0f, 0.0f, 0.0f,0.009765625f, // Bottom Right
// -0.5f, -0.5f,// 0.0f, 118.0f, 0.0f, 1.0f, 0.009765625f, 0.009765625f, // Bottom Left
// -0.5f, 0.5f//, 0.0f, 118.0f, 1.0f, 0.0f, 0.009765625f, 0.0f // Top Left
void main()
{
int curBase = Base[5];
int curVertex = gl_VertexID % 4;
vec2 texCoord = (curVertex == 0?
vec2(0.0,0.0):(
curVertex == 1?
vec2(0.0,0.009765625):(
curVertex == 2?
vec2(0.009765625,0.0):(
curVertex == 3?
vec2(0.009765625,0.009765625):(
vec2(0.0,0.0)))))
);
gl_Position = vec4(position, 0.0f, 1.0f);
TexCoord = vec2(texCoord.x + ((int(curBase)%102)*0.009765625f)
, (1.0 - texCoord.y) - ((int(curBase)/102)*0.009765625f));
//curBase = Base+1;
TexCoord2 = vec2(texCoord.x + ((int(curBase)%102)*0.009765625f)
, (1.0 - texCoord.y) - ((int(curBase)/102)*0.009765625f));
//curBase = Base+2;
TexCoord3 = vec2(texCoord.x + ((int(curBase)%102)*0.009765625f)
, (1.0 - texCoord.y) - ((int(curBase)/102)*0.009765625f));
}
Fragment:
#version 330 core
//in vec3 ourColor;
in vec2 TexCoord;
in vec2 TexCoord2;
in vec2 TexCoord3;
in vec2 TexCoord4;
in vec2 TexCoord5;
in vec2 TexCoord6;
out vec4 color;
// Texture samplers
uniform sampler2D ourTexture1;
uniform sampler2D ourTexture2;
void main()
{
color = (texture(ourTexture2, TexCoord )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord2 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord3 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord4 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord5 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord6 )== vec4(1.0,0.0,1.0,1.0)?
vec4(0.0f,0.0f,0.0f,0.0f)
:texture(ourTexture2, TexCoord6 ))
:texture(ourTexture2, TexCoord5 ))
:texture(ourTexture2, TexCoord4 ))
:texture(ourTexture2, TexCoord3 ))
:texture(ourTexture2, TexCoord2 ))
:texture(ourTexture2, TexCoord ));
}
This is wrong in two different ways:
glVertexAttribPointer(1, intsPerTriangle, GL_INT, GL_FALSE, 0, 0);
Vertex attributes in the GL can be scalars or vectors of 2 to 4 components. Hence, the size parameter of glVertexAttribPointer can take the values 1, 2, 3 or 4. Using a different value (intsPerTriangle == 6) means that the call will just generate a GL_INVALID_VALUE error and have no other effect, so you don't even set a pointer.
If you want to pass 6 values per vertex, you can either use 6 different scalar attributes (consuming 6 attribute slots), or pack them into vectors, for example two 3-component vectors (consuming only 2 slots). No matter what packing you choose, you'll need a proper attrib pointer setup for each attribute slot in use.
However, glVertexAttribPointer is also the wrong function for your use case. It defines floating-point attributes, which must have matching declarations as float/vec* in the shader. The fact that you can input GL_INT just means that the GPU can do the conversion to floating-point on the fly for you.
If you want to use an int or ivec (or their unsigned counterparts) attribute, you have to use glVertexAttribIPointer (note the I in that function name) when setting up the attribute.
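A minimal sketch of that setup, assuming the six ints are packed as two ivec3 attributes in locations 1 and 2 (the attribute names here are illustrative, not from the question):
glBindBuffer(GL_ARRAY_BUFFER, vbo2); // vbo2 holds 6 tightly packed ints per vertex
const GLsizei stride = 6 * sizeof(int);
glEnableVertexAttribArray(1);
glVertexAttribIPointer(1, 3, GL_INT, stride, (const void*)0); // first three tile layers
glEnableVertexAttribArray(2);
glVertexAttribIPointer(2, 3, GL_INT, stride, (const void*)(3 * sizeof(int))); // last three
with the matching shader declarations:
layout (location = 1) in ivec3 BaseLo;
layout (location = 2) in ivec3 BaseHi;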
Related
I am trying to code a function which automatically populates a mesh's index vector container. The function should work without issue in theory as it generates the proper indices in their correct order; however, the triangles do not form! Instead, I am left with a single line.
My mesh generation code is supposed to build an octahedron and then render it in the main game loop. The mesh class is shown below in its entirety:
struct vertex
{
glm::vec3 position;
glm::vec3 color;
};
class Mesh
{
public:
GLuint VAO, VBO, EBO;
std::vector <vertex> vtx;
std::vector <glm::vec3> idx;
glm::mat4 modelMatrix = glm::mat4(1.f);
Mesh(glm::vec3 position, glm::vec3 scale)
{
vertexGen(6);
idx = indexGen(6);
modelMatrix = glm::scale(glm::translate(modelMatrix, position), scale);
initMesh();
};
void Render(Shader shaderProgram, Camera camera, bool wireframe)
{
glUseProgram(shaderProgram.ID);
glPatchParameteri(GL_PATCH_VERTICES, 3); // Indicates to the VAO that each group of three vertices is one patch (triangles)
glProgramUniformMatrix4fv(shaderProgram.ID, 0, 1, GL_FALSE, glm::value_ptr(modelMatrix));
glProgramUniformMatrix4fv(shaderProgram.ID, 1, 1, GL_FALSE, glm::value_ptr(camera.camMatrix));
glProgramUniform3fv(shaderProgram.ID, 2, 1, glm::value_ptr(camera.Position));
glBindVertexArray(VAO); // Binds the VAO to the shader program
if (wireframe)
{
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDisable(GL_CULL_FACE);
}
else
{
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
//glEnable(GL_CULL_FACE);
}
glDrawElements(GL_PATCHES, idx.size(), GL_UNSIGNED_INT, 0); // Tells the shader program how to draw the primitives
}
private:
void vertexGen(int n) {
// Populate the base six vertices
vtx.push_back(vertex{ glm::vec3( 0.0f, 0.5f, 0.0f), glm::vec3(0.f, 1.f, 0.f) });
vtx.push_back(vertex{ glm::vec3(-0.5f, 0.0f, 0.0f), glm::vec3(0.f, 1.f, 0.f) });
vtx.push_back(vertex{ glm::vec3( 0.0f, 0.0f, -0.5f), glm::vec3(0.f, 1.f, 0.f) });
vtx.push_back(vertex{ glm::vec3( 0.5f, 0.0f, 0.0f), glm::vec3(0.f, 1.f, 0.f) });
vtx.push_back(vertex{ glm::vec3( 0.0f, 0.0f, 0.5f), glm::vec3(0.f, 1.f, 0.f) });
vtx.push_back(vertex{ glm::vec3( 0.0f,-0.5f, 0.0f), glm::vec3(0.f, 1.f, 0.f) });
}
std::vector<glm::vec3> indexGen(int n) {
std::vector<glm::vec3> indices;
// Calculate the indices for the top 4 triangles
indices.push_back(glm::vec3( 0, n - 5, n - 4 ));
indices.push_back(glm::vec3( 0, n - 4, n - 3 ));
indices.push_back(glm::vec3( 0, n - 3, n - 2 ));
indices.push_back(glm::vec3( 0, n - 2, n - 5 ));
// Calculate the indices for the bottom 4 triangles
indices.push_back(glm::vec3( 5, n - 5, n - 4));
indices.push_back(glm::vec3( 5, n - 4, n - 3));
indices.push_back(glm::vec3( 5, n - 3, n - 2));
indices.push_back(glm::vec3( 5, n - 2, n - 5));
return indices;
}
void initMesh()
{
glCreateVertexArrays(1, &VAO); // Sets the address of the uint VAO as the location of a gl vertex array object
glCreateBuffers(1, &VBO); // Sets the address of the uint VBO as the location of a gl buffer object
glCreateBuffers(1, &EBO); // Sets the address of the uint EBO as the location of a gl buffer object
glNamedBufferData(VBO, vtx.size() * sizeof(vtx[0]), vtx.data(), GL_STATIC_DRAW); // Sets the data of the buffer named VBO
glNamedBufferData(EBO, idx.size() * sizeof(idx[0]), idx.data(), GL_STATIC_DRAW); // Sets the data of the buffer named EBO
glEnableVertexArrayAttrib(VAO, 0); // Enables an attribute of the VAO in location 0
glEnableVertexArrayAttrib(VAO, 1); // Enables an attribute of the VAO in location 1
glVertexArrayAttribBinding(VAO, 0, 0); // Layout Location of Position Vectors
glVertexArrayAttribBinding(VAO, 1, 0); // Layout Location of Color Values
glVertexArrayAttribFormat(VAO, 0, 3, GL_FLOAT, GL_FALSE, 0); // Size, and Type of Position Vectors
glVertexArrayAttribFormat(VAO, 1, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat)); // For the Color Values
glVertexArrayVertexBuffer(VAO, 0, VBO, 0, 6 * sizeof(GLfloat)); // Sets the VBO to indicate the start, offset, and stride of vertex data in the VAO
glVertexArrayElementBuffer(VAO, EBO); // Sets the EBO to index the VAO vertex connections
}
};
I took this problem step by step and did all of the basic math on paper. The index generation function returns the same indices, in the same order, as writing them out by hand; yet the hand-written indices render the desired result, whereas the generated ones only produce a single line:
I suspect that the issue lies in my mesh initialization function (initMesh), specifically in the glNamedBufferData or glVertexArrayVertexBuffer, but my knowledge of the functions is very limited. I tried changing the parameter of the glNamedBufferData function to different variations of idx.size()*sizeof(idx[0].x), but that yielded the same results, so I am at a loss. Could someone help me fix this, please?
glm::vec3 is a vector of floats (I think) but you are telling OpenGL to read them as unsigned ints.
Float 0.0 is 0x00000000 (i.e. same as int 0), but float 1.0 is 0x3f800000 (same as int 1065353216). They aren't compatible ways to store numbers. You could try glm::ivec3 which is a vector of ints, but I think most people would use std::vector<int> (or unsigned int) and use 3 entries per triangle.
I think it's okay in this case, but using types like ivec3 when you really mean 3 separate ints isn't always good practice, because the compiler can insert padding in unexpected places. On some platforms, ivec3 could be 3 ints plus an extra 4 bytes of padding, making 16 bytes in total, and the extra padding bytes would throw off the layout you're relying on. glDrawElements wouldn't skip over padding after every 3 indices, and there would be no way to tell it to do that. It's okay for vertices, since you can tell OpenGL exactly where the data is.
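A minimal sketch of that flat-vector approach, keeping the rest of the Mesh class from the question unchanged:
std::vector<unsigned int> idx; // three entries per triangle instead of glm::vec3
// e.g. the first top triangle, with n == 6:
idx.insert(idx.end(), { 0u, (unsigned)(6 - 5), (unsigned)(6 - 4) });
// the EBO then holds tightly packed GL_UNSIGNED_INT data:
glNamedBufferData(EBO, idx.size() * sizeof(unsigned int), idx.data(), GL_STATIC_DRAW);
glDrawElements(GL_PATCHES, (GLsizei)idx.size(), GL_UNSIGNED_INT, 0);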
I have drawn a box. I added the basic class that deals with the VAO and VBO.
class BasicObject_V2
{
public:
BasicObject_V2(GLuint typeOfPrimitives = 0){
this->typeOfPrimitives = typeOfPrimitives;
switch (this->typeOfPrimitives){
case GL_QUADS: numVertsPerPrimitive = 4; break;
}}
void allocateMemory(){
glGenVertexArrays(1, &vaoID);
glBindVertexArray(vaoID);
glGenBuffers(1, &vboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(vertexAttributes), &vertices[0], GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 48, (const void*)0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 48, (const void*)12);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, 48, (const void*)24);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 48, (const void*)40);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
}
GLuint vboID;
GLuint vaoID;
GLuint typeOfPrimitives;
GLuint numVertsPerPrimitive;
int getNumberOfVertices(){
return vertices.size();
}
struct vertexAttributes{
glm::vec3 pos;
glm::vec3 normal;
glm::vec4 color;
glm::vec2 texCoord;
};
std::vector<vertexAttributes> vertices;
};
Here is my Box class.
class Box_V2 : public BasicObject_V2 {
public:
Box_V2(float width = 1, float height = 1, float length = 1) : BasicObject_V2(GL_QUADS) {
float wHalf = width / 2.0f;
float hHalf = height / 2.0f;
float lHalf = length / 2.0f;
vertexAttributes va;
glm::vec3 corners[8];
corners[0] = glm::vec3(-wHalf, -hHalf, lHalf);
corners[1] = (...)
glm::vec3 normals[6];
normals[0] = glm::vec3(0, 0, 1); // front
//(...)
// FRONT
va.pos = corners[0];
va.normal = normals[0];
va.texCoord = glm::vec2(0.0, 0.0);
vertices.push_back(va);
//...
allocateMemory();
}
};
Also, I have set the texture for it (according to the OpenGL SuperBible example), but it does not work at all.
static GLubyte checkImage[128][128][4];
void makeCheckImage(void)
{
int i, j, c;
for (i = 0; i<128; i++) {
for (j = 0; j<128; j++) {
c = ((((i & 0x8) == 0) ^ ((j & 0x8)) == 0)) * 255;
checkImage[i][j][0] = (GLubyte)c;
checkImage[i][j][1] = (GLubyte)c;
checkImage[i][j][2] = (GLubyte)c;
checkImage[i][j][3] = (GLubyte)255;
}}}
void init(){
glClearColor(0.7, 0.4, 0.6, 1);
//Create a Texture
makeCheckImage();
//Texure
glGenTextures(4, tex);
//glCreateTextures(GL_TEXTURE_2D, 4, tex);//4.5
glTextureStorage2D(tex[0], 0, GL_RGBA32F, 2, 2);
glBindTexture(GL_TEXTURE_2D, tex[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGBA, GL_FLOAT, checkImage);
//glTextureSubImage2D(tex[0], 0, 0, 0, 2, 2, GL_RGBA, GL_FLOAT, checkImage);//4.5
//load the shader
programID = loadShaders("vertex.vsh", "fragment.fsh");
glUseProgram(programID);
//create a box
box_v2 = new Box_V2();
glBindTexture(GL_TEXTURE_2D, tex[0]);
}
void drawing(BasicObject* object){
glBindVertexArray(object->vaoID);
glDrawArrays(object->typeOfPrimitives, 0, object->getNumberOfVertices() * object->numVertsPerPrimitive);
glBindVertexArray(0);
}
These are my shaders:
VS:
#version 450 core
layout(location = 0) in vec3 pos;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
layout(location = 3) in vec2 texCoord;
uniform mat4 rotate;(...)
out vec4 v_normal;
out vec4 v_color;
out vec4 v_pos;
out vec2 v_texCoord;
void main(){
v_normal = rotate*vec4(normal,1);//*model;
v_color=color;
v_texCoord=texCoord;
gl_Position = rotate*scale*trans*vec4(pos.xyz, 1);
}
And FS:
#version 450 core
in vec4 v_color;
in vec2 v_texCoord;
uniform sampler2D ourTexture;
out vec4 outColor;
void main(){
//outColor=texelFetch(ourTexture, ivec2(gl_FragCoord.xy), 0); //4.5
outColor=texture(ourTexture,v_texCoord*vec2(3.0, 1.0));
}
Now it draws a black box without any texture on it. I'm new to OpenGL and I can't find where the problem is.
There are a lot of things wrong with your code. It looks like you tried to write it one way, then tried to write it another way, leading to some kind of Frankenstein's code where it's not really one way or the other.
glGenTextures(4, tex);
//glCreateTextures(GL_TEXTURE_2D, 4, tex);//4.5
glTextureStorage2D(tex[0], 0, GL_RGBA32F, 2, 2);
This is a perfect example. You use the non-DSA glGenTextures call, but you immediately turn around and use the DSA glTextureStorage2D call on it. You cannot do that. Why? Because tex[0] hasn't been created yet.
glGenTextures only allocates the name for the texture, not its state data. Only by binding the texture does it get contents. That's why DSA introduced the glCreate* functions; they allocate both the name and state for the object. This is also why glCreateTextures takes a target; that target is part of the texture object's state.
In short, the commented out call was correct all along. Looking at your code as written, the glTextureStorage2D call fails with an OpenGL error (FYI: turn on KHR_debug to see when errors happen).
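A minimal sketch of enabling that, assuming a context with GL 4.3 or the KHR_debug extension:
#include <cstdio>
void GLAPIENTRY debugCallback(GLenum source, GLenum type, GLuint id,
    GLenum severity, GLsizei length, const GLchar* message, const void* userParam)
{
    fprintf(stderr, "GL debug: %s\n", message); // report every GL message as it happens
}
// after context creation:
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); // deliver the message on the offending call
glDebugMessageCallback(debugCallback, nullptr);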
So let's look at what your code would look like if we take out the failure parts:
glGenTextures(4, tex);
glBindTexture(GL_TEXTURE_2D, tex[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGBA, GL_FLOAT, checkImage);
There are two problems here. First, as BDL mentioned, your data is described incorrectly. You're passing GLubytes, but you told OpenGL you were passing GLfloats. Don't lie to OpenGL.
Second, there's the internal format. Because you used an unsized internal format (and because you're not in OpenGL ES land), the format your implementation chooses will be unsigned normalized. It won't be a 32-bit floating point format. If you want one of those, you must explicitly ask for it, with GL_RGBA32F.
Furthermore, because you allocated mutable storage for your texture (ie: because glTextureStorage2D failed), you now have a problem. You only allocated one mipmap level, but the default filtering mode will try to fetch from multiple mipmaps. And you never set the texture's mipmap range to only sample from the first. So your texture is not complete. An immutable storage texture would have made the mipmap range match what you allocated, but mutable storage textures aren't so nice.
You should always set your mipmap range when creating mutable storage textures:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0); //It's a closed range.
Lastly, you bind the texture to unit 0 for rendering (mostly by accident, since you never call glActiveTexture). But you never bother to inform your shader of this, by setting the ourTexture uniform value to 0.
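Putting those fixes together, a minimal sketch of the pure-DSA path (assuming the full 128x128 GLubyte checkImage is what should be uploaded, and that programID is the shader program in use):
glCreateTextures(GL_TEXTURE_2D, 4, tex); // allocates name and state
glTextureStorage2D(tex[0], 1, GL_RGBA8, 128, 128); // one level, unsigned normalized
glTextureSubImage2D(tex[0], 0, 0, 0, 128, 128, GL_RGBA, GL_UNSIGNED_BYTE, checkImage);
glTextureParameteri(tex[0], GL_TEXTURE_MIN_FILTER, GL_LINEAR); // no mipmaps allocated
glBindTextureUnit(0, tex[0]); // bind to unit 0 explicitly
glUniform1i(glGetUniformLocation(programID, "ourTexture"), 0); // tell the sampler about it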
There are several problems with your code:
Data type doesn't match data
The data comes in an unsigned byte format (GLubyte checkImage[128][128][4]) with a size of 4 bytes per pixel (1 byte per channel), but you tell OpenGL that it is in GL_FLOAT format, which would require 16 bytes per pixel (4 bytes per channel).
Data storage
See answer from 246Nt.
Mipmaps/Filters
By default, OpenGL has the minification filter set to GL_LINEAR_MIPMAP_LINEAR, which requires the user to supply valid mipmaps for the texture. One can either change this filter to GL_LINEAR or GL_NEAREST, which do not require mipmaps (see glTexParameteri), or generate mipmaps (glGenerateMipmap).
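A minimal sketch of the two options, for a texture where only level 0 has been uploaded:
// Option 1: a filter that never touches mipmaps
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Option 2: keep the mipmapped filter and generate the missing levels
glGenerateMipmap(GL_TEXTURE_2D);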
I am trying to render 3D models with textures using Assimp. The conversion goes perfectly; all texture positions and whatnot get loaded. I have tested the texture images by drawing them to the screen in 2D.
For some reason it does not render the textures to the model.
I am a beginner in OpenGL, so forgive me if I don't explain it right.
The tutorial I have based the code on is from here, but I stripped a big part since I have my own camera/movement system.
The model renders like this: http://i.stack.imgur.com/5sK9K.png
while the texture in use looks like this: http://i.stack.imgur.com/sWGp7.jpg
The relevant rendering code is the following:
Generating textures from data file:
int Mesh::LoadGLTextures(const aiScene* scene){
if (scene->HasTextures()) return -1; //yes this is correct
/* getTexture Filenames and Numb of Textures */
for (unsigned int m = 0; m<scene->mNumMaterials; m++){
int texIndex = 0;
aiReturn texFound;
aiString path; // filename
while ((texFound = scene->mMaterials[m]->GetTexture(aiTextureType_DIFFUSE, texIndex, &path)) == AI_SUCCESS){
textureIdMap[path.data] = NULL; //fill map with textures, pointers still NULL yet
texIndex++;
}
}
int numTextures = textureIdMap.size();
/* create and fill array with GL texture ids */
GLuint* textureIds = new GLuint[numTextures];
/* get iterator */
std::map<std::string, GLuint>::iterator itr = textureIdMap.begin();
std::string basepath = getBasePath(path);
ALLEGRO_BITMAP *image;
for (int i = 0; i<numTextures; i++){
std::string filename = (*itr).first; // get filename
(*itr).second = textureIds[i]; // save texture id for filename in map
itr++; // next texture
std::string fileloc = basepath + filename; /* Loading of image */
image = al_load_bitmap(fileloc.c_str());
if (image) /* If no error occured: */{
GLuint texId = al_get_opengl_texture(image);
//glGenTextures(numTextures, &textureIds[i]); /* Texture name generation */
glBindTexture(GL_TEXTURE_2D, texId); /* Binding of texture name */
//redefine standard texture values
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); /* We will use linear
interpolation for magnification filter */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); /* We will use linear
interpolation for minifying filter */
textureIdMap[filename] = texId;
} else {
/* Error occured */
std::cout << "Couldn't load Image: " << fileloc.c_str() << "\n";
}
}
//Cleanup
delete[] textureIds;
//return success
return true;
}
Generating VBO/VAO:
void Mesh::genVAOsAndUniformBuffer(const aiScene *sc) {
struct MyMesh aMesh;
struct MyMaterial aMat;
GLuint buffer;
// For each mesh
for (unsigned int n = 0; n < sc->mNumMeshes; ++n){
const aiMesh* mesh = sc->mMeshes[n];
// create array with faces
// have to convert from Assimp format to array
unsigned int *faceArray;
faceArray = (unsigned int *)malloc(sizeof(unsigned int) * mesh->mNumFaces * 3);
unsigned int faceIndex = 0;
for (unsigned int t = 0; t < mesh->mNumFaces; ++t) {
const aiFace* face = &mesh->mFaces[t];
memcpy(&faceArray[faceIndex], face->mIndices, 3 * sizeof(unsigned int));
faceIndex += 3;
}
aMesh.numFaces = sc->mMeshes[n]->mNumFaces;
// generate Vertex Array for mesh
glGenVertexArrays(1, &(aMesh.vao));
glBindVertexArray(aMesh.vao);
// buffer for faces
glGenBuffers(1, &buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * mesh->mNumFaces * 3, faceArray, GL_STATIC_DRAW);
// buffer for vertex positions
if (mesh->HasPositions()) {
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * mesh->mNumVertices, mesh->mVertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(vertexLoc);
glVertexAttribPointer(vertexLoc, 3, GL_FLOAT, 0, 0, 0);
}
// buffer for vertex normals
if (mesh->HasNormals()) {
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * mesh->mNumVertices, mesh->mNormals, GL_STATIC_DRAW);
glEnableVertexAttribArray(normalLoc);
glVertexAttribPointer(normalLoc, 3, GL_FLOAT, 0, 0, 0);
}
// buffer for vertex texture coordinates
if (mesh->HasTextureCoords(0)) {
float *texCoords = (float *)malloc(sizeof(float) * 2 * mesh->mNumVertices);
for (unsigned int k = 0; k < mesh->mNumVertices; ++k) {
texCoords[k * 2] = mesh->mTextureCoords[0][k].x;
texCoords[k * 2 + 1] = mesh->mTextureCoords[0][k].y;
}
glGenBuffers(1, &buffer);
glEnableVertexAttribArray(texCoordLoc);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 2 * mesh->mNumVertices, texCoords, GL_STATIC_DRAW);
glVertexAttribPointer(texCoordLoc, 2, GL_FLOAT, GL_FALSE, 0, 0);
}
// unbind buffers
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// create material uniform buffer
aiMaterial *mtl = sc->mMaterials[mesh->mMaterialIndex];
aiString texPath; //contains filename of texture
if (AI_SUCCESS == mtl->GetTexture(aiTextureType_DIFFUSE, 0, &texPath)){
//bind texture
unsigned int texId = textureIdMap[texPath.data];
aMesh.texIndex = texId;
aMat.texCount = 1;
} else {
aMat.texCount = 0;
}
float c[4];
set_float4(c, 0.8f, 0.8f, 0.8f, 1.0f);
aiColor4D diffuse;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_DIFFUSE, &diffuse))
color4_to_float4(&diffuse, c);
memcpy(aMat.diffuse, c, sizeof(c));
set_float4(c, 0.2f, 0.2f, 0.2f, 1.0f);
aiColor4D ambient;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_AMBIENT, &ambient))
color4_to_float4(&ambient, c);
memcpy(aMat.ambient, c, sizeof(c));
set_float4(c, 0.0f, 0.0f, 0.0f, 1.0f);
aiColor4D specular;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_SPECULAR, &specular))
color4_to_float4(&specular, c);
memcpy(aMat.specular, c, sizeof(c));
set_float4(c, 0.0f, 0.0f, 0.0f, 1.0f);
aiColor4D emission;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_EMISSIVE, &emission))
color4_to_float4(&emission, c);
memcpy(aMat.emissive, c, sizeof(c));
float shininess = 0.0;
unsigned int max;
aiGetMaterialFloatArray(mtl, AI_MATKEY_SHININESS, &shininess, &max);
aMat.shininess = shininess;
glGenBuffers(1, &(aMesh.uniformBlockIndex));
glBindBuffer(GL_UNIFORM_BUFFER, aMesh.uniformBlockIndex);
glBufferData(GL_UNIFORM_BUFFER, sizeof(aMat), (void *)(&aMat), GL_STATIC_DRAW);
myMeshes.push_back(aMesh);
}
}
Rendering model:
void Mesh::recursive_render(const aiScene *sc, const aiNode* nd){
// draw all meshes assigned to this node
for (unsigned int n = 0; n < nd->mNumMeshes; ++n){
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, myMeshes[nd->mMeshes[n]].texIndex);
// bind VAO
glBindVertexArray(myMeshes[nd->mMeshes[n]].vao);
// draw
glDrawElements(GL_TRIANGLES, myMeshes[nd->mMeshes[n]].numFaces * 3, GL_UNSIGNED_INT, 0);
}
// draw all children
for (unsigned int n = 0; n < nd->mNumChildren; ++n){
recursive_render(sc, nd->mChildren[n]);
}
}
Any other relevant code parts can be found in my open github project https://github.com/kwek20/StrategyGame/tree/master/Strategy
Mesh.cpp is relevant, as well as main.cpp and Camera.cpp.
As far as I understand, I followed the guidelines well: created a VAO, created VBOs, added data, and enabled the proper vertex array attributes to render the scene with.
I have checked all the data variables and everything is filled according to plan.
Could anyone here spot the mistake I have made and/or explain it?
Some links are typed weird because of the limit I have :(
It would help if you posted your shaders also.
I can post some rendering code with textures if that helps you out:
Generating the texture for OpenGL and loading a grayscale (UC8) image with a given width and height into the GPU:
void GLRenderer::getTexture(unsigned char * image, int width, int height)
{
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &mTextureID);
glBindTexture(GL_TEXTURE_2D, mTextureID);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGB8, width, height);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_BGR, GL_UNSIGNED_BYTE, image);
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
glBindTexture(GL_TEXTURE_2D, 0);
}
Loading the vertices from Assimp onto the GPU:
//** buffer a obj file-style model, initialize the VAO
void GLRenderer::bufferModel(float* aVertexArray, int aNumberOfVertices, float* aNormalArray, int aNumberOfNormals, float* aUVList, int aNumberOfUVs, unsigned int* aIndexList, int aNumberOfIndices)
{
//** just to be sure we are current
glfwMakeContextCurrent(mWin);
//** Buffer all data in VBOs
glGenBuffers(1, &mVertex_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mVertex_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfVertices * 3, aVertexArray, GL_STATIC_DRAW);
glGenBuffers(1, &mNormal_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mNormal_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfNormals * 3, aNormalArray, GL_STATIC_DRAW);
glGenBuffers(1, &mUV_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mUV_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfUVs * 2, aUVList, GL_STATIC_DRAW);
glGenBuffers(1, &mIndex_buffer_object);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndex_buffer_object);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * aNumberOfIndices, aIndexList, GL_STATIC_DRAW);
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
//** VAO tells our shaders how to match up data from buffer to shader input variables
glGenVertexArrays(1, &mVertex_array_object);
glBindVertexArray(mVertex_array_object);
//** vertices first
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, mVertex_buffer_object);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
//** normals next
if (aNumberOfNormals > 0){
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, mNormal_buffer_object);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
}
//** UVs last
if (aNumberOfUVs > 0){
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, mUV_buffer_object);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, NULL);
}
//** indexing for reusing vertices in triangle-meshes
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndex_buffer_object);
//** check errors and store the number of vertices
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
mNumVert = aNumberOfVertices;
mNumNormals = aNumberOfNormals;
mNumUVs = aNumberOfUVs;
mNumIndices = aNumberOfIndices;
}
The code above is called like:
//read vertices from file
std::vector<float> vertex, normal, uv;
std::vector<unsigned int> index;
//assimp-wrapping function to load obj to vectors
aux::loadObjToVectors("Resources\\vertices\\model.obj", vertex, normal, index, uv);
mPtr->bufferModel(&vertex[0], static_cast<int>(vertex.size()) / 3, &normal[0], static_cast<int>(normal.size()) / 3, &uv[0], static_cast<int>(uv.size()) / 2, &index[0], static_cast<int>(index.size()));
Then comes the shader-part:
In the vertex shader you just pass the UV coordinates through:
#version 400 core
layout (location = 0) in vec3 vertexPosition_modelspace;
layout (location = 1) in vec3 vertexNormal_modelspace;
layout (location = 2) in vec2 vertexUV;
out vec2 UV;
[... in main then ...]
UV = vertexUV;
While in the fragment shader you assign the value to the pixel:
#version 400 core
in vec2 UV;
uniform sampler2D textureSampler;
layout(location = 0) out vec4 outColor;
[... in main then ...]
// you probably want to calculate lighting here too; this is just the simplest way to sample the texture
outColor = vec4(texture(textureSampler, UV).rgb, 1.0);
//you can also check whether the UV coords are correctly bound by using:
outColor = vec4(UV.x, UV.y,1,1);
//and then checking the pixel-values in the resulting image (e.g. render it to a PBO and then download it onto the CPU for inspection)
In the rendering loop, also make sure that all the uniforms are correctly bound (especially texture-related ones) and that the texture is active and bound:
if (mTextureID != -1) {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, mTextureID);
}
GLint textureLocation = glGetUniformLocation(mShaderProgram, "textureSampler");
glUniform1i(textureLocation, 0);
//**set the poligon mode
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
//**drawElements because of indexing
glDrawElements(GL_TRIANGLES, mNumIndices, GL_UNSIGNED_INT, 0);
I hope I could help you!
Kind regards,
VdoP
I'm having a really weird issue with depth testing here.
I'm rendering a simple mesh in an OpenGL 3.3 core profile context on Windows, with depth testing enabled and glDepthFunc set to GL_LESS. On my machine (a laptop with a nVidia Geforce GTX 660M), everything is working as expected, the depth test is working, this is what it looks like:
Now, if I run the program on a different PC, a tower with a Radeon R9 280, it looks more like this:
The really weird thing is that when I call glEnable(GL_DEPTH_TEST) every frame before drawing, the result is correct on both machines.
As it's working when I do that, I figure the depth buffer is correctly created on both machines, it just seems that the depth test is somehow being disabled before rendering when I enable it only once at initialization.
Here's the minimum code that could somehow be part of the problem:
Code called at initialization, after a context is created and made current:
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
Code called every frame before the buffer swap:
glClearColor(0.4f, 0.6f, 0.8f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// mShaderProgram->getID() simply returns the handle of a simple shader program
glUseProgram(mShaderProgram->getID());
glm::vec3 myColor = glm::vec3(0.7f, 0.5f, 0.4f);
GLuint colorLocation = glGetUniformLocation(mShaderProgram->getID(), "uColor");
glUniform3fv(colorLocation, 1, glm::value_ptr(myColor));
glm::mat4 modelMatrix = glm::mat4(1.0f);
glm::mat4 viewMatrix = glm::lookAt(glm::vec3(0.0f, 3.0f, 5.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
glm::mat4 projectionMatrix = glm::perspectiveFov(60.0f, (float)mWindow->getProperties().width, (float)mWindow->getProperties().height, 1.0f, 100.0f);
glm::mat4 inverseTransposeMVMatrix = glm::inverseTranspose(viewMatrix*modelMatrix);
GLuint mMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uModelMatrix");
GLuint vMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uViewMatrix");
GLuint pMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uProjectionMatrix");
GLuint itmvMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uInverseTransposeMVMatrix");
glUniformMatrix4fv(mMatrixLocation, 1, GL_FALSE, glm::value_ptr(modelMatrix));
glUniformMatrix4fv(vMatrixLocation, 1, GL_FALSE, glm::value_ptr(viewMatrix));
glUniformMatrix4fv(pMatrixLocation, 1, GL_FALSE, glm::value_ptr(projectionMatrix));
glUniformMatrix4fv(itmvMatrixLocation, 1, GL_FALSE, glm::value_ptr(inverseTransposeMVMatrix));
// Similiar to the shader program, mMesh.gl_vaoID is simply the handle of a vertex array object
glBindVertexArray(mMesh.gl_vaoID);
glDrawArrays(GL_TRIANGLES, 0, mMesh.faces.size()*3);
With the above code, I'll get the wrong output on the Radeon.
Note: I'm using GLFW3 for context creation and GLEW for the function pointers (and obviously GLM for the math).
The vertex array object contains three attribute array buffers, for positions, uv coordinates and normals. Each of these should be correctly configured and send to the shaders, as everything is working fine when enabling the depth test every frame.
I should also mention that the Radeon machine runs Windows 8 while the nVidia machine runs Windows 7.
Edit: By request, here's the code used to load the mesh and create the attribute data. I do not create any element buffer objects as I am not using element draw calls.
std::vector<glm::vec3> positionData;
std::vector<glm::vec2> uvData;
std::vector<glm::vec3> normalData;
std::vector<meshFaceIndex> faces;
std::ifstream fileStream(path);
if (!fileStream.is_open()){
std::cerr << "ERROR: Could not open file '" << path << "!\n";
return;
}
std::string lineBuffer;
while (std::getline(fileStream, lineBuffer)){
std::stringstream lineStream(lineBuffer);
std::string typeString;
lineStream >> typeString; // Get line token
if (typeString == TOKEN_VPOS){ // Position
glm::vec3 pos;
lineStream >> pos.x >> pos.y >> pos.z;
positionData.push_back(pos);
}
else{
if (typeString == TOKEN_VUV){ // UV coord
glm::vec2 UV;
lineStream >> UV.x >> UV.y;
uvData.push_back(UV);
}
else{
if (typeString == TOKEN_VNORMAL){ // Normal
glm::vec3 normal;
lineStream >> normal.x >> normal.y >> normal.z;
normalData.push_back(normal);
}
else{
if (typeString == TOKEN_FACE){ // Face
meshFaceIndex faceIndex;
char interrupt;
for (int i = 0; i < 3; ++i){
lineStream >> faceIndex.positionIndex[i] >> interrupt
>> faceIndex.uvIndex[i] >> interrupt
>> faceIndex.normalIndex[i];
}
faces.push_back(faceIndex);
}
}
}
}
}
fileStream.close();
std::vector<glm::vec3> packedPositions;
std::vector<glm::vec2> packedUVs;
std::vector<glm::vec3> packedNormals;
for (auto f : faces){
Face face; // Derp derp;
for (auto i = 0; i < 3; ++i){
if (!positionData.empty()){
face.vertices[i].position = positionData[f.positionIndex[i] - 1];
packedPositions.push_back(face.vertices[i].position);
}
else
face.vertices[i].position = glm::vec3(0.0f);
if (!uvData.empty()){
face.vertices[i].uv = uvData[f.uvIndex[i] - 1];
packedUVs.push_back(face.vertices[i].uv);
}
else
face.vertices[i].uv = glm::vec2(0.0f);
if (!normalData.empty()){
face.vertices[i].normal = normalData[f.normalIndex[i] - 1];
packedNormals.push_back(face.vertices[i].normal);
}
else
face.vertices[i].normal = glm::vec3(0.0f);
}
myMesh.faces.push_back(face);
}
glGenVertexArrays(1, &(myMesh.gl_vaoID));
glBindVertexArray(myMesh.gl_vaoID);
GLuint positionBuffer; // positions
glGenBuffers(1, &positionBuffer);
glBindBuffer(GL_ARRAY_BUFFER, positionBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3)*packedPositions.size(), &packedPositions[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
GLuint uvBuffer; // uvs
glGenBuffers(1, &uvBuffer);
glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec2)*packedUVs.size(), &packedUVs[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
GLuint normalBuffer; // normals
glGenBuffers(1, &normalBuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3)*packedNormals.size(), &packedNormals[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
The .obj loading routine is mostly adapted from this one:
http://www.limegarden.net/2010/03/02/wavefront-obj-mesh-loader/
This doesn't look like a depth testing issue to me, but more like misalignment in the vertex / index array data. Please show us the code in which you load the vertex buffer objects and the element buffer objects.
It is because of the function ChoosePixelFormat.
In my case, ChoosePixelFormat returned a pixel format ID with value 8, which provides a depth buffer with 16 bits instead of the required 24 bits.
One simple fix was to set the ID manually to 11 instead of 8 to get a pixel format suitable for the application, with 24 bits of depth buffer.
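A sketch of guarding against that, assuming a raw WGL setup where hdc and requestedPfd are hypothetical names for your device context and requested format descriptor; with GLFW (which the question uses), calling glfwWindowHint(GLFW_DEPTH_BITS, 24) before window creation serves the same purpose:
int id = ChoosePixelFormat(hdc, &requestedPfd);
PIXELFORMATDESCRIPTOR actual = {};
DescribePixelFormat(hdc, id, sizeof(actual), &actual); // query what was really chosen
if (actual.cDepthBits < 24) {
    // fall back: enumerate the formats manually or fail with a clear message
}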
After many painful hours of attempting to figure out why my lighting is messed up I am still at a loss.
The OpenGL normals are correct (backface culling does not cause any of my triangles to disappear)
I calculate my normals in order to interpolate for lighting; all the triangles on the same faces also have the same normals.
If anyone has any thoughts, they would be appreciated.
I am definitely new to OpenGL, so that is a bit obvious in my code.
Here are my shaders:
vertex shader
#version 330 core
layout(location = 0) in vec3 Position;
layout(location = 1) in vec3 vertexColor;
in vec3 vNormal;
out vec3 fragmentColor; // Output data ; will be interpolated for each fragment.
uniform mat4 MVP;
uniform mat4 transformMatrix;
uniform vec4 LightPosition;
// output values that will be interpretated per-fragment
out vec3 fN;
out vec3 fE;
out vec3 fL;
void main()
{
fN = vNormal;
fE = Position.xyz;
fL = LightPosition.xyz;
if( LightPosition.w != 0.0 ) {
fL = LightPosition.xyz - Position.xyz;
}
// Output position of the vertex, in clip space : MVP * position
vec4 v = vec4(Position,1); // Transform in homoneneous 4D vector
gl_Position = MVP * v;
//gl_Position = MVP * v;
// The color of each vertex will be interpolated
// to produce the color of each fragment
//fragmentColor = vertexColor; // take out at some point
}
and the fragment shader, using Phong shading:
#version 330
//out vec3 color;
// per-fragment interpolated values from the vertex shader
in vec3 fN;
in vec3 fL;
in vec3 fE;
out vec4 fColor;
uniform vec4 AmbientProduct, DiffuseProduct, SpecularProduct;
uniform mat4 ModelView;
uniform vec4 LightPosition;
uniform float Shininess;
in vec3 fragmentColor; // Interpolated values from the vertex shaders
void main()
{
// Normalize the input lighting vectors
vec3 N = normalize(fN);
vec3 E = normalize(fE);
vec3 L = normalize(fL);
vec3 H = normalize( L + E );
vec4 ambient = AmbientProduct;
float Kd = max(dot(L, N), 0.0);
vec4 diffuse = Kd*DiffuseProduct;
float Ks = pow(max(dot(N, H), 0.0), Shininess);
vec4 specular = Ks*SpecularProduct;
// discard the specular highlight if the light's behind the vertex
if( dot(L, N) < 0.0 ) {
specular = vec4(0.0, 0.0, 0.0, 1.0);
}
fColor = ambient + diffuse + specular;
fColor.a = 1.0;
//color = vec3(1,0,0);
// Output color = color specified in the vertex shader,
// interpolated between all 3 surrounding vertices
//color = fragmentColor;
}
void setMatrices()
{
GLfloat FoV = 45; // the zoom of the camera
glm::vec3 cameraPosition(4,3,3), // the position of your camera, in world space // change to see what happens
cameraTarget(0,0,0), // where you want to look at, in world space
upVector(0,-1,0);
// Projection matrix : 45° Field of View, 4:3 ratio, display range : 0.1 unit <-> 100 units
glm::mat4 Projection = glm::perspective(FoV, 3.0f / 3.0f, 0.001f, 100.0f); // ratio needs to change here when the screen size/ratio changes
// Camera matrix
glm::mat4 View = glm::lookAt(
cameraPosition, // Camera is at (4,3,3), in World Space
cameraTarget, // and looks at the origin
upVector // Head is up (set to 0,-1,0 to look upside-down)
);
// Model matrix : an identity matrix (model will be at the origin)
glm::mat4 Model = glm::mat4(1.0f); // Changes for each model !
// Our ModelViewProjection : multiplication of our 3 matrices
glm::mat4 MVP = Projection * View * Model * transformMatrix; //matrix multiplication is the other way around
// Get a handle for our "MVP" uniform.
// Only at initialisation time.
GLuint MatrixID = glGetUniformLocation(programID, "MVP");
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
// For each model you render, since the MVP will be different (at least the M part)
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
RotationID = glGetUniformLocation(programID,"transformMatrix");
//lighting
cubeNormal = glGetAttribLocation( programID, "vNormal" );
}
void setBuffers()
{
// Get a vertex array object
GLuint VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glUseProgram(programID);
// cube buffer objects
glGenBuffers(1, &CubeVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(CubeBufferData), CubeBufferData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
// cube normal objects
glGenBuffers(1, &CubeNormalbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, CubeNormalbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(CubeNormalBufferData), CubeNormalBufferData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//octahedron buffer objects
glGenBuffers(1, &OctaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(octahedronBufData), octahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//tetrahedron buffer objects
glGenBuffers(1, &TetraVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(tetrahedronBufData), tetrahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//dodecahedron buffer objects
glGenBuffers(1, &DodecaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(dodecahedronBufData), dodecahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//icosahedron buffer objects
glGenBuffers(1, &icosaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(icosahedronBufData), icosahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//sphere buffer objects
glGenBuffers(1, &sphereVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(sphereBufData), sphereBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
glGenBuffers(1, &colorbuffer);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_color_buffer_data), g_color_buffer_data, GL_STATIC_DRAW);
// lighting stuff
// Initialize shader lighting parameters
point4 light_position= { 0.0, 20.0, -10.0, 0.0 };
color4 light_ambient ={ 0.2, 0.2, 0.2, 1.0 };
color4 light_diffuse ={ 1.0, 1.0, 1.0, 1.0 };
color4 light_specular ={ 1.0, 1.0, 1.0, 1.0 };
color4 material_ambient ={ 1.0, 0.0, 1.0, 1.0 };
color4 material_diffuse ={ 1.0, 0.8, 0.0, 1.0 };
color4 material_specular ={ 1.0, 0.8, 0.0, 1.0 };
float material_shininess = 20.0;
color4 ambient_product;
color4 diffuse_product;
color4 specular_product;
int i;
for (i = 0; i < 3; i++) {
ambient_product[i] = light_ambient[i] * material_ambient[i];
diffuse_product[i] = light_diffuse[i] * material_diffuse[i];
specular_product[i] = light_specular[i] * material_specular[i];
}
//printColor("diffuse", diffuse_product);
//printColor("specular", specular_product);
glUniform4fv( glGetUniformLocation(programID, "AmbientProduct"),
1, ambient_product );
glUniform4fv( glGetUniformLocation(programID, "DiffuseProduct"),
1, diffuse_product );
glUniform4fv( glGetUniformLocation(programID, "SpecularProduct"),
1, specular_product );
glUniform4fv( glGetUniformLocation(programID, "LightPosition"),
1, light_position );
glUniform1f( glGetUniformLocation(programID, "Shininess"),
material_shininess );
}
and some more....
void display()
{
setMatrices(); // initilize Matrices
// Use our shader
//glUseProgram(programID);
glClearColor(0.0f, 0.0f, 0.3f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// 2nd attribute buffer : colors
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(0); // 1rst attribute buffer : vertices
// enum platosShapes{tet, cube, octah, dodec, icos};
switch(shapeInUse)
{
case tet:
{
glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 4*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case cube:
{
//GLuint cubeNormal = glGetAttribLocation( programID, "vNormal" );
glEnableVertexAttribArray( cubeNormal );
glVertexAttribPointer( cubeNormal, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid *) (sizeof(CubeNormalBufferData)) );
//glDisableVertexAttribArray( cubeNormal );
glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 12*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case octah:
{
glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 8*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case dodec:
{
glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLE_FAN, 0, 5 * 6); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
glDrawArrays(GL_TRIANGLE_FAN, (5 * 6) + 1, 30);
//glutSolidDodecahedron();
//glDrawArrays(GL_TRIANGLE_STRIP,0,5*12);
}
break;
case icos:
{
glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 3*20); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case sphere:
{
glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
//glDrawElements(GL_TRIANGLES, cnt2, GL_UNSIGNED_INT, 0)
glDrawArrays(GL_TRIANGLE_FAN, 0, 100);
}
}
glDisableVertexAttribArray(0);
glFlush();
}
and some more........
void calculateNormals(GLfloat bufData[], GLfloat normBufData[], int size) // probably works
{
int count = 0;
GLfloat temp[9];
for(int i = 0; i < size; i++)
{
temp[count] = bufData[i];
count++;
if((i+1) % 9 == 0)
{
count = 0;
//for(int i = 0; i < 9; i++)
//{
// cout << temp[i] << "!,";
// if((i + 1) % 3 == 0)
// cout << "\n";
//}
calculateCross(temp, normBufData);
}
}
printNormals(normBufData, size);
}
void calculateCross(GLfloat bufData[], GLfloat normBufData[]) // probably works
{
static int counter = 0; // need to reset in bettween new buffers
glm::vec3 C1;
glm::vec3 C2;
glm::vec3 normal;
//cout << bufData[0] << "," << bufData[1] << "," << bufData[2] << " buf 1 \n";
//cout << bufData[3] << "," << bufData[4] << "," << bufData[5] << " buf 2 \n";
//cout << bufData[6] << "," << bufData[7] << "," << bufData[8] << " buf 3 \n\n";
//C1.x = bufData[3] - bufData[0];
//C1.y = bufData[4] - bufData[1];
//C1.z = bufData[5] - bufData[2];
//C2.x = bufData[6] - bufData[0];
//C2.y = bufData[7] - bufData[1];
//C2.z = bufData[8] - bufData[2];
C1.x = bufData[0] - bufData[3];
C1.y = bufData[1] - bufData[4];
C1.z = bufData[2] - bufData[5];
C2.x = bufData[0] - bufData[6];
C2.y = bufData[1] - bufData[7];
C2.z = bufData[2] - bufData[8];
//C2.x = bufData[6] - bufData[0];
//C2.y = bufData[7] - bufData[1];
//C2.z = bufData[8] - bufData[2];
//cout << C1.x << " 1x \n";
//cout << C1.y << " 1y \n";
//cout << C1.z << " 1z \n";
//cout << C2.x << " 2x \n";
//cout << C2.y << " 2y \n";
//cout << C2.z << " 2z \n";
normal = glm::cross(C1, C2);
//cout << "\nNORMAL : " << normal.x << "," << normal.y << "," << normal.z << " counter = " << counter << "\n";
for(int j = 0; j < 3; j++)
{
for(int i = 0; i < 3; i++)
{
normBufData[counter] = normal.x;
normBufData[counter + 1] = normal.y;
normBufData[counter + 2] = normal.z;
}
counter+=3;
}
}
and main.....
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(700, 700); // Window Size
glutCreateWindow("Michael - Lab 3");
glutDisplayFunc(display);
glutTimerFunc(10, timeFucn, 10);
glutIdleFunc(Idle);
glutKeyboardFunc(keyboard);
glewExperimental = GL_TRUE;
glewInit();
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST); // Enable depth test
glDepthFunc(GL_LESS); // Accept fragment if it closer to the camera than the former one
GenerateSphere(); // this function generates points for the sphere
programID = LoadShader( "VertexShader.glsl", "FragmentShader.glsl" ); // Create and compile our GLSL program from the shaders
setBuffers(); // initilize buffers
calculateNormals(CubeBufferData,CubeNormalBufferData,108); // calculate norms
//printNormals(CubeNormalBufferData);
glutMainLoop();
}
You forgot to bind the buffer object with the normals before calling glVertexAttribPointer( cubeNormal, 3,....);. Therefore, the actual data for the normals is taken from the color buffer, which causes the weird Phong evaluation results.
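A minimal sketch of the corrected cube path in display(): bind the normal VBO before describing the attribute, with offset 0, since the normals live in their own buffer:
glBindBuffer(GL_ARRAY_BUFFER, CubeNormalbuffer); // the normals VBO, not the color buffer
glEnableVertexAttribArray(cubeNormal);
glVertexAttribPointer(cubeNormal, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);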
BTW, nice coding style :)
Phong and Gouraud shadings are not applicable to objects with all planar surfaces, e.g. a cube.