Bindless array texture not working - opengl

I store an object's material in a 2D array texture composed of the diffuse, bump, specular, etc. maps. The handle to this texture array is then stored per vertex, alongside the vertex, uv, normal arrays in a VAO, instead of using UBOs or SSBOs to achieve the same purpose.
My goal is to compress an entire model into a single vertex array object, since a model may have many meshes and each mesh may have its own texture. This way with the texture handle stored per vertex, I could just render the entire model in 1 draw call.
The problem is that all objects render black, suggesting to me that the sampler isn't working or somehow the handle isn't getting transferred all the way through. The shader program compiles successfully, and the geometry is rendering correctly (that is, I can substitute the final diffuse color with a fixed color and see the scene).
Texture Creation:
// For each pending material work order: create a GL_TEXTURE_2D_ARRAY, upload
// its 5 layers (diffuse, bump, specular, etc.), then retrieve a bindless
// handle and make it resident so shaders can sample it without binding.
void TextureManager::FinishWorkOrders()
{
for ( ... ) {
Material_WorkOrder *material = Mat_Work_Orders[x];
glGenTextures(1, material->gl_array_ID);
glBindTexture(GL_TEXTURE_2D_ARRAY, *material->gl_array_ID);
// Allocate and upload all 5 layers at mip level 0 in one call (RGBA8, tightly packed).
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA, material->size.x, material->size.y, 5, 0, GL_RGBA, GL_UNSIGNED_BYTE, material->textureData);
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
// NOTE(review): GL_GENERATE_MIPMAP is a legacy (pre-GL3) texture parameter and
// is invalid in a core profile; the explicit glGenerateMipmap call above already
// builds the mip chain, so this line can be removed.
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_GENERATE_MIPMAP, GL_TRUE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameterf(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAX_ANISOTROPY_EXT, 16.0f);
// Material handle is of type GLuint64
// All sampler state must be final before the handle is created: once a handle
// exists, the texture's state becomes immutable (ARB_bindless_texture).
*material->handle = glGetTextureHandleARB(*material->gl_array_ID);
glMakeTextureHandleResidentARB(*material->handle);
}
}
VAO generation:
// Builds one VAO holding positions, uvs, normals, tangents, bitangents, and a
// per-vertex 64-bit bindless texture handle in attribute slot 5, so an entire
// multi-mesh model can be drawn with a single draw call.
GLuint ModelManager::genVAO(const vector<vec3> &vs, const vector<vec2> &uv, const vector<vec3> &nm, const vector<vec3> &tg, const vector<vec3> &bt, const vector<GLuint64*> &ts)
{
AttribBuffers b;
GLuint vao = 0;
size_t arraySize = vs.size();
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(MAX_BUFFERS, b.buffers);
// Vertex array
glBindBuffer(GL_ARRAY_BUFFER, b.buffers[0]);
glBufferData(GL_ARRAY_BUFFER, arraySize * sizeof(vec3), &vs[0][0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
...
// Texture Handle array
// BUG (this is the defect discussed below): ts is a vector of GLuint64*, so
// &ts[0] uploads the CPU pointer values themselves rather than the 64-bit
// texture handles they point to. The shader then receives garbage handles and
// samples black. The vector must hold GLuint64 values, or be dereferenced into
// a contiguous GLuint64 array before this upload.
glBindBuffer(GL_ARRAY_BUFFER, b.buffers[5]);
glBufferData(GL_ARRAY_BUFFER, arraySize * sizeof(GLuint64), &ts[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(5);
// The 'L' variant is required so the 64-bit integer reaches the shader intact
// (plain glVertexAttribPointer would convert to float).
glVertexAttribLPointer(5, 1, GL_UNSIGNED_INT64_ARB, 0, 0);
glBindVertexArray(0);
return vao;
}
Shader Usage:
Vertex:
#version 450
#extension GL_ARB_bindless_texture : require
#extension GL_ARB_gpu_shader5 : require
#extension GL_ARB_gpu_shader_int64 : require
layout (location = 0) in vec3 vertex;
...
// 64-bit bindless texture handle, supplied per vertex from VAO attribute 5.
layout (location = 5) in uint64_t texHandle;
// 'flat' is mandatory: integer varyings cannot be interpolated across a triangle.
flat out uint64_t TextureHandle;
void main(void)
{
// Pass the handle through unchanged to the fragment stage.
TextureHandle = texHandle;
...
gl_Position = worldMVP * v;
}
Fragment:
#version 450
#extension GL_ARB_bindless_texture : require
#extension GL_ARB_gpu_shader5 : require
#extension GL_ARB_gpu_shader_int64 : require
// Handle streamed from the vertex stage; 'flat' must match the vertex shader.
flat in uint64_t TextureHandle;
layout (location = 0) out vec4 DiffuseOut;
void main()
{
// Construct a sampler directly from the 64-bit handle (ARB_bindless_texture).
// If the handle is zero or garbage, sampling returns black — consistent with
// the symptom described above.
sampler2DArray MaterialMap = sampler2DArray(TextureHandle);
...
// Layer 0 of the array texture holds the diffuse map.
DiffuseOut = texture(MaterialMap, vec3(TexCoord0, 0));
}

The issue is that glBufferData can't accept a vector of pointers.
I set up my data this way so that the last vertex attribute array was an array of Gluint64* handles for 2 reasons:
1) so that the appropriate texture per triangle would just stream from the vertex shader to the fragment shader, 2) so that I could create the array ahead of time and wait for the texture to be created and update the array automatically by just updating the underlying pointers separately
This can work, but the vector needs to be changed from type GLuint64* to GLuint64 (drop the pointer).

Related

Using texture2D on OpenGL 3.3

So I've been fiddling with an old University project done in OpenGL 3.3 (FreeGLUT + GLEW) and I've run into some problems.
Right at the start, I run the program and I'm getting an error compiling the BumpMap Fragment Shader:
// Per-fragment bump-mapped Phong lighting accumulated over up to lightCount sources.
#version 330 core
#define lightCount 10
in vec4 vertPos;
in vec4 eyeModel;
in vec3 normalInterp;
in vec4 ambient;
in vec4 color;
in vec4 spec;
in vec2 texuv;
in vec4 lightPosition[lightCount];
struct LightSource {
vec4 Position;
vec4 Direction;
vec4 Color;
float CutOff;
float AmbientIntensity;
float DiffuseIntensity;
float SpecularIntensity;
float ConstantAttenuation;
float LinearAttenuation;
float ExponentialAttenuation;
int lightType;
};
layout(std140) uniform LightSources {
LightSource lightSource[10];
};
uniform sampler2D diffuse_tex;
uniform sampler2D normal_tex;
out vec4 out_Color;
void main() {
out_Color = vec4(0);
for(int i=0; i<lightCount; i++) {
// lightType == 0 marks an unused light slot.
if(lightSource[i].lightType == 0)
continue;
// NOTE: texture2D() was removed from forward-compatible core contexts;
// texture() must be used instead — this is the compile error quoted below.
vec3 NormalMap = texture2D(normal_tex, texuv).rgb;
// Unpack the tangent-space normal from [0,1] to [-1,1].
vec3 normal = normalize(NormalMap * 2.0 - 1.0); //normalize(normalInterp);
vec4 LightDirection = vertPos - lightSource[i].Position;
float Distance = length(LightDirection);
LightDirection = normalize(LightDirection);
vec4 ambientColor = ambient * lightSource[i].Color * lightSource[i].AmbientIntensity;
vec4 diffuseColor = vec4(0, 0, 0, 0);
vec4 dColor = texture2D(diffuse_tex, texuv);
vec4 specularColor = vec4(0, 0, 0, 0);
float DiffuseFactor = dot(normal, vec3(-LightDirection));
if (DiffuseFactor > 0) {
diffuseColor = dColor * lightSource[i].Color * lightSource[i].DiffuseIntensity * DiffuseFactor;
vec3 VertexToEye = normalize(vec3(eyeModel - vertPos));
vec3 LightReflect = normalize(reflect(vec3(LightDirection), normal));
float SpecularFactor = dot(VertexToEye, LightReflect);
// NOTE(review): SpecularFactor can be negative here, and pow(x, y) is
// undefined for x < 0 in GLSL; the clamped variant commented out below
// is the safe form.
SpecularFactor = pow(SpecularFactor, 255);
if(SpecularFactor > 0.0){
//SpecularFactor = pow( max(SpecularFactor,0.0), 255);
specularColor = spec * lightSource[i].Color * lightSource[i].SpecularIntensity * SpecularFactor;
}
}
out_Color += ambientColor + diffuseColor + specularColor;
}
}
ERROR: 0:55: 'function' : is removed in Forward Compatible context texture2D
ERROR: 0:55: 'texture2D' : no matching overloaded function found (using implicit conversion)
So I looked the problem up and even though I thought it was weird I was getting this problem on a project I knew had been in working condition, I switched the texture2D call for a texture call and now the shader compiles, but I get a different error, where creating the buffer object for the first object in the scene:
//Consts defined here for readability
#define VERTICES 0
#define COLORS 1
#define NORMALS 2
#define TEXUVS 3
#define AMBIENTS 4
#define TANGENTS 5
#define SPECULARS 6
#define SPECULARS_CONSTANTS 7
#define NOISE_SCALE 8
// Uploads the interleaved Vertex array into a single VBO and records each
// attribute's offset/stride in the VAO. Each glVertexAttribPointer offset is
// the running sum of the preceding Vertex members' sizes.
void BufferObject::createBufferObject() {
glGenVertexArrays(1, &_vertexArrayObjectID);
glBindVertexArray(_vertexArrayObjectID);
glGenBuffers(1, &_vertexBufferObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*_vertexCount, _vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(VERTICES);
glVertexAttribPointer(VERTICES, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glEnableVertexAttribArray(COLORS);
glVertexAttribPointer(COLORS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)sizeof(_vertices[0].XYZW));
glEnableVertexAttribArray(NORMALS);
glVertexAttribPointer(NORMALS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)));
glEnableVertexAttribArray(TEXUVS);
glVertexAttribPointer(TEXUVS, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)));
glEnableVertexAttribArray(AMBIENTS);
glVertexAttribPointer(AMBIENTS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)));
glEnableVertexAttribArray(TANGENTS);
glVertexAttribPointer(TANGENTS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)));
glEnableVertexAttribArray(SPECULARS);
glVertexAttribPointer(SPECULARS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)+sizeof(_vertices[0].TANGENT)));
glEnableVertexAttribArray(SPECULARS_CONSTANTS);
glVertexAttribPointer(SPECULARS_CONSTANTS, 1, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)+sizeof(_vertices[0].TANGENT)+sizeof(_vertices[0].SPECULAR)));
// NOTE: the VAO is unbound here, so the glDisableVertexAttribArray calls below
// operate on VAO 0 — a GL_INVALID_OPERATION in a core profile. Even if reordered,
// disabling the attributes would be recorded in the VAO and break later draws.
// This is exactly the error analyzed in the answers below.
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
glDisableVertexAttribArray(VERTICES);
glDisableVertexAttribArray(COLORS);
glDisableVertexAttribArray(NORMALS);
glDisableVertexAttribArray(TEXUVS);
glDisableVertexAttribArray(AMBIENTS);
glDisableVertexAttribArray(TANGENTS);
glDisableVertexAttribArray(SPECULARS);
glDisableVertexAttribArray(SPECULARS_CONSTANTS);
Utility::checkOpenGLError("ERROR: Buffer Object creation failed.");
}
OpenGL ERROR [Invalid Operation] = 1282
And that's all the info I'm getting. I've moved the checkOpenGLError around and figured out the line glDisableVertexAttribArray(VERTICES) is giving the error.
After a bit more of digging I found out that you're not supposed to set glBindVertexArray(0) (at least before you glDisableVertexAttribArray, from what I remember we set those flags to 0 so we wouldn't accidentally affect anything we didn't want)
At this point the error moves to where we're drawing one of the scene objects. At this point I've hit a bit of a wall and don't know where to go next. I guess my question is whether there is a configuration when running the project that needs to be set, or whether just running this on a more recent graphics card could account for the different behaviour. As a final note, this is running on Windows off of Visual Studio 10 (or 15; I switched to 10 when I reverted all changes and didn't retarget the solution), and the program configurations are as follows:
//GLUT Init
glutInit(&argc, argv);
// Request a 3.3 forward-compatible core profile context.
glutInitContextVersion(3, 3);
glutInitContextFlags(GLUT_FORWARD_COMPATIBLE);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE,GLUT_ACTION_GLUTMAINLOOP_RETURNS);
glutInitWindowSize(windowWidth, windowHeight);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
windowHandle = glutCreateWindow(CAPTION);
//GLEW Init
// glewExperimental is required for core profiles so GLEW queries entry points
// without relying on the deprecated extension string.
glewExperimental = GL_TRUE;
GLenum result = glewInit();
//Report the context actually created, then set global GL state.
std::cerr << "CONTEXT: OpenGL v" << glGetString(GL_VERSION) << std::endl;
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glDepthRange(0.0,1.0);
glClearDepth(1.0);
// Back-face culling with counter-clockwise front faces.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glFrontFace(GL_CCW);
with the context above being:
CONTEXT: OpenGL v3.3.0 - Build 22.20.16.4749
Let me know if any additional information is required, I didn't want to add any more unnecessary clutter and the project is too big to just paste it all here...
In your shader you are using glsl version 330 core, which means texture2D() is deprecated and you should use texture() instead.
As for your INVALID OPERATION error, the problem is that you unbound the vao with glBindVertexArray(0); and then called glDisableVertexAttribArray(VERTICES); which operates on the currently bound vao. You should move glBindVertexArray(0); under these calls.
First let me refer to the specification, OpenGL 4.6 API Core Profile Specification; 10.3.1 Vertex Array Objects; page 347 :
The name space for vertex array objects is the unsigned integers, with zero reserved by the GL.
...
A vertex array object is created by binding a name returned by GenVertexArray with the command
void BindVertexArray( uint array );
array is the vertex array object name. The resulting vertex array object is a new state vector, comprising all the state and with the same initial values listed in tables 23.3 and 23.4.
BindVertexArray may also be used to bind an existing vertex array object. If the bind is successful no change is made to the state of the bound vertex array object, and any previous binding is broken.
Tables 23.3, Vertex Array Object State
VERTEX_ATTRIB_ARRAY_ENABLED, VERTEX_ATTRIB_ARRAY_SIZE, VERTEX_ATTRIB_ARRAY_STRIDE, VERTEX_ATTRIB_ARRAY_TYPE, VERTEX_ATTRIB_ARRAY_NORMALIZED, VERTEX_ATTRIB_ARRAY_INTEGER, VERTEX_ATTRIB_ARRAY_LONG, VERTEX_ATTRIB_ARRAY_DIVISOR, VERTEX_ATTRIB_ARRAY_POINTER
Table 23.4, Vertex Array Object State
ELEMENT_ARRAY_BUFFER_BINDING, VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, VERTEX_ATTRIB_BINDING, VERTEX_ATTRIB_RELATIVE_OFFSET, VERTEX_BINDING_OFFSET, VERTEX_BINDING_STRIDE, VERTEX_BINDING_DIVISOR, VERTEX_BINDING_BUFFER.
This means that a Vertex Array Object collects all the information which is necessary to draw an object. In the vertex array object is stored the information about the location of the vertex attributes and the format.
Further the vertex array object "knows" whether an attribute is enabled or disabled.
If you do
glBindVertexArray(0);
glDisableVertexAttribArray( .... );
this causes an INVALID_OPERATION error, when you use a core profile OpenGL context, because then the vertex array object 0 is not a valid vertex array object.
If you would use a compatibility profile context this would not cause an error, because then the vertex array object 0 is the default vertex array object and is valid.
If you do
glBindVertexArray(_vertexArrayObjectID);
glEnableVertexAttribArray( ..... );
glVertexAttribPointer( ..... );
glDisableVertexAttribArray( ..... );
glBindVertexArray(0);
then the draw call will fail. You have made the effort to define all the arrays of generic vertex attributes data and to enable them all correctly, but right after doing that you disable them again. So in the vertex array object is stored the "disabled" state for all the attributes.
The correct procedure to define a vertex array object is:
Generate the vertex buffers, and creates and initializes the buffer object's data store (this step can be done after creating and binding the vertex array object, too):
glGenBuffers(1, &_vertexBufferObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*_vertexCount, _vertices, GL_STATIC_DRAW);
Generate and bind the vertex array object:
glGenVertexArrays(1, &_vertexArrayObjectID);
glBindVertexArray(_vertexArrayObjectID);
Define and enable the arrays of generic vertex attributes data (this has to be done after binding the vertex array object):
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glEnableVertexAttribArray(VERTICES);
glVertexAttribPointer(VERTICES, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
....
glBindBuffer(GL_ARRAY_BUFFER, 0);
If you would use an element buffer (GL_ELEMENT_ARRAY_BUFFER), then you would have to specify it now, because the name of (reference to) the element buffer object is stored in the vertex array object, and this has to be currently bound, when the element buffer object gets bound.
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, ..... );
Finally you can do glBindVertexArray(0). But there is no reason to do that. It is sufficient to bind a new vertex array object, before you specify a new mesh, or to bind the proper vertex array object before you draw a mesh.
Further there is no need for glDisableVertexAttribArray, as long as you don't want to change the vertex array object specification. The state "enabled" is stored in the vertex array object and kept there. If you bind a new vertex array object, then that object and all of its states become current.
Now drawing is simple:
glBindVertexArray(_vertexArrayObjectID);
glDrawArrays( .... );
Again there is no need of glBindVertexArray(0), after the draw call (especially in core mode).

Apply a texture with OpenGL 3.0 / GLSL 1.3

Currently I create my 3D models using following code (simplified):
// Link the shader program; the shader objects can be deleted once linked
// (they stay alive while attached to the program).
gl3Element->shaderProgram=glCreateProgram();
glAttachShader(gl3Element->shaderProgram,m_gl3VertexShader);
glAttachShader(gl3Element->shaderProgram,m_gl3DynColourFragmentShader);
glLinkProgram(gl3Element->shaderProgram);
glDeleteShader(m_gl3VertexShader);
glDeleteShader(m_gl3DynColourFragmentShader);
// One VAO/VBO per element; attribute 0 holds tightly packed vec3 positions.
glGenVertexArrays(1, &gl3Element->VAO);
glGenBuffers(1, &gl3Element->VBO);
glBindVertexArray(entity->m_gl3Element.VAO);
glBindBuffer(GL_ARRAY_BUFFER,entity->m_gl3Element.VBO);
glBufferData(GL_ARRAY_BUFFER,size,data,GL_STATIC_DRAW);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,3*sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
// todo: add texture code here?
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL);
My current (not working) texture code looks like this:
glGenTextures(1, &imgEntity->m_glTexture);
glBindTexture(GL_TEXTURE_2D, imgEntity->m_glTexture);
// NOTE(review): glTexEnvf is fixed-function and was removed from core profiles;
// texturing behavior belongs in the fragment shader instead.
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
// NOTE(review): GL_CLAMP is deprecated; GL_CLAMP_TO_EDGE is the modern equivalent.
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// Rows are byte-aligned (source data is RGB, not 4-byte padded).
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,transImage->GetWidth(),transImage->GetHeight(),0,GL_RGB,GL_UNSIGNED_BYTE,transImage->GetData());
// NOTE(review): this rebinds the texture that is already bound — it's a no-op.
glBindTexture(GL_TEXTURE_2D, imgEntity->m_glTexture);
The things that are missing obviously are texture coordinates and assignment of the texture to the created model. So my questions:
How can I apply some valid texture coordinates to the object using only OpenGL 3.0 and GLSL 1.3?
How do I assign these texture data to the model so that they are drawn on my next call to
glBindVertexArray(element->VAO);
glDrawArrays(element->arrayType,arrayStart,element->arraySize);
for this model ?
Thanks!
How can I apply some valid texture coordinates to the object using
only OpenGL 3.0 and GLSL 1.3?
Texture coordinates are normally generated by a third party program such as 3DS Max or Blender. 3D artists use these programs to texture their models, when the model is exported the texture coordinates are also exported in the model file. When the model is loaded for rendering, the texture coordinates for each vertex are extracted and then we can pass these coordinates to the shader via a shader attribute.
How do I assign these texture data to the model?
Getting textured geometry in OpenGL can be a bit of a process so I will try to break down the process within a few steps.
Get the models texture coordinates; could be programmatically generated or loaded from a model.
Load in the texture so that it can be used by OpenGL
Set up the attribute array so that the shader can find the texture coordinates.
Modify the vertex shader and fragment shader to support textured geometry
It looks like you already have a mechanism for number 2 (loading in the texture).
So you seem to be just missing the last two steps.
To get the texture coordinates associated with the vertex data you can specify attributes that are associated with the vertex data.
As per the OpenGL documentation:
Vertex attributes are used to communicate from "outside" to the vertex shader. Unlike uniform variables, values are provided per vertex (and not globally for all vertices). There are built-in vertex attributes like the normal or the position, or you can specify your own vertex attribute like a tangent or another custom value. Attributes can't be defined in the fragment shader.
Some sample code might look like this:
//An easy way keep track of what locations are assigned for each attribute
// Attribute slots; these values must match the layout(location = N) qualifiers
// in the vertex shader below.
enum Attribute_Location
{
AL_Vertices = 0,
AL_DiffuseTexCoords = 1,
AL_AlphaTexCoords = 2,
AL_Normals = 3,
};
GLuint uvBuffer;
glGenBuffers(1, &uvBuffer);
//Bind the buffer
glBindBuffer(
GL_ARRAY_BUFFER,
uvBuffer);
//Bind the data to the buffer
glBufferData(GL_ARRAY_BUFFER,
bufferSize, //size of the buffer you are uploading
&diffuseTexCoords[0], //array of texture coords
GL_STATIC_DRAW);
glEnableVertexAttribArray(AL_DiffuseTexCoords);
//Tells OpenGL how to assign data from the texture buffer to the shader:
//2 floats per vertex, tightly packed, starting at the buffer's beginning.
glVertexAttribPointer(AL_DiffuseTexCoords,
2,
GL_FLOAT,
GL_FALSE,
0,
0);
And here is an example of how the vertex shader and fragment shader would look, courtesy of http://www.opengl-tutorial.org/beginners-tutorials/tutorial-5-a-textured-cube/
Textured.vs
#version 330 core
// Input vertex data, different for all executions of this shader.
// The location values must match the Attribute_Location enum used on the CPU side.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec2 vertexUV;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
void main()
{
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP * vec4(vertexPosition_modelspace,1);
// UV of the vertex. No special space for this one.
UV = vertexUV;
}
Textured.fs
#version 330 core
// Interpolated values from the vertex shaders
in vec2 UV;
// Ouput data
out vec3 color;
// Values that stay constant for the whole mesh.
// The sampler's value (set via glUniform1i) selects which texture unit to read.
uniform sampler2D myTextureSampler;
void main()
{
// Output color = color of the texture at the specified UV
color = texture( myTextureSampler, UV ).rgb;
}
Note that the attribute location of the vertices and texture coordinates specified in the enum Attribute_Location match the layout location in the vertex shader:
enum Attribute_Location
{
AL_Vertices = 0,
AL_DiffuseTexCoords = 1,
...
}
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec2 vertexUV;

Passing color to Assimp model's GLSL shader 1.40 on Intel HD 4000

I'm trying to pass color attributes to each point of a 3d model imported by assimp via the vertex shader.
Attempted solutions:
Pass in via layout (location = 0) in vec3 color;
My laptop has Intel HD 4000 graphics which do not support #extension ARB_explicit_attrib_location : require. I am running GLSL 140 which requires the #extension for layout to work.
Have a separate vector of color values whose indices match those of the points and pass in as another attribute.
Did not work. **Edit (what didn't work):** the shader never received the color passed by main.cpp.
My code
part of main.cpp
GLuint program_id = LoadShaders("pointcloud.vert", "pointcloud.frag");
ObjLoader *obj_loader = new ObjLoader();
int result = 0;
// Load the model given on the command line, or fall back to a sample heart mesh.
if (argc > 1) result = obj_loader->loadAsset(argv[1]);
else{ result = obj_loader->loadAsset("obj_samples/Heart.stl"); rotateY = 0.75f; rotateX = 0.5f;}
if (result){glfwTerminate(); exit(EXIT_FAILURE);}
glEnable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
GLuint matrix_id = glGetUniformLocation(program_id, "MVP");//get the location for our "MVP" uniform variable
// NOTE(review): if getNumVertices() returns a vertex count (not a float count),
// this allocates 1 float per vertex instead of 3 — confirm against ObjLoader.
GLfloat *g_vertex_buffer_data = (GLfloat*)malloc(obj_loader->getNumVertices()*sizeof(GLfloat));//use a large buffer to store the entire scene
obj_loader->loadVertices(g_vertex_buffer_data);//load the scene into the vertex buffer
GLint attribute_vertex = glGetAttribLocation(program_id, "vertex_pos");//get the location for the attribute variables
GLuint vertex_array_id;//generate the Vertex Array Object (Depedency: GLEW)
GLuint vertex_buffer[2];//initialize the vertex buffer memory (similar to malloc)
GLuint color_buffer;
glGenVertexArrays(1, &vertex_array_id); //create the VAO
glBindVertexArray(vertex_array_id); // bind the VAO
glGenBuffers(1, vertex_buffer); //create vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer[0]); //bind the buffer
glBufferData(GL_ARRAY_BUFFER, obj_loader->getNumVertices()*sizeof(GLfloat), g_vertex_buffer_data, GL_STATIC_DRAW);
// NOTE(review): the pointer is set on hard-coded location 0 but the enable uses
// the queried attribute_vertex — these must refer to the same location.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(vec3d), 0);
glEnableVertexAttribArray(attribute_vertex);//enable
glUseProgram(program_id);//use our shader
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glPointSize(3.0f);
LoadShaders():
// Compiles the given vertex and fragment shader files, links them into a new
// program object, and returns its id (0 if a source file could not be read).
GLuint LoadShaders(const char * vertex_file_path,const char * fragment_file_path){//load the shader programs and return the id
GLuint vertex_shader_id = glCreateShader(GL_VERTEX_SHADER);//create the vertex and fragment shaders
GLuint fragment_shader_id = glCreateShader(GL_FRAGMENT_SHADER);
std::string vertex_shader_code = readSourceFile(vertex_file_path);//read the shader and fragment programs into string
// NOTE(review): these early returns leak the two shader objects created above.
if(vertex_shader_code == "")return 0; //failed because empty string
printf("Compiling Vertex shader : %s\n", vertex_file_path);
CompileShader(vertex_shader_code, vertex_shader_id);
std::string fragment_shader_code = readSourceFile(fragment_file_path);//load the fragment program (optional)
if(fragment_shader_code == "")return 0; //failed: empty string
printf("Compiling Fragment shader : %s\n", fragment_file_path);//compile the fragment shader
CompileShader(fragment_shader_code, fragment_shader_id);
GLint result = GL_FALSE;
int infolog_length;
printf("Linking program\n");//link the program
GLuint program_id = glCreateProgram();
glAttachShader(program_id, vertex_shader_id);
glAttachShader(program_id, fragment_shader_id);
glLinkProgram(program_id);
glGetProgramiv(program_id, GL_LINK_STATUS, &result);//check the program and ensure that the program is linked properly
glGetProgramiv(program_id, GL_INFO_LOG_LENGTH, &infolog_length);
if ( infolog_length > 0 ){
std::vector<char> program_error_msg(infolog_length+1);
glGetProgramInfoLog(program_id, infolog_length, NULL, &program_error_msg[0]);
printf("%s\n", &program_error_msg[0]);
}else{
printf("Linked Successfully\n");
}
// Safe to delete now: linked program keeps its own copy of the binaries.
glDeleteShader(vertex_shader_id);
glDeleteShader(fragment_shader_id);
return program_id;
}
pointcloud.vert:
#version 140
uniform mat4 MVP;
out vec4 colorV;
in vec3 vertex_pos;
void main(){
gl_Position = MVP * vec4(vertex_pos, 1.0f);// Output position of the vertex, in clip space : MVP * position
// Currently a constant red; this is what the question wants replaced with a per-vertex color.
colorV = vec4(1.0f, 0.0f, 0.0f, 1.0f);
}
I have a std::vector of rgb values I'd like to pass to the shader - any recommendations to achieve this seemingly simple behavior?
Judging from your comment I assume that I misunderstood the problem.
If you want to pass vertex-colors from assimp into the shader, you need to define another vertex attribute. To do this, you need to create another buffer after the vertex buffer:
GLfloat* colorData = (GLfloat*)malloc(sizeof(GLfloat)*obj_loader->getNumVertices()*3);
obj_loader->loadColors(colorData);//or however you named it
Gluint colorBuffer;
glGenBuffers(1, &colorBuffer);
glBindBuffer(GL_ARRAY_BUFFER, colorBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Glfloat)*obj_loader->getNumVertices()*3, colorData, GL_STATIC_DRAW);
glBindAttribLocation(program_id, 0, "vertex_pos");
glBindAttribLocation(program_id, 1, "colorData");
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
//... continue with your code
Vertex Shader:
#version 140
// Attribute locations are assigned on the CPU side with glBindAttribLocation,
// since GLSL 1.40 has no layout(location = N) qualifier.
in vec3 vertex_pos;
in vec3 colorData;
out vec4 colorV;
uniform mat4 MVP;
void main()
{
//...
}

How to use glDrawElementsInstanced + Texture Buffer Objects?

My use case is a bunch a textured quads that I want to draw. I'm trying to use the same indexed array of a quad to draw it a bunch of times and use the gl_InstanceID and gl_VertexID in GLSL to retrieve texture and position info from a Texture Buffer.
The way I understand a Texture Buffer is that I create it and my actual buffer, link them, and then whatever I put in the actual buffer magically appears in my texture buffer?
So I have my vertex data and index data:
// Interleaved quad vertex: 4-component position plus texture uv.
struct Vertex
{
GLfloat position[4];
GLfloat uv[2];
};
// The four corners of a unit quad in clip space.
Vertex m_vertices[4] =
{
{{-1,1,0,1},{0,1}},
{{1,1,0,1},{1,1}},
{{-1,-1,0,1},{0,0}},
{{1,-1,0,1},{1,0}}
};
// Two triangles covering the quad: (0,2,1) and (1,2,3).
GLuint m_indices[6] = {0,2,1,1,2,3};
Then I create my VAO, VBO and IBO for the quads:
glGenBuffers(1,&m_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER,m_vertexBuffer);
glBufferData(GL_ARRAY_BUFFER,sizeof(Vertex)*4,&m_vertices,GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER,0);
glGenVertexArrays(1,&m_vao);
glBindVertexArray(m_vao);
glBindBuffer(GL_ARRAY_BUFFER,m_vertexBuffer);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0,4,GL_FLOAT, GL_FALSE, sizeof(struct Vertex),(const GLvoid*)offsetof(struct Vertex, position));
glEnableVertexAttribArray(1);
// NOTE(review): the first argument here should be 1, not 0 — as written this
// redefines attribute 0 with the uv layout and leaves attribute 1 without a pointer.
glVertexAttribPointer(0,2,GL_FLOAT, GL_FALSE, sizeof(struct Vertex),(const GLvoid*)offsetof(struct Vertex, uv));
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER,0);
glBindVertexArray(m_vao);
glGenBuffers(1, &m_ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)*6,&m_indices,GL_STATIC_DRAW);
// NOTE: unbinding GL_ELEMENT_ARRAY_BUFFER while the VAO is still bound clears
// the VAO's index-buffer binding — this is the bug identified in the answer below.
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
glBindVertexArray(0);
I'm pretty sure that I've done the above correctly. My quads have 4 vertices, with six indexes to draw triangles.
Next I create my buffer and texture for the the Texture Buffer:
glGenBuffers(1,&m_xywhuvBuffer);
glBindBuffer(GL_TEXTURE_BUFFER, m_xywhuvBuffer);
// Reserve room for 100 quads: 8 floats (x,y,w,h,u,v,uw,vh) per quad, filled later.
glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat)*8*100, nullptr, GL_DYNAMIC_DRAW); // 8 floats
glGenTextures(1,&m_xywhuvTexture);
glBindTexture(GL_TEXTURE_BUFFER, m_xywhuvTexture);
// GL_RG32F packs 2 floats per texel, so each quad's 8 floats span 4 texels.
glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, m_xywhuvBuffer); // they're in pairs of 2, in r,g of each texel.
glBindBuffer(GL_TEXTURE_BUFFER,0);
So, the idea is that every four texels belongs to one quad, or gl_InstanceID.
When I'm drawing my quads, they execute the below:
glActiveTexture(GL_TEXTURE0);
glBindBuffer(GL_TEXTURE_BUFFER, m_xywhuvBuffer);
// 8 floats per quad: NDC position, NDC dimensions, then the sub-rectangle of
// the atlas texture to sample from.
std::vector<GLfloat> xywhuz =
{
-1.0f + position.x / screenDimensions.x * 2.0f,
1.0f - position.y / screenDimensions.y * 2.0f,
dimensions.x / screenDimensions.x,
dimensions.y / screenDimensions.y,
m_region.x,
m_region.y,
m_region.w,
m_region.h
};
// Write this quad's 8 floats into its slot, indexed by draw order.
glBufferSubData(GL_TEXTURE_BUFFER, sizeof(GLfloat)*8*m_rectsDrawnThisFrame, sizeof(GLfloat)*8, xywhuz.data());
m_rectsDrawnThisFrame++;
So I increase m_rectsDrawThisFrame for each quad. You'll notice that the data I'm passing is 8 GLfloats, so each of the 4 texels that belong to each gl_InstanceID is the x,y position, the width and height, and then the same details for the real texture that I'm going to texture my quads with.
Finally once all of my rects have updated their section of the GL_TEXTURE_BUFFER I run this:
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D,texture); // this is my actual texture that the quads take a section from to texture themselves.
glUniform1i(m_program->GetUniformLocation("tex"),1);
glUniform4f(m_program->GetUniformLocation("color"),1,0,1,1);
glBindVertexArray(m_vao);
// NOTE(review): the count should be 6 (two indexed triangles), not 4 —
// see the answer below.
glDrawElementsInstanced(GL_TRIANGLES,4,GL_UNSIGNED_INT,0,m_rectsDrawnThisFrame);
m_rectsDrawnThisFrame = 0;
I reset the draw count. I also noticed that I had to activate the texture in the second slot. Does the Texture Buffer Object use up one?
Finally my Vert shader
// Expands each instance's 4 texels of quad data (position, size, uv origin,
// uv size) into the 4 corners of the quad, selected by gl_VertexID.
#version 410
layout (location = 0) in vec4 in_Position;
layout (location = 1) in vec2 in_UV;
out vec2 ex_texcoord;
uniform samplerBuffer buf;
void main(void)
{
// Each instance owns 4 consecutive RG32F texels in the buffer texture.
vec2 position = texelFetch(buf,gl_InstanceID*4).xy;
vec2 dimensions = texelFetch(buf,gl_InstanceID*4+1).xy;
vec2 uvXY = texelFetch(buf,gl_InstanceID*4+2).xy;
vec2 uvWH = texelFetch(buf,gl_InstanceID*4+3).xy;
if(gl_VertexID == 0)
{
gl_Position = vec4(position.xy,0,1);
ex_texcoord = uvXY;
}
else if(gl_VertexID == 1)
{
gl_Position = vec4(position.x + dimensions.x, position.y,0,1);
ex_texcoord = vec2(uvXY.x + uvWH.x, uvXY.y);
}
else if(gl_VertexID == 2)
{
gl_Position = vec4(position.x, position.y + dimensions.y, 0,1);
ex_texcoord = vec2(uvXY.x, uvXY.y + uvWH.y);
}
else if(gl_VertexID == 3)
{
gl_Position = vec4(position.x + dimensions.x, position.y + dimensions.y, 0,1);
ex_texcoord = vec2(uvXY.x + uvWH.x, uvXY.y + uvWH.y );
}
}
And my Frag shader
#version 410
in vec2 ex_texcoord;
uniform sampler2D tex;
// Tint applied multiplicatively; defaults to white (no tint).
uniform vec4 color = vec4(1,1,1,1);
layout (location = 0) out vec4 FragColor;
void main()
{
FragColor = texture(tex,ex_texcoord) * color;
}
Now the problem, after I'm getting no errors reported in GLIntercept, is that I'm getting nothing drawn on the screen.
Any help?
There is one subtle issue in your code that would certainly stop it from working. At the end of the VAO/VBO setup code, you have this:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
glBindVertexArray(0);
The GL_ELEMENT_ARRAY_BUFFER binding is part of the VAO state. If you unbind it while the VAO is bound, this VAO will not have an element array buffer binding. Which means that you don't have indices when you draw later.
You should simply remove this call:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
Also, since you have 6 indices, the second argument to the draw call should be 6:
glDrawElementsInstanced(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0, m_rectsDrawnThisFrame);
Apart from that, it all looks reasonable to me. But there's quite a lot of code, so I can't guarantee that I would have spotted all problems.
I also noticed that I had to activate the texture in the second slot. Does the Texture Buffer Object use up one?
Yes. The buffer texture needs to be bound, and the value of the sampler variable set to the corresponding texture unit. Since you bind the buffer texture during setup, never unbind it, and the default value of the sampler variable is 0, you're probably fine there. But I think it would be cleaner to set it up more explicitly. Where you prepare for drawing:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER, m_xywhuvTexture);
glUniform1i(m_program->GetUniformLocation("buf"), 0);

Getting maximum/minimum luminance of texture OpenGL

I'm starting with OpenGL, and I want to create a tone mapping - algorithm.
I know that my first step is get the max/min luminance value of the HDR image.
I have the image in a texture in FBO, and I'm not sure how to start.
I think the best way is to pass texture coordinates to a fragment shader and then go through all the pixels, somehow generating progressively smaller textures.
But I don't know how to do the downsampling manually until I reach a 1x1 texture; do I need a separate FBO for each pass, and where do I create each new texture?
I have searched for a lot of information, but almost nothing is clear to me yet.
I would appreciate some help to situate myself and to start.
EDIT 1. Here's my shaders, and how I pass texture coords to vertex shader:
To pass texture coords and vertex positions, I draw a quad using VBO:
void drawQuad(Shaders* shad){
    // Draws a fullscreen quad as a triangle strip.
    // Each vertex is 5 floats: position (3) + texture coordinate (2).
    //
    // Fixes vs. the original:
    //  - Removed the glEnableClientState/glVertexPointer pair: it duplicated the
    //    generic-attribute setup with a WRONG stride (3 floats instead of 5) and
    //    was never disabled. Fixed-function client state must not be mixed with
    //    generic vertex attributes in a core-profile #version 420 pipeline.
    //  - The VBO generated on every call was never deleted, leaking one buffer
    //    object per frame; it is now deleted after the draw.
    std::vector<GLfloat> quadVerts = {
        -1,  1, 0, 0, 0,
        -1, -1, 0, 0, 1,
         1,  1, 0, 1, 0,
         1, -1, 0, 1, 1};
    GLuint quadVbo;
    glGenBuffers(1, &quadVbo);
    glBindBuffer(GL_ARRAY_BUFFER, quadVbo);
    glBufferData(GL_ARRAY_BUFFER, quadVerts.size() * sizeof(GLfloat), quadVerts.data(), GL_STATIC_DRAW);
    // Shader attribute locations
    GLuint vVertex = shad->getLocation("vVertex");
    GLuint vUV = shad->getLocation("vUV");
    // Set attribs: interleaved layout, stride = 5 floats per vertex
    glEnableVertexAttribArray(vVertex);
    glVertexAttribPointer(vVertex, 3, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 5, 0);
    glEnableVertexAttribArray(vUV);
    glVertexAttribPointer(vUV, 2, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 5, (void*)(3 * sizeof(GLfloat)));
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); // Draw
    glDisableVertexAttribArray(vVertex);
    glDisableVertexAttribArray(vUV);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glDeleteBuffers(1, &quadVbo); // avoid leaking one buffer object per call
}
Vertex shader:
#version 420
in vec2 vUV;
in vec4 vVertex;
smooth out vec2 vTexCoord;
uniform mat4 MVP;
void main()
{
    // Scale the normalized UVs up to texel coordinates of the 1024x512 source image.
    const vec2 sourceSize = vec2(1024.0, 512.0);
    vTexCoord = vUV * sourceSize;
    gl_Position = MVP * vVertex;
}
And fragment shader:
#version 420
smooth in vec2 vTexCoord;
layout(binding=0) uniform sampler2D texHDR; // Tex image unit binding
layout(location=0) out vec4 color; //Frag data output location
void main(void)
{
    // Box-filter downsample: average the 2x2 block of source texels
    // that maps onto this destination fragment.
    vec4 sum = vec4(0.0);
    for (int dx = 0; dx <= 1; ++dx) {
        for (int dy = 0; dy <= 1; ++dy) {
            sum += texelFetch(texHDR, ivec2(2 * vTexCoord.x + dx, 2 * vTexCoord.y + dy), 0);
        }
    }
    color = sum / 4.0;
}
In this test code, I have a texture with size 1024*512. My idea is to render to a texture attached to GL_ATTACHMENT_0 in an FBO (layout(location=0)) using these shaders and the texture bound to GL_TEXTURE_0 which has the image (layout(binding=0)).
My target is to have the image in texHDR in my FBO texture reducing its size by two.
For downsampling, all you need to do in the fragment shader is multiple texture lookups, then combine them for the output fragment. For example, you could do 2x2 lookups, so each pass would reduce the resolution in x and y by a factor 2.
Let's say you want to reduce a 1024x1024 image. Then you would render a quad into a 512x512 image. Set it up so your vertex shader simply generates values for x and y between 0 and 511. The fragment shader then calls texelFetch(tex, ivec2(2*x+i,2*y+j)), where i and j loop from 0 to 1. Cache those four values, output min and max in your texture.