So I've been fiddling with an old University project done in OpenGL 3.3 (FreeGLUT + GLEW) and I've run into some problems.
Right at the start, I run the program and I'm getting an error compiling the BumpMap Fragment Shader:
#version 330 core
// Bump-mapped per-fragment lighting with up to `lightCount` light sources
// supplied through a std140 uniform block.
#define lightCount 10
in vec4 vertPos;       // fragment position (same space as light positions)
in vec4 eyeModel;      // eye position
in vec3 normalInterp;  // interpolated geometric normal (superseded by the normal map)
in vec4 ambient;       // per-vertex ambient material colour
in vec4 color;
in vec4 spec;          // per-vertex specular material colour
in vec2 texuv;
in vec4 lightPosition[lightCount];
struct LightSource {
    vec4 Position;
    vec4 Direction;
    vec4 Color;
    float CutOff;
    float AmbientIntensity;
    float DiffuseIntensity;
    float SpecularIntensity;
    float ConstantAttenuation;
    float LinearAttenuation;
    float ExponentialAttenuation;
    int lightType;     // 0 marks an unused light slot
};
layout(std140) uniform LightSources {
    LightSource lightSource[lightCount];
};
uniform sampler2D diffuse_tex;
uniform sampler2D normal_tex;
out vec4 out_Color;
void main() {
    out_Color = vec4(0);
    // Sample the maps once -- they do not vary per light.
    // texture() replaces texture2D(), which was removed from core
    // forward-compatible contexts.
    vec3 NormalMap = texture(normal_tex, texuv).rgb;
    vec3 normal = normalize(NormalMap * 2.0 - 1.0); // unpack [0,1] -> [-1,1]
    vec4 dColor = texture(diffuse_tex, texuv);
    for (int i = 0; i < lightCount; i++) {
        if (lightSource[i].lightType == 0)
            continue; // skip disabled lights
        vec4 LightDirection = vertPos - lightSource[i].Position;
        float Distance = length(LightDirection);
        LightDirection = normalize(LightDirection);
        vec4 ambientColor = ambient * lightSource[i].Color * lightSource[i].AmbientIntensity;
        vec4 diffuseColor = vec4(0.0);
        vec4 specularColor = vec4(0.0);
        float DiffuseFactor = dot(normal, vec3(-LightDirection));
        if (DiffuseFactor > 0.0) {
            diffuseColor = dColor * lightSource[i].Color * lightSource[i].DiffuseIntensity * DiffuseFactor;
            vec3 VertexToEye = normalize(vec3(eyeModel - vertPos));
            vec3 LightReflect = normalize(reflect(vec3(LightDirection), normal));
            float SpecularFactor = dot(VertexToEye, LightReflect);
            // Test the sign BEFORE pow(): pow() with a negative base is
            // undefined in GLSL. Exponent is a float literal (255.0).
            if (SpecularFactor > 0.0) {
                SpecularFactor = pow(SpecularFactor, 255.0);
                specularColor = spec * lightSource[i].Color * lightSource[i].SpecularIntensity * SpecularFactor;
            }
        }
        out_Color += ambientColor + diffuseColor + specularColor;
    }
}
ERROR: 0:55: 'function' : is removed in Forward Compatible context texture2D
ERROR: 0:55: 'texture2D' : no matching overloaded function found (using implicit conversion)
So I looked the problem up and even though I thought it was weird I was getting this problem on a project I knew had been in working condition, I switched the texture2D call for a texture call and now the shader compiles, but I get a different error, where creating the buffer object for the first object in the scene:
//Consts defined here for readability
#define VERTICES 0
#define COLORS 1
#define NORMALS 2
#define TEXUVS 3
#define AMBIENTS 4
#define TANGENTS 5
#define SPECULARS 6
#define SPECULARS_CONSTANTS 7
#define NOISE_SCALE 8
void BufferObject::createBufferObject() {
glGenVertexArrays(1, &_vertexArrayObjectID);
glBindVertexArray(_vertexArrayObjectID);
glGenBuffers(1, &_vertexBufferObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*_vertexCount, _vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(VERTICES);
glVertexAttribPointer(VERTICES, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glEnableVertexAttribArray(COLORS);
glVertexAttribPointer(COLORS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)sizeof(_vertices[0].XYZW));
glEnableVertexAttribArray(NORMALS);
glVertexAttribPointer(NORMALS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)));
glEnableVertexAttribArray(TEXUVS);
glVertexAttribPointer(TEXUVS, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)));
glEnableVertexAttribArray(AMBIENTS);
glVertexAttribPointer(AMBIENTS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)));
glEnableVertexAttribArray(TANGENTS);
glVertexAttribPointer(TANGENTS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)));
glEnableVertexAttribArray(SPECULARS);
glVertexAttribPointer(SPECULARS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)+sizeof(_vertices[0].TANGENT)));
glEnableVertexAttribArray(SPECULARS_CONSTANTS);
glVertexAttribPointer(SPECULARS_CONSTANTS, 1, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)+sizeof(_vertices[0].TANGENT)+sizeof(_vertices[0].SPECULAR)));
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
glDisableVertexAttribArray(VERTICES);
glDisableVertexAttribArray(COLORS);
glDisableVertexAttribArray(NORMALS);
glDisableVertexAttribArray(TEXUVS);
glDisableVertexAttribArray(AMBIENTS);
glDisableVertexAttribArray(TANGENTS);
glDisableVertexAttribArray(SPECULARS);
glDisableVertexAttribArray(SPECULARS_CONSTANTS);
Utility::checkOpenGLError("ERROR: Buffer Object creation failed.");
}
OpenGL ERROR [Invalid Operation] = 1282
And that's all the info I'm getting. I've moved the checkOpenGLError around and figured out the line glDisableVertexAttribArray(VERTICES) is giving the error.
After a bit more digging I found out that you're not supposed to call glBindVertexArray(0) (at least not before glDisableVertexAttribArray; from what I remember we set those bindings to 0 so we wouldn't accidentally affect anything we didn't want).
At this point the error moves to where we're drawing one of the scene objects. At this point I've hit a bit of a wall and don't know where to go next. I guess my question is whether there is a configuration when running the project that needs to be set, or whether just running this on a more recent graphics card could account for the different behaviour. As a final note, this is running on Windows from Visual Studio 10 (or 15; I switched to 10 when I reverted all changes and didn't retarget the solution); the program configuration is as follows:
//GLUT Init
glutInit(&argc, argv);
// Request an OpenGL 3.3 forward-compatible core-profile context.
glutInitContextVersion(3, 3);
glutInitContextFlags(GLUT_FORWARD_COMPATIBLE);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE,GLUT_ACTION_GLUTMAINLOOP_RETURNS);
glutInitWindowSize(windowWidth, windowHeight);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
windowHandle = glutCreateWindow(CAPTION);
//GLEW Init
glewExperimental = GL_TRUE; // required so GLEW resolves core-profile entry points
GLenum result = glewInit(); // NOTE(review): result is never compared to GLEW_OK -- verify initialization succeeded
//OpenGL state init
std::cerr << "CONTEXT: OpenGL v" << glGetString(GL_VERSION) << std::endl;
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
// Standard depth-tested, back-face-culled, CCW-front-face setup.
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glDepthRange(0.0,1.0);
glClearDepth(1.0);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glFrontFace(GL_CCW);
with the context above being:
CONTEXT: OpenGL v3.3.0 - Build 22.20.16.4749
Let me know if any additional information is required; I didn't want to add any more unnecessary clutter, and the project is too big to just paste it all here...
In your shader you are using glsl version 330 core, which means texture2D() is deprecated and you should use texture() instead.
As for your INVALID OPERATION error, the problem is that you unbound the vao with glBindVertexArray(0); and then called glDisableVertexAttribArray(VERTICES); which operates on the currently bound vao. You should move glBindVertexArray(0); under these calls.
First let me refer to the specification, OpenGL 4.6 API Core Profile Specification; 10.3.1 Vertex Array Objects; page 347 :
The name space for vertex array objects is the unsigned integers, with zero reserved by the GL.
...
A vertex array object is created by binding a name returned by GenVertexArray with the command
void BindVertexArray( uint array );
array is the vertex array object name. The resulting vertex array object is a new state vector, comprising all the state and with the same initial values listed in tables 23.3 and 23.4.
BindVertexArray may also be used to bind an existing vertex array object. If the bind is successful no change is made to the state of the bound vertex array object, and any previous binding is broken.
Tables 23.3, Vertex Array Object State
VERTEX_ATTRIB_ARRAY_ENABLED, VERTEX_ATTRIB_ARRAY_SIZE, VERTEX_ATTRIB_ARRAY_STRIDE, VERTEX_ATTRIB_ARRAY_TYPE, VERTEX_ATTRIB_ARRAY_NORMALIZED, VERTEX_ATTRIB_ARRAY_INTEGER, VERTEX_ATTRIB_ARRAY_LONG, VERTEX_ATTRIB_ARRAY_DIVISOR, VERTEX_ATTRIB_ARRAY_POINTER
Table 23.4, Vertex Array Object State
ELEMENT_ARRAY_BUFFER_BINDING, VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, VERTEX_ATTRIB_BINDING, VERTEX_ATTRIB_RELATIVE_OFFSET, VERTEX_BINDING_OFFSET, VERTEX_BINDING_STRIDE, VERTEX_BINDING_DIVISOR, VERTEX_BINDING_BUFFER.
This means that a Vertex Array Object collects all the information which is necessary to draw an object. In the vertex array object is stored the information about the location of the vertex attributes and the format.
Further the vertex array object "knows" whether an attribute is enabled or disabled.
If you do
glBindVertexArray(0);
glDisableVertexAttribArray( .... );
this causes an INVALID_OPERATION error, when you use a core profile OpenGL context, because then the vertex array object 0 is not a valid vertex array object.
If you would use a compatibility profile context this would not cause an error, because then the vertex array object 0 is the default vertex array object and is valid.
If you do
glBindVertexArray(_vertexArrayObjectID);
glEnableVertexAttribArray( ..... );
glVertexAttribPointer( ..... );
glDisableVertexAttribArray( ..... );
glBindVertexArray(0);
then the draw call will fail. You have made the effort to define all the arrays of generic vertex attributes data and to enable them all correctly, but right after doing that you disable them again. So in the vertex array object is stored the "disabled" state for all the attributes.
The correct procedure to define a vertex array object is:
Generate the vertex buffers, and creates and initializes the buffer object's data store (this step can be done after creating and binding the vertex array object, too):
glGenBuffers(1, &_vertexBufferObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*_vertexCount, _vertices, GL_STATIC_DRAW);
Generate and bind the vertex array object:
glGenVertexArrays(1, &_vertexArrayObjectID);
glBindVertexArray(_vertexArrayObjectID);
Define and enable the arrays of generic vertex attributes data (this has to be done after binding the vertex array object):
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glEnableVertexAttribArray(VERTICES);
glVertexAttribPointer(VERTICES, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
....
glBindBuffer(GL_ARRAY_BUFFER, 0);
If you would use an element buffer (GL_ELEMENT_ARRAY_BUFFER), then you would have to specify it now, because the name of (reference to) the element buffer object is stored in the vertex array object, and this has to be currently bound, when the element buffer object gets bound.
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, ..... );
Finally you can do glBindVertexArray(0). But there is no reason to do that. It is sufficient to bind a new vertex array object, before you specify a new mesh, or to bind the proper vertex array object before you draw a mesh.
Further there is no need for glDisableVertexAttribArray, as long as you don't want to change the vertex array object specification. The state "enabled" is stored in the vertex array object and kept there. If you bind a new vertex array object, then that object and all of its states become current.
Now drawing is simple:
glBindVertexArray(_vertexArrayObjectID);
glDrawArrays( .... );
Again there is no need of glBindVertexArray(0), after the draw call (especially in core mode).
Related
I know there are uniforms which can solve this problem but I wonder is it possible to add value like int using glVertexAttribPointer()?
Shader:
#version 330 core
// Point shader: per-vertex size drives gl_PointSize, color is forwarded.
layout (location = 0) in vec3 aPosition;
layout (location = 1) in vec3 aColor;
// Integer attribute: must be supplied with glVertexAttribIPointer /
// glVertexAttribI* on the CPU side, not the float glVertexAttribPointer.
layout (location = 2) in int aSize;
out vec3 vColor;
void main()
{
gl_Position = vec4(aPosition, 1.0f);
gl_PointSize = aSize; // implicit int -> float conversion
vColor = aColor;
}
The code:
// Uploads interleaved position+color data and describes the vertex layout.
makeCurrent();
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, points.vbo.size() * sizeof(GLfloat), &points.vbo.front(), GL_STATIC_DRAW); //look below to the struct
glVertexAttribPointer(0, points.vertexAmount, GL_FLOAT, GL_FALSE, points.stride * sizeof(GLfloat), (GLvoid *)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, points.colorAmount, GL_FLOAT, GL_FALSE, points.stride * sizeof(GLfloat), (GLvoid *)(points.vertexAmount * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
// For a single constant value there is no array to point at; the integer
// attribute can be set with glVertexAttribI1i(2, value) while its attribute
// array stays disabled (see discussion below).
glVertexAttribPointer(/* ??? how to add simple int here ??? */);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
doneCurrent();
points struct:
// CPU-side layout description for the interleaved point VBO:
// each vertex is [x, y, z, r, g, b], i.e. a stride of 6 floats.
struct RenderedObjects
{
std::vector<float> vbo; // interleaved vertex data
int vertexAmount = 3;   // floats per position
int colorAmount = 3;    // floats per color
int stride = vertexAmount + colorAmount;
int size = 1;           // presumably the per-point size attribute value -- confirm against shader use
} points;
glVertexAttribPointer is for arrays of data only. When you issue a draw call, the GPU will fetch the i-th element of each attribute array which is enabled, and it uses the vertex attrib pointer data to find that element in the VBOs.
For attribute inputs for which no attribute array is enabled, you can set the value as part of the GL state. The function family glVertexAttrib...() allows you to set the values for a generic vertex attribute.
OpenGL's naming conventions can get a bit confusing here. By default, vertex attributes are always treated as floating-point, even if you specify the input as integers (same with glVertexAttribPointer()s type parameter). So even if you might think glVertexAttrib1i() might set the value of an scalar integer attribute, it in reality deals with setting a float attribute, just by providing an integer value.
Integer attributes were added later to the GL, and you have to use glVertexAttribI() / glVertexAttribIPointer() (note the capital I letter) for these.
So the correct way to specify a single constant input for layout (location = 2) in int aSize; is:
glVertexAttribI1i(2, yourIntegerValue);
glDisableVertexAttribArray(2); // do NOT use an array for this attribute
(When you create a VAO, all attribute arrays are disabled initially, so you might not need to explicitly call the glDisableVertexAttribArray(2) here.)
Problem: Opengl is converting the integer array, I passed into a vertex array object, into a float array for some reason.
When I try to use the vertices as an ivec2 in my vertex shader I get some weird numbers as outputs, however if I use vec2 instead I get the expected output.
Code:
VertexShader:
#version 430 core
// Maps integer pixel-space positions into clip space using the window size.
// \/ This is what I'm referring to when talking about ivec2 and vec2
// Integer input: the matching array must be set with glVertexAttribIPointer.
layout (location = 0) in ivec2 aPos;
uniform uvec2 window; // window dimensions in pixels
void main()
{
float xPos = float(aPos.x)/float(window.x);
float yPos = float(aPos.y)/float(window.y);
gl_Position = vec4(xPos, yPos, 1.0f, 1.0f);
}
Passing the Vertex Array:
// Quad corners in integer pixel units.
GLint vertices[] =
{
-50, 50,
50, 50,
50, -50,
-50, -50
};
GLuint VBO, VAO;
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO); // \/ I'm passing it as an int
// NOTE(review): for an ivec2 shader input this must be glVertexAttribIPointer;
// glVertexAttribPointer converts the GL_INT data to floats (see answer below).
glVertexAttribPointer(0, 2, GL_INT, GL_FALSE, 2 * sizeof(GLint), (void*)0);
glEnableVertexAttribArray(0);
glUseProgram(sprite2DProgram);
// Upload the window size the shader divides by.
glUniform2ui(glGetUniformLocation(sprite2DProgram, "window"), 640, 480);
Output:
The first picture is what happens when I use ivec2 (the bad output).
The second picture is what happes when I use vec2 (the expected output).
If you need to know anything else please ask it in the comments.
For vertex attributes with an integral data it has to be used glVertexAttribIPointer (focus on the I), to define an array of generic vertex attribute data.
This means, that if you use the ivec2 data type for the attribute in the vertex shader:
in ivec2 aPos;
then you have to use glVertexAttribIPointer when you define the array of generic vertex attribute data.
Change your code like this, to solve the issue:
// glVertexAttribIPointer instead of glVertexAttribPointer
glVertexAttribIPointer(0, 2, GL_INT, GL_FALSE, 2 * sizeof(GLint), (void*)0);
See OpenGL 4.6 API Compatibility Profile Specification; 10.2. CURRENT VERTEX ATTRIBUTE VALUES; page 389:
When values for a vertex shader attribute variable are sourced from an enabled generic vertex attribute array,
the array must be specified by a command compatible with the data type of the variable.
The values loaded into a shader attribute variable bound to generic attribute index are undefined if the array for index was not specified by:
VertexAttribFormat, for floating-point base type attributes;
VertexAttribIFormat with type BYTE, SHORT, or INT for signed integer base type attributes; or
VertexAttribIFormat with type UNSIGNED_BYTE, UNSIGNED_SHORT, or UNSIGNED_INT for unsigned integer base type attributes.
Currently I have a system that contains 2 buffers, one created CPU side and set to a buffer. and then one from a ssbo and populated from another shader.
Here is the struct for the ssbo data:
// CPU mirror of the SSBO contents: an entry count followed by 1024 floats
// (populated by another shader, read back here as vertex attributes).
struct ssbo_data_t
{
int maxcount = 1024;  // number of entries in baz
float baz[1024];      // presumably packed x,y pairs (bound as vec2 "Corner Data" below) -- confirm
} ssbo_data;
Then the vao is build like:
//Build buffer
// Per-vertex light data lives in `buffer`; attributes 4-5 are sourced from
// the SSBO rebound as a plain GL_ARRAY_BUFFER.
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, arraysize * sizeof(DATATYPE), dataarr, GL_DYNAMIC_DRAW);
//Build vao
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
//index,size,type,norm,stride,pointerToFirst
glEnableVertexAttribArray(0); //Position of light
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1); //Color
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)(sizeof(DATATYPE) * 2));
glEnableVertexAttribArray(2); //Intensity
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, 0, (void*)(sizeof(DATATYPE) * 5));
glEnableVertexAttribArray(3); //Layer
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, 0, (void*)(sizeof(DATATYPE) * 6));
//Try to bind ssbo for lighting data.
glBindBuffer(GL_ARRAY_BUFFER, ssbo);
glEnableVertexAttribArray(4); //MaxSize
// NOTE(review): stride 0 means tightly packed, so vertices after the first
// read from baz[] as ints; a per-draw constant needs glVertexAttribDivisor
// (or a disabled array + glVertexAttribI*). An int shader input also needs
// glVertexAttribIPointer rather than glVertexAttribPointer.
glVertexAttribPointer(4, 1, GL_INT, GL_FALSE, 0, (void*)offsetof(ssbo_data_t,maxcount));
glEnableVertexAttribArray(5); //Corner Data
glVertexAttribPointer(5, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 2, (void*)offsetof(ssbo_data_t, baz));
My question is that if I set no stride for the one buffer, and then a stride for another part of the buffer it should work right? Or is there something i'm missing?
Because when I actually perform the draw call, the first line is drawn, but all others have a position of -1,-1, like the first buffer bound to the VAO was reset.
I am however receiving the correct positions from the other buffer, which shows it is working. It's like the first buffer is being unbound on the next draw call.
Due to how large the project is, I cant really post a working example.
As you can see here, I place the primitive to the left and the other object to the right.
The primitive is drawn, and sets its corner positions to the SSBO.
The second object is then drawn and creates 4 line segments from its position.
The first vertex, works as expected, and creates a line segment from its position and terminates on the corner of the box, specified by the VAO.
The next draws don't read the position correctly. But do take the correct information from the ssbo. So... ?
Might be helpful if I posted the vertex shader:
#version 430 core
// Light-segment vertex shader: locations 0-3 come from the per-vertex VBO,
// locations 4-5 from the SSBO rebound as a vertex buffer.
layout(location = 0) in vec2 aPos;
layout(location = 1) in vec3 aColor;
layout(location = 2) in float intensity;
layout(location = 3) in float layer;
// NOTE(review): an int attribute requires glVertexAttribIPointer on the CPU side.
layout(location = 4) in int maxcount;
layout(location = 5) in vec2 loc;
uniform vec2 screensize;
out VS_OUT{
vec3 color;
float intensity;
vec4 targ; // segment end point in clip space
} vs_out;
void main()
{
vec2 npos = ((aPos - (screensize / 2.0)) / screensize) * 2; //Convert mouse coords to screen coords.
gl_Position = vec4(npos, layer, 1.0);
vs_out.targ = vec4(loc, layer, 1.0);
vs_out.color = aColor;
vs_out.intensity = intensity;
}
To answer your question, yes you can mix and match strides and offsets in the same buffer and share vertex attribute data with an SSBO (Shader Storage Buffer Object).
Vertex stride
I'm not sure how you're trying to use the contents of the SSBO there. However, the binding of vertex attributes #4 and #5 looks fishy.
glVertexAttribPointer(4, 1, GL_INT, GL_FALSE, 0, (void*)offsetof(ssbo_data_t,maxcount));
The first vertex will have maxcount as expected in the x component. However, a stride of 0 means that consecutive vertex attributes are packed. The second vertex will therefore read 32 bits from baz[0] and treat that as an integer. The third will read baz[1] and so on. In short, only the first vertex will have the correct maxcount value and the rest will have bogus values.
To fix this, use instanced arrays (also known as vertex divisor). The function to use is glVertexAttribDivisor(). Another option is to bind your SSBO directly and read it as an SSBO (buffer type in GLSL). See the SSBO OpenGL wiki page for an example.
Finally:
glVertexAttribPointer(5, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 2, (void*)offsetof(ssbo_data_t, baz));
You can use a stride of 0 here because your attribute values are tightly packed. sizeof(float) * 2 gives the same result.
The (wrong) solution
I wrongly stated that BindVertexArray() resets the current GL_ARRAY_BUFFER binding. This is not the case. It does reset the indexed vertex buffer binding though but you already set those.
I'm creating a set of classes to read in 3d objects from COLLADA files. I started with some basic code to read in the positions and normals and plot them with opengl. I added code to scale the vertices successfully and added all the code I need to read in the color or texture connected with each graphics element in the COLLADA file. But now I need to add the code to draw the vertices with color. I have created the buffer object array to house the color array for each of the vertices array and buffer objects.
This is the code I have to build the arrays from data I obtain from the COLLADA file:
Keep in mind I am still creating this it's not perfect.
// Set vertex coordinate data
glBindBuffer(GL_ARRAY_BUFFER, vbosPosition[i]);
glBufferData(GL_ARRAY_BUFFER, col->vectorGeometry[i].map["POSITION"].size,
scaledData, GL_STATIC_DRAW);
free(scaledData);
loc = glGetAttribLocation(program, "in_coords");//get a GLuint for the attribute and put it into GLuint loc.
glVertexAttribPointer(loc, col->vectorGeometry[i].map["POSITION"].stride, col->vectorGeometry[i].map["POSITION"].type, GL_FALSE, 0, 0);//glVertexAttribPointer — loc specifies the index of the generic vertex attribute to be modified.
glEnableVertexAttribArray(0);
#ifdef Testing_Mesh3D
PrintGLVertex(vbosPosition[i], col->vectorGeometry[i].map["POSITION"].size / 4);
#endif // Set normal vector data
glBindBuffer(GL_ARRAY_BUFFER, vbosNormal[i]);
glBufferData(GL_ARRAY_BUFFER, col->vectorGeometry[i].map["NORMAL"].size, col->vectorGeometry[i].map["NORMAL"].data, GL_STATIC_DRAW);
loc = glGetAttribLocation(program, "in_normals");
glVertexAttribPointer(loc, col->vectorGeometry[i].map["NORMAL"].stride, col->vectorGeometry[i].map["NORMAL"].type, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, vbosColor[i]);
Material* material = col->mapGeometryUrlToMaterial2Effect[col->vectorGeometry[i].id];
if (material->effect1.size() > 0)
{
Effect effect1 = material->effect1[0];
if (effect1.type == enumEffectTypes::color)
{
Color color = effect1.color;
glBufferData(GL_ARRAY_BUFFER, color.length, color.values, GL_STATIC_DRAW);
loc = glGetAttribLocation(program, "in_colors");
glVertexAttribPointer(loc, color.length, color.type, GL_FALSE, 0, 0);
}
else
{
}
}
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
// Initialize uniform data
void Mesh3D::InitializeUniforms(GLuint program) {
GLuint program_index, ubo_index;
struct LightParameters params;
// Specify the rotation matrix
glm::vec4 diff_color = glm::vec4(0.3f, 0.3f, 1.0f, 1.0f);
GLint location = glGetUniformLocation(program, "diffuse_color");
glUniform4fv(location, 1, &(diff_color[0]));
// Initialize UBO data
params.diffuse_intensity = glm::vec4(0.5f, 0.5f, 0.5f, 1.0f);
params.ambient_intensity = glm::vec4(0.3f, 0.3f, 0.3f, 1.0f);
params.light_direction = glm::vec4(-1.0f, -1.0f, 0.25f, 1.0f);
// Set the uniform buffer object
glUseProgram(program);
glGenBuffers(1, &ubo);
glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferData(GL_UNIFORM_BUFFER, 3 * sizeof(glm::vec4), &params, GL_STREAM_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
glUseProgram(program);
// Match the UBO to the uniform block
glUseProgram(program);
ubo_index = 0;
program_index = glGetUniformBlockIndex(program, "LightParameters");
glUniformBlockBinding(program, program_index, ubo_index);
glBindBufferRange(GL_UNIFORM_BUFFER, ubo_index, ubo, 0, 3 * sizeof(glm::vec4));
glUseProgram(program);
This is a header file containing the two string literals housing the strings used to build the vertex and fragment shader. Again, I am new to this and not sure how I need to modify the shader to include colored vertices; I have started by adding an input vec4 for the four-float color (includes alpha). Any help?
#pragma once
#ifndef Included_shaders
#define Included_shaders
#include<stdio.h>
#include<iostream>
// Vertex shader source: passes the position through unchanged and forwards
// the normal to the fragment stage.
// NOTE(review): in_colors is declared but never forwarded; a matching
// out/in varying pair is needed before per-vertex color can reach the
// fragment shader.
static std::string shaderVert = "#version 330\n"
"in vec3 in_coords;\n"
"in vec3 in_normals;\n"
"in vec4 in_colors; \n"//added by me
"out vec3 vertex_normal;\n"
"void main(void) {\n"
"vertex_normal = in_normals;\n"
"gl_Position = vec4(in_coords, 1.0);\n"
"}\n";
// Fragment shader source: directional Blinn lighting combining a uniform
// diffuse color with a std140 "LightParameters" uniform block.
static std::string shaderFrag = "#version 330\n"
"in vec3 vertex_normal;\n"
"out vec4 output_color;\n"
"layout(std140) uniform LightParameters{\n"
"vec4 diffuse_intensity;\n"
"vec4 ambient_intensity;\n"
"vec4 light_direction;\n"
"};\n"
"uniform vec4 diffuse_color;\n"
"void main() {\n"
"/* Compute cosine of angle of incidence */\n"
"float cos_incidence = dot(vertex_normal, light_direction.xyz);\n"
"cos_incidence = clamp(cos_incidence, 0, 1);\n"
"/* Compute Blinn term */\n"
"vec3 view_direction = vec3(0, 0, 1);\n"
"vec3 half_angle = normalize(light_direction.xyz + view_direction);\n"
"float blinn_term = dot(vertex_normal, half_angle);\n"
"blinn_term = clamp(blinn_term, 0, 1);\n"
"blinn_term = pow(blinn_term, 1.0);\n"
"/* Set specular color and compute final color */\n"
"vec4 specular_color = vec4(0.25, 0.25, 0.25, 1.0);\n"
"output_color = ambient_intensity * diffuse_color +\n"
"diffuse_intensity * diffuse_color * cos_incidence +\n"
"diffuse_intensity * specular_color * blinn_term;\n"
"}\n";
#endif
Finally this is the funciton I am modifying to draw the colored elements
// Draws every mesh in the scene, one VAO per geometry entry, then swaps.
void Mesh3D::DrawToParent()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Draw elements of each mesh in the vector
for (int i = 0; i<nVectorGeometry; i++)
{
glBindVertexArray(vaos[i]);
// NOTE(review): in a core profile the last argument must be an offset into
// a bound GL_ELEMENT_ARRAY_BUFFER (usually nullptr), not a client-side
// index pointer as passed here -- confirm an index buffer is stored in the VAO.
glDrawElements(col->vectorGeometry[i].primitive/*This is 4 for GL_Triangles*/, col->vectorGeometry[i].index_count,
GL_UNSIGNED_SHORT, col->vectorGeometry[i].indices);
}
glBindVertexArray(0);
glutSwapBuffers();
}
I am getting a little confused about glVertexAttribPointer and glGetAttribLocation, though I think I get the basic idea. Am I using these right?
Am I setting up the buffer object for colors correctly. Am I correct I have a color for each vertex in this buffer, right now I have only placed the single color that applies to all associated buffers in this array and probably need to change that?
How exactly do I go about drawing the colored vertices when I call glDrawElements?
Don't just refer me to the resources for opengl a lot of the wordy explanations make little sense to me.
Make your vertex shader output color and make the fragment shader take it as input. The color will be interpolated between vertices.
Yes, it seems you have understood glAttribPointer correctly.
DrawElements takes the indices of the vertices you want to draw. The last argument should not contain the indices. Instead, it should probably be null. The indices should be specified with glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ...).
If the color buffer is correctly bound you don't need to do anything special in drawElements for the colors. The shaders will get all the enabled attribarrays.
I can help you better if you run the code and you tell me what problems you get. It would also help if the code was easier to read. If you split it into functions under ten lines in length you may spot some errors yourself and possibly remove some duplication.
My use case is a bunch a textured quads that I want to draw. I'm trying to use the same indexed array of a quad to draw it a bunch of times and use the gl_InstanceID and gl_VertexID in GLSL to retrieve texture and position info from a Texture Buffer.
The way I understand a Texture Buffer is that I create it and my actual buffer, link them, and then whatever I put in the actual buffer magically appears in my texture buffer?
So I have my vertex data and index data:
// Interleaved quad vertex: clip-space position (vec4) plus texture coordinate.
struct Vertex
{
GLfloat position[4];
GLfloat uv[2];
};
// The four corners of a full quad with matching UVs.
Vertex m_vertices[4] =
{
{{-1,1,0,1},{0,1}},
{{1,1,0,1},{1,1}},
{{-1,-1,0,1},{0,0}},
{{1,-1,0,1},{1,0}}
};
// Two triangles: (0,2,1) and (1,2,3) -- six indices total.
GLuint m_indices[6] = {0,2,1,1,2,3};
Then I create my VAO, VBO and IBO for the quads:
// Creates the quad's VBO, VAO and IBO.
glGenBuffers(1,&m_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER,m_vertexBuffer);
glBufferData(GL_ARRAY_BUFFER,sizeof(Vertex)*4,&m_vertices,GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER,0);

glGenVertexArrays(1,&m_vao);
glBindVertexArray(m_vao);
glBindBuffer(GL_ARRAY_BUFFER,m_vertexBuffer);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0,4,GL_FLOAT, GL_FALSE, sizeof(struct Vertex),(const GLvoid*)offsetof(struct Vertex, position));
glEnableVertexAttribArray(1);
// BUG FIX: the original passed attribute index 0 here, which overwrote the
// position pointer and left attribute 1 (the UVs) unspecified.
glVertexAttribPointer(1,2,GL_FLOAT, GL_FALSE, sizeof(struct Vertex),(const GLvoid*)offsetof(struct Vertex, uv));

// The element-array binding is stored inside the VAO, so bind the IBO while
// the VAO is bound and do NOT unbind GL_ELEMENT_ARRAY_BUFFER before
// unbinding the VAO -- doing so would strip the VAO of its index buffer.
glGenBuffers(1, &m_ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)*6,&m_indices,GL_STATIC_DRAW);

glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER,0);
I'm pretty sure that I've done the above correctly. My quads have 4 vertices, with six indexes to draw triangles.
Next I create my buffer and texture for the the Texture Buffer:
// Backing buffer + buffer texture: each quad owns 4 RG32F texels
// (position, dimensions, uv origin, uv size), with room for 100 quads.
glGenBuffers(1,&m_xywhuvBuffer);
glBindBuffer(GL_TEXTURE_BUFFER, m_xywhuvBuffer);
glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat)*8*100, nullptr, GL_DYNAMIC_DRAW); // 8 floats
glGenTextures(1,&m_xywhuvTexture);
glBindTexture(GL_TEXTURE_BUFFER, m_xywhuvTexture);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, m_xywhuvBuffer); // they're in pairs of 2, in r,g of each texel.
glBindBuffer(GL_TEXTURE_BUFFER,0);
So, the idea is that every four texels belongs to one quad, or gl_InstanceID.
When I'm drawing my quads, they execute the below:
// Per-quad update: write this quad's 8 floats (clip-space position, size,
// uv origin, uv size) into its slot of the texture buffer.
glActiveTexture(GL_TEXTURE0);
glBindBuffer(GL_TEXTURE_BUFFER, m_xywhuvBuffer);
std::vector<GLfloat> xywhuz =
{
-1.0f + position.x / screenDimensions.x * 2.0f, // pixel -> clip-space x
1.0f - position.y / screenDimensions.y * 2.0f,  // pixel -> clip-space y (flipped)
dimensions.x / screenDimensions.x,
dimensions.y / screenDimensions.y,
m_region.x,
m_region.y,
m_region.w,
m_region.h
};
glBufferSubData(GL_TEXTURE_BUFFER, sizeof(GLfloat)*8*m_rectsDrawnThisFrame, sizeof(GLfloat)*8, xywhuz.data());
m_rectsDrawnThisFrame++;
So I increase m_rectsDrawThisFrame for each quad. You'll notice that the data I'm passing is 8 GLfloats, so each of the 4 texels that belong to each gl_InstanceID is the x,y position, the width and height, and then the same details for the real texture that I'm going to texture my quads with.
Finally once all of my rects have updated their section of the GL_TEXTURE_BUFFER I run this:
// Final draw: the atlas texture goes on unit 1 (unit 0 holds the buffer
// texture read by the "buf" samplerBuffer), then all quads are drawn in a
// single instanced call.
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D,texture); // this is my actual texture that the quads take a section from to texture themselves.
glUniform1i(m_program->GetUniformLocation("tex"),1);
glUniform4f(m_program->GetUniformLocation("color"),1,0,1,1);
glBindVertexArray(m_vao);
// BUG FIX: the quad has 6 indices (two triangles), not 4.
glDrawElementsInstanced(GL_TRIANGLES,6,GL_UNSIGNED_INT,0,m_rectsDrawnThisFrame);
m_rectsDrawnThisFrame = 0;
I reset the draw count. I also noticed that I had to activate the texture in the second slot. Does the Texture Buffer Object use up one?
Finally my Vert shader
#version 410
// Instanced quad vertex shader: per-instance placement and UV rectangle are
// fetched from a samplerBuffer (4 RG32F texels per instance) and the corner
// is selected by gl_VertexID.
layout (location = 0) in vec4 in_Position;
layout (location = 1) in vec2 in_UV;
out vec2 ex_texcoord;
uniform samplerBuffer buf;
void main(void)
{
// Texel layout per instance: [0]=position, [1]=dimensions, [2]=uv origin, [3]=uv size.
vec2 position = texelFetch(buf,gl_InstanceID*4).xy;
vec2 dimensions = texelFetch(buf,gl_InstanceID*4+1).xy;
vec2 uvXY = texelFetch(buf,gl_InstanceID*4+2).xy;
vec2 uvWH = texelFetch(buf,gl_InstanceID*4+3).xy;
// NOTE(review): in_Position/in_UV are never read -- the corners are
// reconstructed purely from gl_VertexID (0..3).
if(gl_VertexID == 0)
{
gl_Position = vec4(position.xy,0,1);
ex_texcoord = uvXY;
}
else if(gl_VertexID == 1)
{
gl_Position = vec4(position.x + dimensions.x, position.y,0,1);
ex_texcoord = vec2(uvXY.x + uvWH.x, uvXY.y);
}
else if(gl_VertexID == 2)
{
gl_Position = vec4(position.x, position.y + dimensions.y, 0,1);
ex_texcoord = vec2(uvXY.x, uvXY.y + uvWH.y);
}
else if(gl_VertexID == 3)
{
gl_Position = vec4(position.x + dimensions.x, position.y + dimensions.y, 0,1);
ex_texcoord = vec2(uvXY.x + uvWH.x, uvXY.y + uvWH.y );
}
}
And my Frag shader
#version 410
// Samples the atlas texture and tints the result by the uniform color.
in vec2 ex_texcoord;
uniform sampler2D tex;
uniform vec4 color = vec4(1,1,1,1);
layout (location = 0) out vec4 FragColor;
void main()
{
FragColor = texture(tex,ex_texcoord) * color;
}
Now the problem, after I'm getting no errors reported in GLIntercept, is that I'm getting nothing drawn on the screen.
Any help?
There is one subtle issue in your code that would certainly stop it from working. At the end of the VAO/VBO setup code, you have this:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
glBindVertexArray(0);
The GL_ELEMENT_ARRAY_BUFFER binding is part of the VAO state. If you unbind it while the VAO is bound, this VAO will not have an element array buffer binding. Which means that you don't have indices when you draw later.
You should simply remove this call:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
Also, since you have 6 indices, the second argument to the draw call should be 6:
glDrawElementsInstanced(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0, m_rectsDrawnThisFrame);
Apart from that, it all looks reasonable to me. But there's quite a lot of code, so I can't guarantee that I would have spotted all problems.
I also noticed that I had to activate the texture in the second slot. Does the Texture Buffer Object use up one?
Yes. The buffer texture needs to be bound, and the value of the sampler variable set to the corresponding texture unit. Since you bind the buffer texture during setup, never unbind it, and the default value of the sampler variable is 0, you're probably fine there. But I think it would be cleaner to set it up more explicitly. Where you prepare for drawing:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER, m_xywhuvTexture);
glUniform1i(m_program->GetUniformLocation("buf"), 0);