OpenGL Setting SSBO for Compute Shader - c++

I've been having trouble sending data to an SSBO for use by a compute shader. Unfortunately, the Khronos docs say "TODO", I can't make their sample code work, and people seem to do very slightly different things (Example 1, Example 2, Example 3) - can anyone help?
(I've snipped out other parts of code I've written that I don't think are relevant - but the entire codebase is here.) Here's what I've got so far:
SSBO Initialization with some data
std::vector<glm::vec4> data = { glm::vec4(1.0, 0.0, 0.0, 1.0), glm::vec4(1.0, 0.0, 0.0, 1.0) };
GLuint SSBO;
glGenBuffers(1, &SSBO);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, SSBO);
glBufferData(GL_SHADER_STORAGE_BUFFER, data.size() * sizeof(glm::vec4), &data[0], GL_DYNAMIC_DRAW);
//the khronos docs put this line in
//glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, SSBO);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
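A quick sanity check (this check is my addition, not part of the snipped code) is to read the buffer straight back with glGetBufferSubData and confirm the upload itself worked before suspecting the shader side:
std::vector<glm::vec4> readback(data.size());
glBindBuffer(GL_SHADER_STORAGE_BUFFER, SSBO);
glGetBufferSubData(GL_SHADER_STORAGE_BUFFER, 0, readback.size() * sizeof(glm::vec4), readback.data());
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
// readback[0] and readback[1] should both be (1.0, 0.0, 0.0, 1.0)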
Update loop
s_Data.compute_shader.use();
-- snip: bind a texture --
int ssbo_binding = 1;
int block_index = glGetProgramResourceIndex(s_Data.compute_shader.ID, GL_SHADER_STORAGE_BLOCK, "bufferData");
glShaderStorageBlockBinding(s_Data.compute_shader.ID, block_index, ssbo_binding );
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, ssbo_binding, SSBO);
glDispatchCompute( X workers, Y workers, 1);
//Synchronize all writes to the framebuffer image
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
// Reset bindings
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, ssbo_binding, 0);
glBindImageTexture(0, 0, 0, false, 0, GL_READ_WRITE, GL_RGBA32F);
glUseProgram(0);
-- snip: render output texture to screen --
Compute Shader
#version 430 core
layout(binding = 0, rgba16f) uniform image2D outTexture;
layout(std430, binding = 1 ) readonly buffer bufferData
{
vec4 data[];
};
layout (local_size_x = 16, local_size_y = 8) in;
void main(void) {
ivec2 px = ivec2(gl_GlobalInvocationID.xy);
ivec2 size = imageSize(outTexture);
vec3 color;
if(data.length() > 0)
{
//green = data
color = vec3(0.2, 0.6, 0.2);
} else
{
//red = bad
color = vec3(0.6, 0.2, 0.2);
}
imageStore(outTexture, px, vec4(color, 1.0));
}
Currently my screen displays red, indicating that no data is reaching the shader via the SSBO.
Edit:
Found the issue: the .length() method on the buffer's array does not work in the compute shader - it returns the wrong value. I queried data[0] and data[1] directly and they returned the correctly set values, so the data is arriving; .length() is the problem (though I don't necessarily have the solution).
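A common workaround (a sketch on my part, not a confirmed fix) is to stop relying on .length() and pass the element count in explicitly as a uniform:
// In the compute shader, next to the buffer declaration:
// uniform int dataCount; // element count supplied by the application
// ...then test dataCount > 0 instead of data.length() > 0.
// In the update loop, after compute_shader.use():
glUniform1i(glGetUniformLocation(s_Data.compute_shader.ID, "dataCount"), (GLint)data.size());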

Related

Opengl: How to map a buffer CORRECTLY?

I'm new to Stack Overflow. I have the following problem: I want to have a rotating triangle in my window.
I have already managed to get my code running, and I had a rotating triangle. However, I wanted to rewrite the code for learning purposes, adding the following two major things:
Updating the buffer object later on with memcpy
Having an array-of-structures (AoS) instead of a structure-of-arrays (SoA)
I'm referring here to the "OpenGL Superbible" book.
I'll provide you with some code snippets:
glGenVertexArrays(1, &vao);
static const vertex vertices[] = {
{ 0.25, -0.25, 0.5, 1.0 ,
1.0, 0.0, 0.0, 1.0},
{ -0.25, -0.25, 0.5, 1.0,
0.0, 1.0, 0.0, 1.0 },
{ 0.25, 0.25, 0.5, 1.0,
0.0, 0.0, 1.0, 1.0 }
};
glCreateBuffers(1, &buffer);
glNamedBufferStorage(buffer, sizeof(vertices), NULL, 0);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
void * ptr = glMapNamedBuffer(buffer, GL_WRITE_ONLY);
memcpy(ptr, vertices, sizeof(vertices));
glUnmapNamedBuffer(GL_ARRAY_BUFFER);
glVertexArrayVertexBuffer(vao, 0, buffer, 0, sizeof(vmath::vec4));
// Positions
glVertexArrayAttribBinding(vao, 0, 0);
glVertexArrayAttribFormat(vao, 0, 4, GL_FLOAT, GL_FALSE, offsetof(vertex, x));
glEnableVertexArrayAttrib(vao, 0);
// Color
glVertexArrayAttribBinding(vao, 1, 0);
glVertexArrayAttribFormat(vao, 1, 4, GL_FLOAT, GL_FALSE, offsetof(vertex, r));
glEnableVertexArrayAttrib(vao, 1);
glVertexArrayVertexBuffer(vao, 0, buffer, 0, sizeof(vertex));
I set up the vertex struct as follows:
struct vertex {
// Position
float x;
float y;
float z;
float w;
// Color
float r;
float g;
float b;
float a;
};
The first time, I had the color hard-coded in my vertex shader, and I had the position data in a data array. I set the data directly by calling glNamedBufferStorage instead of (as is now the case) passing NULL. Back then, it worked. But when I changed the two things above, it stopped working. I know for sure that both of these major steps include some errors.
Here I'll provide you with the vertex shader. The mvp matrix works, by the way, so that's not the problem.
#version 420 core
layout (location = 0) in vec4 position;
layout (location = 1) in vec4 color;
out vec4 vs_color;
uniform mat4 mvp;
void main(void)
{
gl_Position = mvp * position;
vs_color = color;
}
Any hints would be greatly appreciated.
There are several problems in the code:
glUnmapNamedBuffer takes the buffer handle as parameter, not a GLenum. Change glUnmapNamedBuffer(GL_ARRAY_BUFFER); to glUnmapNamedBuffer(buffer);. (And you should check the return value; it returns GL_FALSE when there is a problem.)
The stride for glVertexArrayVertexBuffer is wrong. Each vertex starts 8 floats after the previous one (or sizeof(vertex)). Change
glVertexArrayVertexBuffer(vao, 0, buffer, 0, sizeof(vmath::vec4));
to
glVertexArrayVertexBuffer(vao, 0, buffer, 0, sizeof(vertex));
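Putting both fixes together, the upload path would look roughly like this (with one extra assumption on my part: the storage must be created with GL_MAP_WRITE_BIT, because immutable storage created with flags of 0 cannot be mapped for writing at all):
glCreateBuffers(1, &buffer);
glNamedBufferStorage(buffer, sizeof(vertices), NULL, GL_MAP_WRITE_BIT); // mappable for writes
void * ptr = glMapNamedBuffer(buffer, GL_WRITE_ONLY);
memcpy(ptr, vertices, sizeof(vertices));
if (glUnmapNamedBuffer(buffer) == GL_FALSE) {
    // contents may have been corrupted while mapped; re-upload the data
}
glVertexArrayVertexBuffer(vao, 0, buffer, 0, sizeof(vertex)); // stride = whole vertex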

Problems using glReadPixels with GLSL

I'm trying to render some particles and save the scene to a BMP file.
Here is my code:
// vertex shader
const char *vertexShader = STRINGIFY(
uniform float pointRadius; // point size in world space
uniform float pointScale; // scale to calculate size in pixels
void main()
{
// calculate window-space point size
vec3 posEye = vec3(gl_ModelViewMatrix * vec4(gl_Vertex.xyz, 1.0));
float dist = length(posEye);
gl_PointSize = pointRadius * (pointScale / dist);
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = gl_ModelViewProjectionMatrix * vec4(gl_Vertex.xyz, 1.0);
gl_FrontColor = gl_Color;
}
);
// pixel shader for rendering points as shaded spheres
const char *spherePixelShader = STRINGIFY(
void main()
{
const vec3 lightDir = vec3(0.577, 0.577, 0.577);
// calculate normal from texture coordinates
vec3 N ;
N.xy = gl_TexCoord[0].xy*vec2(2.0, -2.0) + vec2(-1.0, 1.0);
float mag = dot(N.xy, N.xy);
if (mag > 1.0) discard; // kill pixels outside circle
N.z = sqrt(1.0 - mag);
// calculate lighting
float diffuse = max(0.0, dot(lightDir, N));
gl_FragColor = gl_Color *diffuse;
}
);
Here is the rendering code.
The positions of the particles are stored in the VBO target_point_buffer, as well as the corresponding color data:
void display()
{
//pointsprite
glEnable(GL_POINT_SPRITE);
glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE);
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE_NV);
glDepthMask(GL_TRUE);
glEnable(GL_DEPTH_TEST);
//attach shader
glUseProgram(program);
glUniform1f(glGetUniformLocation(program, "pointScale"), winHeight / tanf(fov*0.5f*(float)M_PI / 180.0f));
glUniform1f(glGetUniformLocation(program, "pointRadius"),radius[0]*scale);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//use vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(3, GL_DOUBLE, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
//color buffer
glBindBuffer(GL_ARRAY_BUFFER, color_vbo);
glColorPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_COLOR_ARRAY);
Drawsomething();
}
//Save the scene as a BMP file
void save_as_bmp(char *filename)
{
GLbyte pBits[Imagesize];
GLint iViewPort[4];
GLuint lastBuffer;
glGetIntegerv(GL_VIEWPORT,iViewPort);
glGetIntegerv(GL_READ_BUFFER,&lastBuffer);
glReadPixels(iViewPort[0], iViewPort[1], iViewPort[2], iViewPort[3], GL_BGR, GL_UNSIGNED_BYTE, pBits);
writeBMP(filename,pBits);
}
I've got the expected scene, like this:
However, when I tried to save the scene as a BMP file, the result was not what I expected:
I suppose that it might be something wrong with the gl_TexCoord in the shader, but I can't figure it out. Can anyone help?
Set GL_PACK_ALIGNMENT to 1 before your glReadPixels() call if you're going to use a three-component format like GL_BGR with GL_UNSIGNED_BYTE.
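The default pack alignment is 4, so with a 3-bytes-per-pixel format each row of the readback gets padded to a 4-byte boundary whenever width * 3 is not a multiple of 4, which shears the image. A minimal sketch of the fix (only the glPixelStorei call is new; the rest is from the question):
glPixelStorei(GL_PACK_ALIGNMENT, 1); // rows tightly packed, no row padding
glReadPixels(iViewPort[0], iViewPort[1], iViewPort[2], iViewPort[3], GL_BGR, GL_UNSIGNED_BYTE, pBits);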

How to specify UBO binding with bindless?

I create 2+ simple UBOs with vec4 colorA's and colorB's like so. I only get a black screen using the needlessly complex UBO creation/binding process with std140. How do I specify which index while using glCreate*, and how do I get this mess to work so I can choose colorA or colorB?
//APP
glCreateBuffers(1, &testUBO);
glNamedBufferData(testUBO, sizeof(glm::vec4), 0, GL_DYNAMIC_DRAW);
glGetNamedBufferParameterui64vNV(testUBO, GL_BUFFER_GPU_ADDRESS_NV, &uboScene_64);
glMakeNamedBufferResidentNV(testUBO, GL_READ_ONLY);
glm::vec4 myVec4 = glm::vec4(0.f, 1.f, 0.f, 1.f); //add to structs.h
glNamedBufferSubData(testUBO, 0, sizeof(glm::vec4), &myVec4);
//SHARED HEADER
typedef glm::vec4 vec4;
layout(std140, binding = 0) uniform sceneBuffer
{
vec4 colorA;
};
layout(std140, binding = 1) uniform objectBuffer
{
vec4 colorB;
};
//SHADER PROGRAM
void main()
{
Ci = colorA;
Ci = colorB;
}
Given this shader:
//GLSL
layout(std140, binding = 0) uniform sceneBuffer
{
vec4 colorA;
};
layout(std140, binding = 1) uniform objectBuffer
{
vec4 colorB;
};
And this C++ buffer initialization code:
//Create scene buffer.
glCreateBuffers(1, &sceneUbo);
glNamedBufferStorage(sceneUbo, sizeof(glm::vec4), 0, GL_DYNAMIC_STORAGE_BIT);
glm::vec4 myVec4 = glm::vec4(0.f, 1.f, 0.f, 1.f);
glNamedBufferSubData(sceneUbo, 0, sizeof(glm::vec4), &myVec4);
//Create object buffer
glCreateBuffers(1, &objectUbo);
glNamedBufferStorage(objectUbo, sizeof(glm::vec4), 0, GL_DYNAMIC_STORAGE_BIT);
myVec4 = glm::vec4(0.f, 1.f, 0.f, 1.f);
glNamedBufferSubData(objectUbo, 0, sizeof(glm::vec4), &myVec4);
Here is what the NV_uniform_buffer_unified_memory "bindless" code looks like:
//Get addresses
GLuint64 sceneUboAddr;
glGetNamedBufferParameterui64vNV(sceneUbo, GL_BUFFER_GPU_ADDRESS_NV, &sceneUboAddr);
glMakeNamedBufferResidentNV(sceneUbo, GL_READ_ONLY);
GLuint64 objectUboAddr;
glGetNamedBufferParameterui64vNV(objectUbo, GL_BUFFER_GPU_ADDRESS_NV, &objectUboAddr);
glMakeNamedBufferResidentNV(objectUbo, GL_READ_ONLY);
//You have to call this to turn on bindless buffers.
glEnableClientState(GL_UNIFORM_BUFFER_UNIFIED_NV);
//0 represents the scene UBO's `binding` from GLSL:
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, 0, sceneUboAddr, sizeof(glm::vec4));
//1 represents the object UBO's `binding` from GLSL:
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, 1, objectUboAddr, sizeof(glm::vec4));
Note that this extension effectively requires glEnable/DisableClientState, which is a function that was removed from the core profile. So you kind of need to use a compatibility profile to use it.
And just to prove that the non-bindless code is hardly "needlessly complex", here it is:
//The first 0 represents the scene UBO's `binding` from GLSL:
glBindBufferRange(GL_UNIFORM_BUFFER, 0, sceneUbo, 0, sizeof(glm::vec4));
//1 represents the object UBO's `binding` from GLSL:
glBindBufferRange(GL_UNIFORM_BUFFER, 1, objectUbo, 0, sizeof(glm::vec4));
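One caveat worth adding (my note, not part of the original answer): the offset argument of glBindBufferRange must be a multiple of the implementation's uniform buffer offset alignment. The offsets here are 0, so this code is fine, but if you ever sub-allocate several UBOs from one buffer, query the alignment first:
GLint uboAlign = 0;
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &uboAlign);
// every offset passed to glBindBufferRange(GL_UNIFORM_BUFFER, ...) must be a multiple of uboAlign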

How to use glDrawElementsInstanced + Texture Buffer Objects?

My use case is a bunch of textured quads that I want to draw. I'm trying to use the same indexed array of a quad to draw it a bunch of times and use gl_InstanceID and gl_VertexID in GLSL to retrieve texture and position info from a Texture Buffer.
The way I understand a Texture Buffer is that I create it and my actual buffer, link them, and then whatever I put in the actual buffer magically appears in my texture buffer?
So I have my vertex data and index data:
struct Vertex
{
GLfloat position[4];
GLfloat uv[2];
};
Vertex m_vertices[4] =
{
{{-1,1,0,1},{0,1}},
{{1,1,0,1},{1,1}},
{{-1,-1,0,1},{0,0}},
{{1,-1,0,1},{1,0}}
};
GLuint m_indices[6] = {0,2,1,1,2,3};
Then I create my VAO, VBO and IBO for the quads:
glGenBuffers(1,&m_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER,m_vertexBuffer);
glBufferData(GL_ARRAY_BUFFER,sizeof(Vertex)*4,&m_vertices,GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER,0);
glGenVertexArrays(1,&m_vao);
glBindVertexArray(m_vao);
glBindBuffer(GL_ARRAY_BUFFER,m_vertexBuffer);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0,4,GL_FLOAT, GL_FALSE, sizeof(struct Vertex),(const GLvoid*)offsetof(struct Vertex, position));
glEnableVertexAttribArray(1);
glVertexAttribPointer(0,2,GL_FLOAT, GL_FALSE, sizeof(struct Vertex),(const GLvoid*)offsetof(struct Vertex, uv));
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER,0);
glBindVertexArray(m_vao);
glGenBuffers(1, &m_ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)*6,&m_indices,GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
glBindVertexArray(0);
I'm pretty sure that I've done the above correctly. My quads have 4 vertices, with six indices to draw triangles.
Next I create my buffer and texture for the Texture Buffer:
glGenBuffers(1,&m_xywhuvBuffer);
glBindBuffer(GL_TEXTURE_BUFFER, m_xywhuvBuffer);
glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat)*8*100, nullptr, GL_DYNAMIC_DRAW); // 8 floats
glGenTextures(1,&m_xywhuvTexture);
glBindTexture(GL_TEXTURE_BUFFER, m_xywhuvTexture);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, m_xywhuvBuffer); // they're in pairs of 2, in r,g of each texel.
glBindBuffer(GL_TEXTURE_BUFFER,0);
So, the idea is that every four texels belong to one quad, or one gl_InstanceID.
When I'm drawing my quads, they execute the below:
glActiveTexture(GL_TEXTURE0);
glBindBuffer(GL_TEXTURE_BUFFER, m_xywhuvBuffer);
std::vector<GLfloat> xywhuz =
{
-1.0f + position.x / screenDimensions.x * 2.0f,
1.0f - position.y / screenDimensions.y * 2.0f,
dimensions.x / screenDimensions.x,
dimensions.y / screenDimensions.y,
m_region.x,
m_region.y,
m_region.w,
m_region.h
};
glBufferSubData(GL_TEXTURE_BUFFER, sizeof(GLfloat)*8*m_rectsDrawnThisFrame, sizeof(GLfloat)*8, xywhuz.data());
m_rectsDrawnThisFrame++;
So I increase m_rectsDrawnThisFrame for each quad. You'll notice that the data I'm passing is 8 GLfloats, so the 4 texels that belong to each gl_InstanceID hold the x,y position, the width and height, and then the same details for the region of the real texture that I'm going to texture my quads with.
Finally once all of my rects have updated their section of the GL_TEXTURE_BUFFER I run this:
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D,texture); // this is my actual texture that the quads take a section from to texture themselves.
glUniform1i(m_program->GetUniformLocation("tex"),1);
glUniform4f(m_program->GetUniformLocation("color"),1,0,1,1);
glBindVertexArray(m_vao);
glDrawElementsInstanced(GL_TRIANGLES,4,GL_UNSIGNED_INT,0,m_rectsDrawnThisFrame);
m_rectsDrawnThisFrame = 0;
I reset the draw count. I also noticed that I had to activate the texture in the second slot. Does the Texture Buffer Object use up one?
Finally my Vert shader
#version 410
layout (location = 0) in vec4 in_Position;
layout (location = 1) in vec2 in_UV;
out vec2 ex_texcoord;
uniform samplerBuffer buf;
void main(void)
{
vec2 position = texelFetch(buf,gl_InstanceID*4).xy;
vec2 dimensions = texelFetch(buf,gl_InstanceID*4+1).xy;
vec2 uvXY = texelFetch(buf,gl_InstanceID*4+2).xy;
vec2 uvWH = texelFetch(buf,gl_InstanceID*4+3).xy;
if(gl_VertexID == 0)
{
gl_Position = vec4(position.xy,0,1);
ex_texcoord = uvXY;
}
else if(gl_VertexID == 1)
{
gl_Position = vec4(position.x + dimensions.x, position.y,0,1);
ex_texcoord = vec2(uvXY.x + uvWH.x, uvXY.y);
}
else if(gl_VertexID == 2)
{
gl_Position = vec4(position.x, position.y + dimensions.y, 0,1);
ex_texcoord = vec2(uvXY.x, uvXY.y + uvWH.y);
}
else if(gl_VertexID == 3)
{
gl_Position = vec4(position.x + dimensions.x, position.y + dimensions.y, 0,1);
ex_texcoord = vec2(uvXY.x + uvWH.x, uvXY.y + uvWH.y );
}
}
And my Frag shader
#version 410
in vec2 ex_texcoord;
uniform sampler2D tex;
uniform vec4 color = vec4(1,1,1,1);
layout (location = 0) out vec4 FragColor;
void main()
{
FragColor = texture(tex,ex_texcoord) * color;
}
Now the problem: GLIntercept reports no errors, yet I'm getting nothing drawn on the screen.
Any help?
There is one subtle issue in your code that would certainly stop it from working. At the end of the VAO/VBO setup code, you have this:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
glBindVertexArray(0);
The GL_ELEMENT_ARRAY_BUFFER binding is part of the VAO state. If you unbind it while the VAO is bound, this VAO will not have an element array buffer binding. Which means that you don't have indices when you draw later.
You should simply remove this call:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
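If you do want to unbind GL_ELEMENT_ARRAY_BUFFER for hygiene, unbind the VAO first so that the index buffer binding stays captured inside it - a sketch of a safe ordering:
glBindVertexArray(m_vao);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)*6, &m_indices, GL_STATIC_DRAW);
glBindVertexArray(0); // the VAO keeps m_ibo as its element array buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); // optional; no VAO is bound, so this no longer touches m_vao's state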
Also, since you have 6 indices, the second argument to the draw call should be 6:
glDrawElementsInstanced(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0, m_rectsDrawnThisFrame);
Apart from that, it all looks reasonable to me. But there's quite a lot of code, so I can't guarantee that I would have spotted all problems.
I also noticed that I had to activate the texture in the second slot. Does the Texture Buffer Object use up one?
Yes. The buffer texture needs to be bound, and the value of the sampler variable set to the corresponding texture unit. Since you bind the buffer texture during setup, never unbind it, and the default value of the sampler variable is 0, you're probably fine there. But I think it would be cleaner to set it up more explicitly. Where you prepare for drawing:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER, m_xywhuvTexture);
glUniform1i(m_program->GetUniformLocation("buf"), 0);

Assistance in Debug OpenGL glsl Shader or Code using it

I am working on adding a phong shader to my working program. Basically, after I implemented my new shaders, my code gets a "Segmentation Fault: 11" during:
glDrawArrays(GL_TRIANGLES, 0, mCubes.getArrayNumberOfElements());
I know the number of elements is correct because it worked for my previous, simple shader.
Here is my Vertex Shader:
// vertex shader
attribute vec4 vPosition;
attribute vec3 vNormal;
varying vec4 color; //vertex shader
// light and material properties
uniform vec4 AmbientProduct, DiffuseProduct, SpecularProduct;
uniform mat4 ModelView;
//uniform mat4 Projection;
uniform vec4 LightPosition;
uniform float Shininess;
vec3 L, H, N, pos, E;
vec4 diffuse, specular, ambient;
float Kd, Ks;
void main()
{
// Transform vertex position into eye coordinates
pos = (ModelView * vPosition).xyz;
L = normalize( LightPosition.xyz - pos );
E = normalize( -pos );
H = normalize( L + E );
// Transform vertex normal into eye coordinates
N = normalize( ModelView*vec4(vNormal, 0.0) ).xyz;
// Compute terms in the illumination equation
ambient = AmbientProduct;
Kd = max( dot(L, N), 0.0 );
diffuse = Kd*DiffuseProduct;
Ks = pow( max(dot(N, H), 0.0), Shininess );
specular = Ks * SpecularProduct;
if( dot(L, N) < 0.0 )
specular = vec4(0.0, 0.0, 0.0, 1.0);
gl_Position = ModelView * vPosition;
color = ambient + diffuse + specular;
color.a = 1.0;
}
Here is my display function in which the code ends up getting the fault:
void display(void) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
vector<float> cell = mCubes.getCell();
mat4 matrix = rot * scale(1.0/cell[0], 1.0/cell[1], 1.0/cell[2]) * translate(-cell[0]/2.0, -cell[1]/2.0, -cell[2]/2.0);
glUniformMatrix4fv(vShaderModelView, 1, GL_TRUE, matrix);
glDrawArrays(GL_TRIANGLES, 0, mCubes.getArrayNumberOfElements());
glutSwapBuffers();
glFlush();
}
And here is my init function that mostly sets up and interacts with the shaders:
void init() {
// Create a vertex array object
GLuint vao;
#ifdef __APPLE__
glGenVertexArraysAPPLE( 1, &vao );
glBindVertexArrayAPPLE( vao );
#else
glGenVertexArrays( 1, &vao );
glBindVertexArray( vao );
#endif
// Create and initialize a buffer object
GLuint buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData( GL_ARRAY_BUFFER,
mCubes.getDisplayArraySize() + mCubes.getDisplayArraySize()*3, NULL, GL_STATIC_DRAW );
GLintptr offset = 0;
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getDisplayArraySize(), mCubes.getDisplayArray());
offset+= mCubes.getDisplayArraySize();
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getDisplayArraySize(), mCubes.getNormalVector());
// Load shaders and use the resulting shader program
string evname = "PROTCAD3DIR";
string path = PCGeneralIO::getEnvironmentVariable(evname);
path += "/data/shaders/";
#ifdef __APPLE__
string vshadername = path + "kw_vshader1_mac.glsl";
string fshadername = path + "kw_fshader1_mac.glsl";
//#else
// string vshadername = path + "kw_vshader1.glsl";
// string fshadername = path + "kw_fshader1.glsl";
#endif
GLuint program = InitShader( vshadername.c_str(), fshadername.c_str() );
glUseProgram(program);
// Initialize the vertex position attribute from the vertex shader
GLuint vShaderPosition = glGetAttribLocation(program, "vPosition");
glEnableVertexAttribArray(vShaderPosition);
glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
GLuint vShaderNormal = glGetAttribLocation(program, "vNormal");
glEnableVertexAttribArray(vShaderNormal);
//glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset)); //this was the ORIGINAL PROBLEM, now commented out and below is solution
glVertexAttribPointer(vShaderNormal, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
vShaderModelView = glGetUniformLocation(program, "ModelView");
vShaderLightPosition = glGetUniformLocation(program, "LightPosition");
vShaderAmbientProduct = glGetUniformLocation(program, "AmbientProduct");
vShaderDiffuseProduct = glGetUniformLocation(program, "DiffuseProduct");
vShaderSpecularProduct = glGetUniformLocation(program, "SpecularProduct");
vShaderShininess = glGetUniformLocation(program, "SpecularProduct");
glEnable( GL_DEPTH_TEST );
vec4 light = vec4(0.5,1.5,1.0,0.0);
glUniform4fv(vShaderLightPosition, 1, light);
vec4 amb = vec4(1.0f,0.0f,0.20f,1.0f);
glUniform4fv(vShaderAmbientProduct, 1, amb);
vec4 diff = vec4(0.5f,0.5f,0.5f,1.0f);
glUniform4fv(vShaderDiffuseProduct, 1, diff);
vec4 spec = vec4(0.80f,0.80f,0.80f,1.0f);
glUniform4fv(vShaderSpecularProduct, 1, spec);
float shin = 6.0f;
glUniform1f(vShaderShininess,shin);
glClearColor(.2, .2, .2, 1); /* Grey background */
}
If you have any questions, feel free to ask and I will elaborate. I feel that either the vertex shader itself has a problem, or the way I interact with the shader is doing something wonky. Any help or suggestions are accepted!
EDIT::: (code edited to reflect solution) The problem was in the second:
glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
which should have read:
glVertexAttribPointer(vShaderNormal, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
It was a stupid copy/paste mistake. However, the finished product still does not look correct:
![At rotation 0 it seems to be fully colored](http://i.stack.imgur.com/CKJ3f.png)
![A little rotation reveals some odd behavior](http://i.stack.imgur.com/kyRfI.png)
![Even more rotation leads you to pull your hair out](http://i.stack.imgur.com/lYOzK.png)
![Then it whites out and you know I screwed up](http://i.stack.imgur.com/FZcqF.png)
So, as you rotate, the color gets screwed up and turns white, black, patterned and everything - this is obviously incorrect.
Edit::: This is my attempt to "correct" the issue of passing the wrong number of values for vNormal:
void init() {
// Create a vertex array object
GLuint vao;
#ifdef __APPLE__
glGenVertexArraysAPPLE( 1, &vao );
glBindVertexArrayAPPLE( vao );
#else
glGenVertexArrays( 1, &vao );
glBindVertexArray( vao );
#endif
// Create and initialize a buffer object
GLuint buffer;
realVec *normArray = new realVec[mCubes.getNormalArraySize()];//vec4 array compared to vec3 array
normArray = mCubes.getNormalVector(); // new array of normals
for(int i=0; i<mCubes.getArrayNumberOfElements();i++){
printf("Normal at %d is %f \n",i,normArray[i][0]); //to print normals
printf("Normal at %d is %f \n",i,normArray[i][1]); //to print normals
printf("Normal at %d is %f \n",i,normArray[i][2]); //to print normals
}
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData( GL_ARRAY_BUFFER,
mCubes.getDisplayArraySize() + mCubes.getNormalArraySize(), NULL, GL_STATIC_DRAW ); //Changed size for vec3 array of normals
GLintptr offset = 0;
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getDisplayArraySize(), mCubes.getDisplayArray());
offset+= mCubes.getDisplayArraySize();
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getNormalArraySize(), normArray);
// Load shaders and use the resulting shader program
string evname = "PROTCAD3DIR";
string path = PCGeneralIO::getEnvironmentVariable(evname);
path += "/data/shaders/";
#ifdef __APPLE__
string vshadername = path + "kw_vshader1_mac.glsl";
string fshadername = path + "kw_fshader1_mac.glsl";
//#else
// string vshadername = path + "kw_vshader1.glsl";
// string fshadername = path + "kw_fshader1.glsl";
#endif
GLuint program = InitShader( vshadername.c_str(), fshadername.c_str() );
glUseProgram(program);
//offset =0;
// Initialize the vertex position attribute from the vertex shader
GLuint vShaderPosition = glGetAttribLocation(program, "vPosition");
glEnableVertexAttribArray(vShaderPosition);
glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
GLuint vShaderNormal = glGetAttribLocation(program, "vNormal");
glEnableVertexAttribArray(vShaderNormal);
glVertexAttribPointer(vShaderNormal, 3, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
//vShaderMatrix = glGetUniformLocation(program, "vMatrix");
//vShaderColor = glGetUniformLocation(program, "vColor")
vShaderModelView = glGetUniformLocation(program, "ModelView");
vShaderLightPosition = glGetUniformLocation(program, "LightPosition");
//vShaderProjection = glGetUniformLocation(program, "Projection");
vShaderAmbientProduct = glGetUniformLocation(program, "AmbientProduct");
vShaderDiffuseProduct = glGetUniformLocation(program, "DiffuseProduct");
vShaderSpecularProduct = glGetUniformLocation(program, "SpecularProduct");
vShaderShininess = glGetUniformLocation(program, "SpecularProduct");
glEnable( GL_DEPTH_TEST );
vec4 light = vec4(0.5,1.5,1.0,0.0);
glUniform4fv(vShaderLightPosition, 1, light);
vec4 amb = vec4(1.0f,0.0f,0.20f,1.0f);
glUniform4fv(vShaderAmbientProduct, 1, amb);
vec4 diff = vec4(0.5f,0.5f,0.5f,1.0f);
glUniform4fv(vShaderDiffuseProduct, 1, diff);
vec4 spec = vec4(0.80f,0.80f,0.80f,1.0f);
glUniform4fv(vShaderSpecularProduct, 1, spec);
float shin = 6.0f;
glUniform1f(vShaderShininess,shin);
glClearColor(.2, .2, .2, 1); /* Grey background */
}
Should I maybe change the light, ambient, specular, and diffuse properties? I am not sure what the problem is.
You pass your vNormal attribute data using the following code
glVertexAttribPointer(vShaderNormal, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
This indicates that your normals have 4 components, whereas in your vertex shader you declare the attribute as
attribute vec3 vNormal;
This mismatch may be related to your problem if the normals are misinterpreted.
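The straightforward fix (which the edited init() above already applies) is to make the two sides agree, e.g. keep vec3 vNormal in the shader and pass 3 components per normal:
glVertexAttribPointer(vShaderNormal, 3, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));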