OpenGL Vertex Buffer incorrect render - C++

I'm having a bit of an odd problem. I'm trying to render some data with OpenGL on my Windows system. I found a set of tutorials at opengl-tutorial.org written for OpenGL 3.3. As my laptop (where I do a great deal of my development) only supports OpenGL 2.1, I downloaded the OpenGL 2.1 port of the tutorial. I messed around with it a bit, adding features and refactoring it for scalability, but noticed something odd: whenever I render the mesh with vertex buffer objects, I get a rather incorrect representation of my data, shown below.
http://www.majhost.com/gallery/DagonEcelstraun/Others/HelpNeeded/badrender.png
However, when I specify my data using glVertex3fv and such, I get a much nicer result, again shown below.
http://www.majhost.com/gallery/DagonEcelstraun/Others/HelpNeeded/goodrender.png
The problem occurs both on my Windows 8.1 laptop with Intel i3 integrated graphics and on my Windows 7 desktop with its nVidia GTX 660, so it's not a hardware problem. Does anyone know what may be the issue here?
Loading mesh data:
const aiScene *scene = aiImportFile( sName.c_str(),
aiProcessPreset_TargetRealtime_MaxQuality | aiProcess_FlipUVs );
const aiMesh *mesh = scene->mMeshes[0];
for( int i = 0; i < mesh->mNumVertices; i++ ) {
meshData.push_back( mesh->mVertices[i][0] );
meshData.push_back( mesh->mVertices[i][1] );
meshData.push_back( mesh->mVertices[i][2] );
meshData.push_back( mesh->mNormals[i][0] );
meshData.push_back( mesh->mNormals[i][1] );
meshData.push_back( mesh->mNormals[i][2] );
meshData.push_back( mesh->mTextureCoords[0][i][0] );
meshData.push_back( mesh->mTextureCoords[0][i][1] );
meshData.push_back( 0 );
meshData.push_back( mesh->mTangents[i][0] );
meshData.push_back( mesh->mTangents[i][1] );
meshData.push_back( mesh->mTangents[i][2] );
}
for( int i = 0; i < mesh->mNumFaces; i++ ) {
for( int j = 0; j < 3; j++ ) {
indices.push_back( mesh->mFaces[i].mIndices[j] );
}
}
Sending data to the graphics card for the first time (called right after previous code):
glGenBuffers( 1, &glVertData );
glBindBuffer( GL_ARRAY_BUFFER, glVertData );
glBufferData( GL_ARRAY_BUFFER, meshData.size() * sizeof( GLfloat ), &meshData[0], GL_STATIC_DRAW );
// Generate a buffer for the indices as well
glGenBuffers( 1, &glIndexes );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, glIndexes );
glBufferData( GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW );
Rendering the mesh:
//Tell the shader to use our data
//bindVerts, bindUvs, bindNorms, and bindTangents refer to attribute variables in my shader
//vertexPosition_modelspace, vertexUV, vertexNormal_modelspace, and vertexTangent_modelspace, respectively.
this->verts = bindVerts;
this->uvs = bindUvs;
this->norms = bindNorms;
this->tangents = bindTangents;
glEnableVertexAttribArray( verts );
glEnableVertexAttribArray( uvs );
glEnableVertexAttribArray( norms );
glEnableVertexAttribArray( tangents );
//Specify how the graphics card should decode our data
// 1st attribute buffer : vertices
glBindBuffer( GL_ARRAY_BUFFER, glVertData );
glVertexAttribPointer( verts, 3, GL_FLOAT, GL_FALSE, 12, (void*) 0 );
// 2nd attribute buffer : normals
glVertexAttribPointer( norms, 3, GL_FLOAT, GL_FALSE, 12, (void*) 3 );
//3rd attribute buffer : UVs
glVertexAttribPointer( uvs, 3, GL_FLOAT, GL_FALSE, 12, (void*) 6 );
//4th attribute buffer: tangents
glVertexAttribPointer( tangents, 3, GL_FLOAT, GL_FALSE, 12, (void*) 9 );
// Index buffer
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, glIndexes );
//rendering the mesh with VBOs:
glDrawElements( GL_LINES, indices.size(), GL_UNSIGNED_SHORT, (void*) 0 );
//specifying the vertex data individually:
glBegin( GL_TRIANGLES );
int ind;
for( int i = 0; i < indices.size(); i++ ) {
ind = indices[i] * 12;
glNormal3fv( &meshData[ind + 3] );
glTexCoord2fv( &meshData[ind + 6] );
glVertex3fv( &meshData[ind] );
}
glEnd();
//clean up after the render
glDisableVertexAttribArray( verts );
glDisableVertexAttribArray( uvs );
glDisableVertexAttribArray( norms );
glDisableVertexAttribArray( tangents );
My vertex shader:
#version 130
// Input vertex data, different for all executions of this shader.
//it doesn't work, so we'll just get rid of it
attribute vec3 vertexPosition_modelspace;
attribute vec3 vertexUV;
attribute vec3 vertexNormal_modelspace;
attribute vec3 vertexTangent_modelspace;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
out vec4 ShadowCoord;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightInvDirection_worldspace;
uniform mat4 DepthBiasMVP;
uniform sampler2D normalMap;
attribute vec3 vTangent;
void main() {
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP * vec4( vertexPosition_modelspace, 1 );
ShadowCoord = DepthBiasMVP * vec4( vertexPosition_modelspace, 0 );
// Position of the vertex, in worldspace : M * position
Position_worldspace = ( M * vec4( vertexPosition_modelspace, 0 ) ).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
EyeDirection_cameraspace = vec3( 0, 0, 0 ) - ( V * M * vec4( vertexPosition_modelspace, 0 ) ).xyz;
// Vector that goes from the vertex to the light, in camera space
LightDirection_cameraspace = ( V * vec4( LightInvDirection_worldspace, 0 ) ).xyz;
// UV of the vertex. No special space for this one.
UV = vertexUV.st;
// Normal of the vertex, in camera space
// Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
Normal_cameraspace = ( V * M * vec4( vertexNormal_modelspace.xyz, 0 ) ).xyz;
}
Fragment shader:
#version 130
// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Position_worldspace;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;
in vec4 ShadowCoord;
out vec4 fragColor;
// Values that stay constant for the whole mesh.
uniform sampler2D diffuse;
uniform mat4 MV;
uniform vec3 LightPosition_worldspace;
uniform sampler2D shadowMap;
//uniform int shadowLevel; //0 is no shadow, 1 is hard shadows, 2 is soft shadows, 3 is PCSS
// Returns a random number based on a vec3 and an int.
float random( vec3 seed, int i ) {
vec4 seed4 = vec4( seed, i );
float dot_product = dot( seed4, vec4( 12.9898, 78.233, 45.164, 94.673 ) );
return fract( sin( dot_product ) * 43758.5453 );
}
int mod( int a, int b ) {
return a - (a / b);
}
void main() {
int shadowLevel = 1; //let's just do hard shadows
// Light emission properties
vec3 LightColor = vec3( 1, 1, 1 );
float LightPower = 1.0f;
// Material properties
vec3 MaterialDiffuseColor = texture( diffuse, UV ).rgb;
vec3 MaterialAmbientColor = vec3( 0.1, 0.1, 0.1 ) * MaterialDiffuseColor;
vec3 MaterialSpecularColor = vec3( 0.3, 0.3, 0.3 );
vec3 n = normalize( Normal_cameraspace );
vec3 l = normalize( LightDirection_cameraspace );
float cosTheta = clamp( dot( n, l ), 0.2, 1 );
// Eye vector (towards the camera)
vec3 E = normalize( EyeDirection_cameraspace );
// Direction in which the triangle reflects the light
vec3 R = reflect( -l, n );
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
// - Looking into the reflection -> 1
// - Looking elsewhere -> < 1
float cosAlpha = clamp( dot( E, R ), 0, 1 );
float visibility = 1.0;
//variable bias
float bias = 0.005 * tan( acos( cosTheta ) );
bias = clamp( bias, 0, 0.01 );
// dFragment to the light
float dFragment = ( ShadowCoord.z-bias ) / ShadowCoord.w;
float dBlocker = 0;
float penumbra = 1;
float wLight = 5.0;
if( shadowLevel == 3 ) {
// Sample the shadow map 8 times
float count = 0;
float temp;
float centerBlocker = texture( shadowMap, ShadowCoord.xy).r;
float scale = (wLight * (dFragment - centerBlocker)) / dFragment;
for( int i = 0; i < 16; i++ ) {
temp = texture( shadowMap, ShadowCoord.xy + (scale * poissonDisk( i ) / 50.0) ).r;
if( temp < dFragment ) {
dBlocker += temp;
count += 1;
}
}
if( count > 0 ) {
dBlocker /= count;
penumbra = wLight * (dFragment - dBlocker) / dFragment;
}
}
if( shadowLevel == 1 ) {
if( texture( shadowMap, ShadowCoord.xy).r < dFragment ) {
visibility -= 0.8;
}
} else if( shadowLevel > 1 ) {
float iterations = 32;
float sub = 0.8f / iterations;
for( int i = 0; i < iterations; i++ ) {
int index = mod( int( 32.0 * random( gl_FragCoord.xyy, i ) ), 32 );
if( texture( shadowMap, ShadowCoord.xy + (penumbra * poissonDisk( index ) / 250.0) ).r < dFragment ) {
visibility -= sub;
}
}
}
visibility = min( visibility, cosTheta );
//MaterialDiffuseColor = vec3( 0.8, 0.8, 0.8 );
fragColor.rgb = MaterialAmbientColor +
visibility * MaterialDiffuseColor * LightColor * LightPower +
visibility * MaterialSpecularColor * LightColor * LightPower * pow( cosAlpha, 5 );
}
Note that poissonDisk( int ind ) returns a vec2 with a magnitude of at most 1, drawn from a Poisson disk distribution. Even though I'm using shader version 130, I used a function rather than an array because the array runs rather slowly on my laptop.
I do bind that shader before I do any rendering. I also make sure to upload the correct variables to all of my uniforms, but I didn't show that to save space since I know it's working correctly.
Does anyone know what's causing this incorrect render?

Well, first of all, stop drawing the VBO using GL_LINES. Use the same primitive mode for immediate mode and VBO drawing.
Also, since when is 3*4 = 3? With an interleaved data structure, the offset in your vertex attribute pointers has to be the number of preceding components multiplied by the size of the data type. GL_FLOAT is 4 bytes, so if you have a 3-component vertex position, the offset to the next field in your VBO is 3*4 = (void *)12, not (void *)3. The same applies to each additional vertex attribute pointer; they all use incorrect offsets.
Likewise, the stride of your VBO should be 12 * sizeof (GLfloat) = 48, not 12.
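Putting both fixes together, here is a rough, untested sketch of how the attribute setup and draw call from the question could look with a 48-byte stride and byte offsets (variable names taken from the question):
// 12 floats per vertex: position (3), normal (3), UV (3), tangent (3)
const GLsizei stride = 12 * sizeof( GLfloat );
glBindBuffer( GL_ARRAY_BUFFER, glVertData );
glVertexAttribPointer( verts,    3, GL_FLOAT, GL_FALSE, stride, (void*)( 0 * sizeof( GLfloat ) ) );
glVertexAttribPointer( norms,    3, GL_FLOAT, GL_FALSE, stride, (void*)( 3 * sizeof( GLfloat ) ) );
glVertexAttribPointer( uvs,      3, GL_FLOAT, GL_FALSE, stride, (void*)( 6 * sizeof( GLfloat ) ) );
glVertexAttribPointer( tangents, 3, GL_FLOAT, GL_FALSE, stride, (void*)( 9 * sizeof( GLfloat ) ) );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, glIndexes );
// same primitive mode as the immediate-mode path
glDrawElements( GL_TRIANGLES, indices.size(), GL_UNSIGNED_SHORT, (void*)0 );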

Related

opengl drawing multiple objects from one vao

I have a single VAO that contains the character set of a font.
Each character is drawn with the set of indices belonging to it.
The idea is to write a couple of characters while binding the VAO only once.
Everything works fine - except the positioning of the glyphs.
The loop looks like this:
glGetUniformLocations()
glBindVertexArray( vao )
for( i=0; i< lg; i++ )
{
glUniformMatrix4fv(); // send translation matrix to shader
glDrawElements( part of the indexbuffer );
trans_mx = glm::translate(); // compute the translation matrix
}
Vertexshader is:
#version 450 core
layout( location = 0 ) in vec3 vx; // the vertex_buffer in modelspace
layout( location = 1 ) in vec4 vx_col; // color of each vertex
out vec4 fragment_color;
uniform mat4 mvp_mx;
uniform mat4 trans_mx;
void main()
{
gl_Position = mvp_mx * vec4( trans_mx * vec4( vx, 1.0f ) );
fragment_color = vx_col;
}
The translation works - but not between the draw calls.
All characters are drawn at the same (translated) position.
It seems to me as if the translation matrix is never updated.
This is the code from the function that draws the characters:
lg = strlen( str );
vao = sfs->vao;
_sys.mvp_mx = _sys.proj_mx * _sys.view_mx * vao->model_mx; // compute global mvp
sh_s = &__sh_list[ vao->shd_ind ]; // pointer to our shader_struct
sh_s->get_uniform_loc(); // get uniform locations for this shader
glBindVertexArray( vao->id ); // bind vao
glUseProgram( sh_s->prg_id ); // use the shader
for( c1 = 0; c1 < lg; c1++ )
{
sh_s->send_uniform(); // send uniforms to the shader
i_seg = &vao->ind_b->seg[ str[ c1 ] ]; // segment for this character
glDrawElements( i_seg->prim, i_seg->count, i_seg->type, ( void* )i_seg->offset );
_sys.trans_mx = glm::translate( glm::mat4( 1.0f ), glm::vec3( 10.0f, 0.0f, 0.0f ) );
}
glBindVertexArray( 0 ); // unbind vao
glUseProgram( 0 ); // unbind shader
The expression
for( c1 = 0; c1 < lg; c1++ )
{
.....
_sys.trans_mx = glm::translate(glm::mat4(1.0f), glm::vec3(10.0f, 0.0f, 0.0f));
}
will keep computing the same matrix on every iteration.
If you want a continuously changing matrix in the loop, then you have to update the matrix based on its current value:
for( c1 = 0; c1 < lg; c1++ )
{
.....
_sys.trans_mx = glm::translate(_sys.trans_mx, glm::vec3(10.0f, 0.0f, 0.0f));
}
Or you can compute the matrix as a function of the loop's control variable:
for( c1 = 0; c1 < lg; c1++ )
{
.....
_sys.trans_mx = glm::translate(glm::mat4(1.0f), glm::vec3(10.0f * c1, 0.0f, 0.0f));
}

GL11.glDrawElements does not work

First, I'm using LWJGL 3 and OpenGL 3.2.
I'm trying to use indices with the function GL11.glDrawElements, but nothing is rendered in the window.
Buffer generation code (it doesn't really take advantage of the indices, but I think it should still work):
public void updateBuffers(Game game, int positionsAttrib, int texCoordsAttrib) { // positionsAttrib and texCoordsAttrib are pointer to shader program attribs
FloatBuffer positionsBuffer = null;
FloatBuffer texCoordsBuffer = null;
IntBuffer indicesBuffer = null;
try {
this.vertexCount = this.tiles.size() * 4;
positionsBuffer = MemoryUtil.memAllocFloat( this.tiles.size() * 3 * 4 );
texCoordsBuffer = MemoryUtil.memAllocFloat( this.tiles.size() * 2 * 4 );
indicesBuffer = MemoryUtil.memAllocInt( this.vertexCount );
int i = 0;
for ( Entry<TilePosition, Tile> tilesEntry : this.tiles.entrySet() ) {
TilePosition tilePosition = tilesEntry.getKey();
Tile tile = tilesEntry.getValue();
String tileTextureIdentifier = tile.getTextureIdentifier();
TextureDefinition tileTextureDefinition = game.getTexturesManager().getTextureDefinition("tiles");
Rectangle tileTextureRectangle = tileTextureDefinition.getTilePosition( tileTextureIdentifier );
if ( tileTextureRectangle == null ) continue;
positionsBuffer.put( tilePosition.getX() ).put( tilePosition.getY() + 1 ).put( 0 );
positionsBuffer.put( tilePosition.getX() + 1 ).put( tilePosition.getY() + 1 ).put( 0 );
positionsBuffer.put( tilePosition.getX() + 1 ).put( tilePosition.getY() ).put( 0 );
positionsBuffer.put( tilePosition.getX() ).put( tilePosition.getY() ).put( 0 );
texCoordsBuffer.put( tileTextureRectangle.x ).put( tileTextureRectangle.y );
texCoordsBuffer.put( tileTextureRectangle.x + tileTextureRectangle.width ).put( tileTextureRectangle.y );
texCoordsBuffer.put( tileTextureRectangle.x + tileTextureRectangle.width ).put( tileTextureRectangle.y + tileTextureRectangle.height );
texCoordsBuffer.put( tileTextureRectangle.x ).put( tileTextureRectangle.y + tileTextureRectangle.height );
indicesBuffer.put( i ).put( i + 1 ).put( i + 2 ).put( i + 3 );
i += 4;
}
positionsBuffer.flip();
texCoordsBuffer.flip();
indicesBuffer.flip();
this.vao.bind(); // vbo and vao are instances of the classes VertexBufferObject and VertexArrayObject, which store the buffer ids and wrap the most useful functions
this.positionsVbo.bind( GL15.GL_ARRAY_BUFFER );
VertexBufferObject.uploadData( GL15.GL_ARRAY_BUFFER, positionsBuffer, GL15.GL_STATIC_DRAW );
ShaderProgram.pointVertexAttribute( positionsAttrib, 3, 0, 0 );
this.texCoordsVbo.bind( GL15.GL_ARRAY_BUFFER );
VertexBufferObject.uploadData( GL15.GL_ARRAY_BUFFER, texCoordsBuffer, GL15.GL_STATIC_DRAW );
ShaderProgram.pointVertexAttribute( texCoordsAttrib, 2, 0, 0 );
this.indicesVbo.bind( GL15.GL_ELEMENT_ARRAY_BUFFER );
VertexBufferObject.uploadData( GL15.GL_ELEMENT_ARRAY_BUFFER, indicesBuffer, GL15.GL_STATIC_DRAW );
VertexArrayObject.unbind();
} finally {
if ( positionsBuffer != null ) MemoryUtil.memFree( positionsBuffer );
if ( texCoordsBuffer != null ) MemoryUtil.memFree( texCoordsBuffer );
if ( indicesBuffer != null ) MemoryUtil.memFree( indicesBuffer );
}
}
The shader program used:
// scene.vs :
#version 330 // edit: I had to change this line because of the OpenGL version in use
layout (location=0) in vec3 position;
layout (location=1) in vec2 texCoord;
out vec2 outTexCoord;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
void main() {
mat4 mvp = projection * view * model;
gl_Position = mvp * vec4( position, 1.0 );
outTexCoord = texCoord;
}
// scene.fs :
#version 330
in vec2 outTexCoord;
out vec4 fragColor;
uniform sampler2D textureSampler;
void main() {
vec3 vertexColor = vec3( 1.0, 1.0, 1.0 );
vec4 textureColor = texture( textureSampler, outTexCoord );
fragColor = vec4( vertexColor, 1.0 ) * textureColor;
}
And the rendering functions:
private void beginRender(Game game, int positionsAttrib, int texCoordsAttrib) {
Texture texture = game.getTexturesManager().getTextureDefinition("tiles").getTexture();
GL13.glActiveTexture( GL13.GL_TEXTURE0 );
texture.bind();
this.vao.bind();
ShaderProgram.enableVertexAttribute( positionsAttrib );
ShaderProgram.enableVertexAttribute( texCoordsAttrib );
}
private void endRender(Game game, int positionsAttrib, int texCoordsAttrib) {
ShaderProgram.disableVertexAttribute( positionsAttrib );
ShaderProgram.disableVertexAttribute( texCoordsAttrib );
VertexArrayObject.unbind();
Texture.unbind();
}
// render is called by render loop between clear and swapbuffer GL functions
public void render(Game game, int positionsAttrib, int texCoordsAttrib) {
this.beginRender( game, positionsAttrib, texCoordsAttrib );
GL11.glDrawElements( GL11.GL_QUADS, this.vertexCount, GL11.GL_UNSIGNED_INT, 0 );
this.endRender( game, positionsAttrib, texCoordsAttrib );
}
I'm not sure this is very clear, especially with my approximate English...
You don't explicitly state it, but it looks like you're using a core profile (as you should). However, if so, GL_QUADS will not be available, and your draw call will just result in a GL_INVALID_ENUM error.
As a side note: I'd strongly recommend using OpenGL's debug output feature during development (core since GL 4.3, or via the KHR_debug extension where available); it makes spotting and interpreting any GL errors much easier, and might furthermore provide useful performance hints.
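Since core profiles only offer points, lines and triangles, the usual replacement for GL_QUADS is to emit six indices per tile (two triangles) instead of four. A minimal sketch of the index pattern, written in C++ for consistency with the rest of this page (quadIndicesToTriangles is a hypothetical helper; the same pattern carries over directly to the IntBuffer in updateBuffers):
#include <vector>

// Expands "4 consecutive vertices per quad" into GL_TRIANGLES indices,
// assuming each quad's vertices are stored in fan order
// (e.g. top-left, top-right, bottom-right, bottom-left as in the question).
std::vector<unsigned int> quadIndicesToTriangles( unsigned int quadCount ) {
    std::vector<unsigned int> indices;
    indices.reserve( quadCount * 6 );
    for ( unsigned int q = 0; q < quadCount; ++q ) {
        unsigned int base = q * 4;
        // first triangle: v0, v1, v2
        indices.push_back( base );
        indices.push_back( base + 1 );
        indices.push_back( base + 2 );
        // second triangle: v2, v3, v0
        indices.push_back( base + 2 );
        indices.push_back( base + 3 );
        indices.push_back( base );
    }
    return indices;
}
// Then draw with GL_TRIANGLES and 6 indices per tile:
// glDrawElements( GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0 );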

Opengl lighting illuminating the wrong surfaces

I am using OpenGL to display simple objects with a light above them. The problem is that the faces of my object are not lit the right way. Here is my result:
The light is supposed to be above the object.
I load objects from Wavefront OBJ files like this:
if ( strcmp( lineHeader, "v" ) == 0 ){
glm::vec3 vertex;
fscanf(file, "%f %f %f\n", &vertex.x, &vertex.y, &vertex.z );
vertices.push_back(vertex);
}else if ( strcmp( lineHeader, "vt" ) == 0 ){
glm::vec2 uv;
fscanf(file, "%f %f\n", &uv.x, &uv.y );
uv.y = uv.y;
// Invert V coordinate since we will only use DDS texture, which are inverted. Remove if you want to use TGA or BMP loaders.
temp_uvs.push_back(uv);
}else if ( strcmp( lineHeader, "vn" ) == 0 ){
glm::vec3 normal;
fscanf(file, "%f %f %f\n", &normal.x, &normal.y, &normal.z );
temp_normals.push_back(normal);
}else if ( strcmp( lineHeader, "f" ) == 0 ){
std::string vertex1, vertex2, vertex3;
unsigned int vertexIndex[3], uvIndex[3], normalIndex[3];
int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex[1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2] );
if (matches != 9){
printf("File can't be read by our simple parser :-( Try exporting with other options\n");
return false;
}
indices.push_back(vertexIndex[0]-1);
indices.push_back(vertexIndex[1]-1);
indices.push_back(vertexIndex[2]-1);
uvIndices .push_back(uvIndex[0]);
uvIndices .push_back(uvIndex[1]);
uvIndices .push_back(uvIndex[2]);
normalIndices.push_back(normalIndex[0]);
normalIndices.push_back(normalIndex[1]);
normalIndices.push_back(normalIndex[2]);
}else{
// Probably a comment, eat up the rest of the line
char stupidBuffer[1000];
fgets(stupidBuffer, 1000, file);
}
}
normals.reserve(indices.size());
uvs.reserve(indices.size());
for( unsigned int i=0; i<indices.size(); i++ ){
// Get the indices of its attributes
unsigned int uvIndex = uvIndices[i];
unsigned int normalIndex = normalIndices[i];
normals[indices[i]] = temp_normals[normalIndex-1];
uvs[indices[i]] = temp_uvs[uvIndex-1];
The vertex shader:
#version 150 core
in vec2 color;
in vec3 position;
in vec3 normal;
out vec2 UV;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightPosition_worldspace;
void main() {
// Position of the vertex, in worldspace : M * position
Position_worldspace = (M * vec4(position.x , position.y , position.z ,1.0)).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
vec3 vertexPosition_cameraspace = ( V * M * vec4(position,1)).xyz;
EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace;
// Vector that goes from the vertex to the light, in camera space. M is ommited because it's identity.
vec3 LightPosition_cameraspace = ( V * vec4(LightPosition_worldspace,1)).xyz;
LightDirection_cameraspace = LightPosition_cameraspace + EyeDirection_cameraspace;
// Normal of the vertex, in camera space
Normal_cameraspace = ( V * M * vec4(normal,0)).xyz; // Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
// UV of the vertex. No special space for this one.
UV = color;
gl_Position = MVP*vec4(position.x , position.y , position.z , 1.0);
};
And my fragment shader:
#version 150 core
// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Position_worldspace;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;
out vec4 outColor
// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
uniform vec3 LightPosition_worldspace;
void main(){
vec3 LightColor = vec3(1,1,1);
float LightPower = 20.0f;
// Material properties
vec3 MaterialDiffuseColor = texture2D( myTextureSampler, UV ).rgb;
vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * MaterialDiffuseColor;
vec3 MaterialSpecularColor = vec3(0.3,0.3,0.3);
// Distance to the light
float distance = length( LightPosition_worldspace - Position_worldspace );
// Normal of the computed fragment, in camera space
vec3 n = normalize( Normal_cameraspace );
// Direction of the light (from the fragment to the light)
vec3 l = normalize( LightDirection_cameraspace );
// Cosine of the angle between the normal and the light direction,
// clamped above 0
// - light is at the vertical of the triangle -> 1
// - light is perpendicular to the triangle -> 0
// - light is behind the triangle -> 0
float cosTheta = clamp( dot( n,l ), 0,1 );
// Eye vector (towards the camera)
vec3 E = normalize(EyeDirection_cameraspace);
// Direction in which the triangle reflects the light
vec3 R = reflect(-l,n);
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
// - Looking into the reflection -> 1
// - Looking elsewhere -> < 1
float cosAlpha = clamp( dot( E,R ), 0,1 );
outColor.rgb =
// Ambient : simulates indirect lighting
MaterialAmbientColor +
// Diffuse : "color" of the object
MaterialDiffuseColor * LightColor * LightPower * cosTheta / (distance*distance) +
// Specular : reflective highlight, like a mirror
MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha,5) / (distance*distance);
}
Here is the cube that gets loaded:
# cube.obj
#
o cube
v 0.0 0.0 0.0
v 0.0 0.0 1.0
v 0.0 1.0 0.0
v 0.0 1.0 1.0
v 1.0 0.0 0.0
v 1.0 0.0 1.0
v 1.0 1.0 0.0
v 1.0 1.0 1.0
vn 0.0 0.0 1.0
vn 0.0 0.0 -1.0
vn 0.0 1.0 0.0
vn 0.0 -1.0 0.0
vn 1.0 0.0 0.0
vn -1.0 0.0 0.0
vt 0.25 0.0
vt 0.5 0.0
vt 0 0.25
vt 0.25 0.25
vt 0.5 0.25
vt 0.75 0.25
vt 0.0 0.5
vt 0.25 0.5
vt 0.5 0.5
vt 0.75 0.5
vt 0.25 0.75
vt 0.5 0.75
vt 0.25 1.0
vt 0.5 1.0
f 1/11/2 7/14/2 5/12/2
f 1/11/2 3/13/2 7/14/2
f 1/7/6 4/4/6 3/3/6
f 1/7/6 2/8/6 4/4/6
f 3/1/3 8/5/3 7/2/3
f 3/1/3 4/4/3 8/5/3
f 5/10/5 7/6/5 8/5/5
f 5/10/5 8/5/5 6/9/5
f 1/11/4 5/12/4 6/9/4
f 1/11/4 6/9/4 2/8/4
f 2/8/1 6/9/1 8/5/1
f 2/8/1 8/5/1 4/4/1
And here is how I load my VBOs:
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// Create a Vertex Buffer Object and copy the vertex data to it
glGenBuffers(1, &position_array_buffer);
glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), &vertices[0], GL_STATIC_DRAW);
// Create an element array
glGenBuffers(1, &elements_array_buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size()*sizeof(GLuint), &indices[0], GL_STATIC_DRAW);
glGenBuffers(1, &normal_array_buffer);
glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(glm::vec3), &normals[0], GL_STATIC_DRAW);
if (textured) {
texture = new sf::Texture();
if(!texture->loadFromFile("textures/uv.jpeg"/*,sf::IntRect(0, 0, 128, 128)*/))
std::cout << "Error loading texture !!" << std::endl;
glGenBuffers(1, &color_array_buffer);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(glm::vec3), &uvs[0], GL_STATIC_DRAW);
}
Here is the code that renders the scene:
// Get a handle for our "myTextureSampler" uniform
GLuint TextureID = glGetUniformLocation(shaderProgram, "myTextureSampler");
if(!TextureID)
cout << "TextureID not found ..." << endl;
glActiveTexture(GL_TEXTURE0);
sf::Texture::bind(texture);
glUniform1i(TextureID, 0);
// 2nd attribute buffer : UVs
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(vertexUVID==-1)
cout << "vertexUVID not found ..." << endl;
glEnableVertexAttribArray(vertexUVID);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glVertexAttribPointer(vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);
// 3rd attribute buffer : normals
GLuint vertexNormal_modelspaceID = glGetAttribLocation(shaderProgram, "normal");
if(!vertexNormal_modelspaceID)
cout << "vertexNormal_modelspaceID not found ..." << endl;
glEnableVertexAttribArray(vertexNormal_modelspaceID);
glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
glVertexAttribPointer(vertexNormal_modelspaceID, 3, GL_FLOAT, GL_FALSE, 0, 0 );
// Specify the layout of the vertex data
GLint posAttrib;
posAttrib = glGetAttribLocation(shaderProgram, "position");
// glBindAttribLocation(shaderProgram,posAttrib,"position");
if(!posAttrib)
cout << "posAttrib not found ..." << endl;
glEnableVertexAttribArray(posAttrib);
glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
glVertexAttribPointer(posAttrib, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
// Draw a rectangle from the indices_size/3 triangles using indices_size indices
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
// glDrawArrays(GL_TRIANGLES,posAttrib,indices.size());
while ((error = glGetError()) != GL_NO_ERROR) {
cerr << "OpenGL error: " << error << endl;
}
I have the feeling that my normals are not loaded properly. Furthermore, I was wondering whether my element array also has to contain information about normals and UVs, or whether those are just fetched the classic way, without indexing.
EDIT: I changed the parser; the vertices now load fine, but lighting and texture are still not applied properly.
Here:
normals.reserve(indices.size());
uvs.reserve(indices.size());
do not alter the size, only the capacity (try it yourself: http://ideone.com/FbXtbm), so e.g. this
glBufferData(GL_ARRAY_BUFFER, /*->*/normals.size() /*<-*/ * sizeof(glm::vec3), &normals[0], GL_STATIC_DRAW);
receives a zero buffer size as an argument.
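For illustration, a minimal standalone example of the difference (generic C++, not code from the question):
#include <iostream>
#include <vector>

int main() {
    std::vector<float> normals;
    normals.reserve( 10 );  // allocates storage; size() is still 0
    std::cout << normals.size() << " " << normals.capacity() << "\n";  // prints 0 and at least 10
    normals.resize( 10 );   // now the vector actually holds 10 value-initialized floats
    std::cout << normals.size() << " " << normals.capacity() << "\n";  // prints 10 and at least 10
    return 0;
}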
There's a syntax error in the fragment shader
in vec3 LightDirection_cameraspace;
/*->*/ out vec4 outColor /*<-*/
// Values that stay constant for the whole mesh.
Add a ";" after outColor.
Your arrays are not set up properly for your glDrawElements call.
I'll add some sample code after I've had my breakfast coffee.
EDIT 11:02
The cube has 8 vertices, but to draw it properly you need 3 normals for each
(for simplicity's sake, I did the same with the UVs):
}
indices.push_back(vertexIndex[0]-1);
indices.push_back(vertexIndex[1]-1);
indices.push_back(vertexIndex[2]-1);
uvIndices .push_back(uvIndex[0]-1);
uvIndices .push_back(uvIndex[1]-1);
uvIndices .push_back(uvIndex[2]-1);
normalIndices.push_back(normalIndex[0]-1);
normalIndices.push_back(normalIndex[1]-1);
normalIndices.push_back(normalIndex[2]-1);
}else{
// Probably a comment, eat up the rest of the line
char stupidBuffer[1000];
fgets(stupidBuffer, 1000, file);
}
}
#if 1 // EITHER
vertices.resize(indices.size());
normals.resize(indices.size());
uvs.resize(indices.size());
for( unsigned int i=0; i<indices.size(); ++i){
vertices[i] = temp_vertices[indices[i]];
normals[i] = temp_normals[normalIndices[i]];
uvs[i] = temp_uvs[uvIndices[i]];
}
#else // OR
vertices.reserve(indices.size());
normals.reserve(indices.size());
uvs.reserve(indices.size());
for( unsigned int i=0; i<indices.size(); ++i){
vertices.push_back(temp_vertices[indices[i]]);
normals.push_back(temp_normals[normalIndices[i]]);
uvs.push_back(temp_uvs[uvIndices[i]]);
}
#endif
struct yield {
int i;
yield() : i(0) {}
int operator() (){ return i++;}
};
std::generate(indices.begin(), indices.end(), yield());
std::clog << "num vertices: " << vertices.size() << std::endl
<< "num normals: " << normals.size() << std::endl
<< "num uvs: " << uvs.size() << std::endl
<< "num indices: " << indices.size() << std::endl;
Please note that I also altered something in the loop: I decrement all indices right there.
One would not have to unfold all the indices of all triangles, but this is the simplest way.
I also refactored your shaders:
#version 150 core
in vec2 color;
in vec3 position;
in vec3 normal;
out vec2 UV;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightPosition_worldspace;
void main() {
// Position of the vertex, in worldspace : M * position
vec3 wPos = (M * vec4(position, 1.0)).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
vec3 vertexPosition_cameraspace = ( V * M * vec4(position,1)).xyz;
EyeDirection_cameraspace = -vertexPosition_cameraspace;
// Vector that goes from the vertex to the light, in camera space. M is ommited because it's identity.
vec3 LightPosition_cameraspace = ( V * vec4(LightPosition_worldspace,1)).xyz;
LightDirection_cameraspace = LightPosition_cameraspace - vertexPosition_cameraspace;
// Normal of the vertex, in camera space
#if 0
// Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
Normal_cameraspace = (V * M * vec4(normal,0)).xyz;
#else
Normal_cameraspace = mat3(V) * inverse(transpose(mat3(M))) * normal;
#endif
Normal_cameraspace = normalize(Normal_cameraspace);
// UV of the vertex. No special space for this one.
UV = color;
gl_Position = MVP*vec4(position, 1.0);
} // void main()
#version 150 core
// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;
out vec4 outColor;
const float SHININESS = 5.0;
const float AMBIENCE = 0.1;
const float SPECULARITY = 0.3;
const vec3 LIGHT_COLOR = vec3(1.0, 1.0, 1.0);
const float LIGHT_INTENSITY = 300.0;
//uniform sampler2D myTextureSampler;
//uniform vec3 LightPosition_worldspace;
float lambert_fac(vec3 lightPos, vec3 normal) {
vec3 l_ = normalize(lightPos);
vec3 n_ = normalize(normal);
return max(dot(l_, n_),0.0);
}
float phong_fac(vec3 eyePos, vec3 lightPos, vec3 normal, float shiny) {
vec3 e_ = normalize(eyePos);
vec3 l_ = normalize(lightPos);
vec3 n_ = normalize(normal);
vec3 r_ = normalize(reflect(-l_, n_));
return pow(max(dot(r_, e_),0.0), shiny);
}
float attenuate(float d/*distance*/, float c/*constant*/,
float l/*linear*/, float q/*quadratic*/) {
return 1.0/(c + l * d + q * d * d);
}
struct Material {
vec3 ambient, diffuse, specular;
};
void main(){
Material mat;
/*texture2D( myTextureSampler, UV ).rgb;*/
vec3 baseColor =
vec3(UV.s, UV.t, clamp(UV.s + UV.t,0.,1.)); // just to keep these attributes in use, saving them from the optimizer
mat.ambient = mat.diffuse = mat.specular = baseColor;
mat.ambient *= AMBIENCE; mat.specular *= SPECULARITY;
// attenuation
float att = attenuate(length(LightDirection_cameraspace), 0., 0., 1.);
// light
vec3 l_ = LIGHT_COLOR * LIGHT_INTENSITY * att;
// Diffuse Contribution
float dc = lambert_fac(LightDirection_cameraspace, Normal_cameraspace);
// Specular Contribution
float sc = phong_fac(EyeDirection_cameraspace,
LightDirection_cameraspace,
Normal_cameraspace,
SHININESS);
outColor = vec4(mat.ambient
+ mat.diffuse * dc * l_
+ mat.specular * sc * l_, 1.0);
} // void main()
And this is what it looks like now:

values are not passed correctly to fragment shader for vertex attribute index greater than 2

I have been trying to display a mesh wireframe and pass each edge its own color as a vertex attribute array. For that I use two vertices in the vertex buffer for each edge of the mesh, and the edges themselves display correctly. The problem occurs with attribute locations 2, 3, and 4 in the shader: it seems I am not receiving the values that I loaded into the VBO. When I reorder the vertex attributes, say by putting the 'eColor' attribute at location 0, I start getting the correct values in the fragment shader. I want to know why this is happening. I have run the same code in different environments, Windows/Mac, with GLFW/Qt, and I face the same issue everywhere. Can anyone please point out my mistake?
Following is the code for vertex attribute binding:
GL_CHECK( glBindVertexArray( mVAO ) );
GL_CHECK( glUseProgram( mDebugProgram ) );
GL_CHECK( glGenBuffers( 2 , mVBO ) );
GL_CHECK( glBindBuffer( GL_ARRAY_BUFFER , mVBO[ 0 ] ) );
GL_CHECK( glBufferData( GL_ARRAY_BUFFER , mDebugVertexData.size() * sizeof( DebugVertexData ) , mDebugVertexData.data() , GL_DYNAMIC_DRAW ) );
GL_CHECK( glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , mVBO[ 1 ] ) );
GL_CHECK( glBufferData( GL_ELEMENT_ARRAY_BUFFER , mDebugWFIndices.size() * sizeof( GLuint ) , mDebugWFIndices.data() , GL_DYNAMIC_DRAW ) );
int offset = 0;
GL_CHECK( glVertexAttribPointer( 0 , 3, GL_FLOAT, GL_FALSE, sizeof( DebugVertexData ) , 0) );
offset = 3 * sizeof( GLfloat );
GL_CHECK( glVertexAttribPointer( 1 , 3 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( float * )offset ) );
offset = 3 * sizeof( GLfloat );
GL_CHECK( glVertexAttribPointer( 2 , 3 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( float * )offset ) );
offset = 3 * sizeof( GLfloat );
GL_CHECK( glVertexAttribPointer( 3 , 1 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( float * )offset ) );
offset = sizeof( GLfloat );
GL_CHECK( glVertexAttribPointer( 4 , 1 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( float * )offset ) );
GL_CHECK( glEnableVertexAttribArray(0) );
GL_CHECK( glEnableVertexAttribArray(1) );
GL_CHECK( glEnableVertexAttribArray(2) );
GL_CHECK( glEnableVertexAttribArray(3) );
GL_CHECK( glEnableVertexAttribArray(4) );
GL_CHECK( glBindBuffer( GL_ARRAY_BUFFER , 0 ) );
GL_CHECK( glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , 0 ) );
GL_CHECK( glUseProgram( 0) );
GL_CHECK( glBindVertexArray(0) );
Following is my vertex shader:
#version 410
layout ( location=0 ) in vec3 position;
layout ( location=1 ) in vec3 normal;
layout ( location=2 ) in vec3 eColor;
layout ( location=3 ) in float underRegion;
layout ( location=4 ) in float isSplittable;
out vec4 vPosition;
out vec3 vNormal;
out vec3 vColor;
out float flag1;
out float flag2;
void main()
{
vPosition = vec4( position , 1 );
vNormal = normal;
flag1 = underRegion;
flag2 = isSplittable;
vColor = eColor;
// gl_Position = mvpMatrix * vec4( position , 1 );
}
This is geometry shader:
#version 410
layout( lines ) in;
layout( line_strip , max_vertices = 2 ) out;
uniform mat4 mvMatrix;
uniform mat4 mvpMatrix;
in vec4[ 2 ] vPosition;
in vec3[ 2 ] vNormal;
in vec3[ 2 ] vColor;
in float[ 2 ] flag1;
in float[ 2 ] flag2;
// Output to the fragment shader
out float isEdgeSplittable;
out vec3 edgeColor;
void main()
{
float l = length( vPosition[ 0 ].xyz - vPosition[ 1 ].xyz );
vec4 v1 = vPosition[ 0 ];
vec4 v2 = vPosition[ 1 ];
v1.xyz += vNormal[ 0 ] * l * 0.001;
v2.xyz += vNormal[ 1 ] * l * 0.001;
v1 = mvpMatrix * v1;
v2 = mvpMatrix * v2;
edgeColor = vColor[ 0 ];
gl_Position = v1;
if( flag1[ 0 ] > 0.5 )
{
isEdgeSplittable = 1.0;
}
else
{
isEdgeSplittable = 0.0;
}
EmitVertex();
gl_Position = v2;
if( flag1[ 0 ] > 0.5 )
{
isEdgeSplittable = 1.0;
}
else
{
isEdgeSplittable = 0.0;
}
edgeColor = vColor[ 1 ];
EmitVertex();
EndPrimitive();
}
Following is fragment shader:
#version 410
layout (location = 0) out vec4 color;
in float isEdgeSplittable;
in vec3 edgeColor;
void main()
{
color.xyz = edgeColor;//vec3(0 , 0 , 1) ;//eColor2;
//color.w = 1.0;
if( isEdgeSplittable > 0.5 )
{
color.xyz = vec3( 0 , 0 , 1 );
}
}
Regards
Avanindra

Use normals as colors in OpenGL using assimp

I exported the Suzanne model (the monkey head) from Blender as a .obj file, and I can only see it when I hard-code an RGB value in the fragment shader, e.g. frag_color = vec4( 1.0, 0.0, 0.0, 1.0 ); to make the model red. But it just looks like a deformed texture unless I rotate it.
I want to use the normals as colors so that I can see specific details in the face, etc. I bound the normals to vertex attribute 1.
if ( mesh -> HasNormals() )
{
normals = ( GLfloat * ) malloc( * pointCount * 3 * sizeof( GLfloat ) );
for ( int i = 0; i < * pointCount; i++ )
{
const aiVector3D * vn = &( mesh -> mNormals[ i ] );
normals[ i * 3 ] = ( GLfloat ) vn -> x;
normals[ i * 3 + 1 ] = ( GLfloat ) vn -> y;
normals[ i * 3 + 2 ] = ( GLfloat ) vn -> z;
}
GLuint vbo;
glGenBuffers( 1, &vbo );
glBindBuffer( GL_ARRAY_BUFFER, vbo );
glBufferData( GL_ARRAY_BUFFER, 3 * * pointCount * sizeof( GLfloat ), normals, GL_STATIC_DRAW );
glVertexAttribPointer( 1, 3, GL_FLOAT, GL_FALSE, 0, NULL );
glEnableVertexAttribArray( 1 );
free( normals );
}
And I bind attribute index 1 to vertex_normal right after attaching the shaders, just before linking.
glAttachShader( program, vertShader );
glAttachShader( program, fragShader );
glBindAttribLocation( program, 0, "vertex_position" );
glBindAttribLocation( program, 1, "vertex_normal" );
glLinkProgram( program );
These are my shaders
vertshader.shader
#version 330
in vec3 vertex_position;
in vec3 vertex_normal;
uniform mat4 proj, view, model;
out vec3 normals;
void main()
{
normals = vertex_normal;
gl_Position = proj * vec4( vec3( view * model * vec4( vertex_position, 1.0 ) ), 1.0 );
}
fragshader.shader
#version 330
in vec3 normals;
out vec4 fragment_color;
void main()
{
fragment_color = vec4( normals, 1.0 );
}
But this only outputs a black screen. I know the model is loading because I can color it red as above. I tried passing vertex_normal directly into the fragment shader, which didn't work; I also tried normalizing the normals, and that didn't change anything either.
So how can I use the models normals as colors in the fragment shader?
OK, I found a fix. Apparently it was Blender's fault: in the export side panel that controls what gets written with the mesh, "Write Normals" wasn't checked. Thanks to Reto Koradi; I didn't think it was possible for a mesh to be written without normals.
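As an aside, if you ever do have to load a mesh that genuinely lacks normals, Assimp can generate them at import time. A minimal sketch under that assumption (the file name is hypothetical; the flags are standard Assimp post-process flags):
#include <assimp/cimport.h>
#include <assimp/postprocess.h>
#include <assimp/scene.h>

// aiProcess_GenSmoothNormals fills mesh->mNormals when the file has none;
// it is ignored for meshes that already come with normals.
const aiScene *scene = aiImportFile( "suzanne.obj",
    aiProcess_Triangulate | aiProcess_GenSmoothNormals );
const aiMesh *mesh = scene->mMeshes[0];
bool hasNormals = mesh->HasNormals();  // should now report true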