My diffuse lighting doesn't seem to be working properly.
Fragment Shader:
#version 330 core
// FIX: identifiers prefixed with "gl_" are reserved in GLSL, so declaring
// "out vec4 gl_FragColor" is invalid in a core-profile shader on strict
// compilers. Use a user-defined output name instead.
out vec4 fragColor;
in vec4 vertexColor;
in vec2 texelCoord;
in vec3 Normal;
// Scene-wide directional light parameters.
struct DirectionalLight
{
vec3 color;
float ambientIntensity;
vec3 direction;
float diffuseIntensity;
};
uniform sampler2D textureSampler;
uniform DirectionalLight directionalLight;
void main()
{
// Ambient term: constant base illumination.
vec4 ambientColor = vec4(directionalLight.color, 1.0f) * directionalLight.ambientIntensity;
// Lambertian term: N.L clamped to zero for faces pointing away from the light.
// NOTE(review): this convention treats `direction` as pointing from the
// surface toward the light — confirm the value set on the CPU side matches.
float diffuseFactor = max(dot(normalize(Normal), normalize(directionalLight.direction)), 0.0f);
vec4 diffuseColor = vec4(directionalLight.color, 1.0f) * directionalLight.diffuseIntensity * diffuseFactor;
fragColor = texture(textureSampler, texelCoord) * (ambientColor + diffuseColor);
}
Vertex Shader:
#version 330 core
// Vertex layout: position (loc 0), texture coordinate (loc 1), normal (loc 2).
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
out vec4 vertexColor;
out vec2 texelCoord;
out vec3 Normal;
uniform mat4 transformation;
uniform mat4 projection;
uniform mat4 view;
void main()
{
gl_Position = projection * view * transformation * vec4( position, 1.0f );
// Debug color derived from the model-space position, clamped to [0,1].
vertexColor = vec4(clamp(position, 0.0f, 1.0f), 1.0f);
texelCoord = texCoord;
// Normal matrix (inverse-transpose of the model matrix): keeps normals
// perpendicular to surfaces even under non-uniform scaling.
Normal = mat3(transpose(inverse(transformation))) * normal;
}
How I create Meshes:
// Builds smooth per-vertex normals by accumulating the unit face normal of
// every triangle into its three corner vertices, then renormalizing each
// vertex normal in a second pass.
//
// indices / indicesCount   : triangle list, 3 indices per face
// vertices / verticesCount : interleaved vertex array / total float count
// vertexLength             : floats per vertex (stride in floats)
// normalOffset             : offset of the normal (in floats) inside a vertex
//
// NOTE(review): assumes the normal slots in `vertices` start zeroed — the
// accumulation below adds onto whatever is already there; confirm at call site.
void CalcAverageNormals( unsigned int* indices , unsigned int indicesCount , float* vertices , unsigned int verticesCount , unsigned int vertexLength , unsigned int normalOffset )
{
// Pass 1: add each face normal onto its three vertices.
// FIX: unsigned loop counter — the original `int i` was compared against the
// unsigned `indicesCount`, a signed/unsigned mismatch.
for ( unsigned int i = 0; i < indicesCount; i += 3 )
{
unsigned int v1 = indices[i] * vertexLength;
unsigned int v2 = indices[ i + 1 ] * vertexLength;
unsigned int v3 = indices[ i + 2 ] * vertexLength;
// Two edges of the triangle; their cross product is the face normal.
glm::vec3 line1( vertices[ v2 ] - vertices[ v1 ] , vertices[ v2 + 1 ] - vertices[ v1 + 1 ] , vertices[ v2 + 2 ] - vertices[ v1 + 2 ] );
glm::vec3 line2( vertices[ v3 ] - vertices[ v1 ] , vertices[ v3 + 1 ] - vertices[ v1 + 1 ] , vertices[ v3 + 2 ] - vertices[ v1 + 2 ] );
glm::vec3 normal = glm::normalize( glm::cross( line1 , line2 ) );
// Re-point the indices at the normal slot of each vertex.
v1 += normalOffset;
v2 += normalOffset;
v3 += normalOffset;
vertices[ v1 ] += normal.x; vertices[ v1 + 1 ] += normal.y; vertices[ v1 + 2 ] += normal.z;
vertices[ v2 ] += normal.x; vertices[ v2 + 1 ] += normal.y; vertices[ v2 + 2 ] += normal.z;
vertices[ v3 ] += normal.x; vertices[ v3 + 1 ] += normal.y; vertices[ v3 + 2 ] += normal.z;
}
// Pass 2: renormalize the accumulated sums to unit length.
for ( unsigned int j = 0; j < verticesCount / vertexLength; j++ )
{
unsigned int offset = j * vertexLength + normalOffset;
glm::vec3 normalVertex( vertices[ offset ] , vertices[ offset + 1 ] , vertices[ offset + 2 ] );
normalVertex = glm::normalize( normalVertex );
vertices[ offset ] = normalVertex.x;
vertices[ offset + 1 ] = normalVertex.y;
vertices[ offset + 2 ] = normalVertex.z;
}
}
// Builds a small 4-vertex test pyramid. Each vertex is 8 floats:
// position (x,y,z), texture coordinate (u,v), normal (nx,ny,nz) — the
// normal slots start zeroed and are filled by CalcAverageNormals.
void CreateTriangle() {
float vertices[] {
-0.5f,-0.5f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Left
0.5f,-0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Right
0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.0f, 0.0f, 0.0f, // Top
0.0f,-0.5f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f, 0.0f // Back Z
};
// NOTE(review): per the follow-up below, this index order mixes winding
// directions; a consistent counter-clockwise order is required for the
// averaged normals (and therefore the diffuse term) to come out right.
unsigned int indices[]{
0, 1, 2, // Front
3, 2, 1, // Right
3, 2, 0, // Left
3, 0, 1 // Bottom
};
// 12 indices, 32 floats total, stride 8 floats, normals at float offset 5.
CalcAverageNormals( indices , 12 , vertices , 32 , 8 , 5 );
// Loop kept for easy duplication of test meshes (currently creates one).
for ( int i = 0; i < 1; i++ )
{
Mesh* obj = new Mesh();
obj->CreateMesh( vertices , 32 , indices , 12 );
meshlist.push_back( obj );
}
}
CreateMesh()
// Uploads interleaved vertex data (pos 3, uv 2, normal 3 = 8 floats/vertex)
// and an index list into a fresh VAO/VBO/IBO triple.
// numVertices is the total FLOAT count, numIndices the index count.
void Mesh::CreateMesh( float* vertices , unsigned int numVertices , unsigned int* indices , unsigned int numIndices )
{
uIndices = numIndices;
glGenVertexArrays( 1 , &vao );
glBindVertexArray( vao );
/*Create Buffers*/
// The element buffer binding is recorded in the currently bound VAO.
glGenBuffers( 1 , &ibo );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , ibo );
glBufferData( GL_ELEMENT_ARRAY_BUFFER , numIndices * sizeof(unsigned) , indices , GL_STATIC_DRAW );
glGenBuffers( 1 , &vbo );
glBindBuffer( GL_ARRAY_BUFFER , vbo );
glBufferData( GL_ARRAY_BUFFER , numVertices * sizeof(float) , vertices , GL_STATIC_DRAW );
// Attribute 0: position — 3 floats at byte offset 0, stride 8 floats.
glVertexAttribPointer( 0 , 3 , GL_FLOAT , GL_FALSE , sizeof( vertices[ 0 ] ) * 8 , 0 );
glEnableVertexAttribArray( 0 );
// Attribute 1: texture coordinate — 2 floats at float offset 3.
glVertexAttribPointer( 1 , 2 , GL_FLOAT , GL_FALSE , sizeof( vertices[ 0 ] ) * 8 , ( void* )( sizeof( vertices[ 0 ] ) * 3 ) );
glEnableVertexAttribArray( 1 );
// Attribute 2: normal — 3 floats at float offset 5.
glVertexAttribPointer( 2 , 3 , GL_FLOAT , GL_FALSE , sizeof( vertices[ 0 ] ) * 8 , ( void* )( sizeof( vertices[ 0 ] ) * 5 ) );
glEnableVertexAttribArray( 2 );
/*Unbind Objects*/
// Correct order: the VAO is unbound BEFORE the element buffer, so the
// VAO keeps its index-buffer binding.
glBindBuffer( GL_ARRAY_BUFFER , 0 );
glBindVertexArray( 0 );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , 0 );
}
I only get some sort of lighting if I rotate the mesh:
Normal ( No lighting):
I have been trying to figure this out for days, but I am not sure what I did wrong. If you could help me, that would be great.
It turns out it was a triangle-winding problem caused by my index order. I fixed it by listing the indices in counter-clockwise order, because of how my diffuse factor is calculated.
unsigned int indices[]{
0, 1, 2, // Front
3, 1, 2, // Right
3, 0, 2, // Left
3, 1, 0 // Bottom
};
The transpose and inverse calculation is to correct non-uniform scale matrices.
this:
Normal = mat3(transpose(inverse(transformation))) * normal;
looks suspicious the normal itself should be transformed into the same coordinate system as the light direction vector is in. As normal is a vector the w should be zero so I would expect either
Normal = view * transformation * vec4( normal, 0.0f );
if the light direction is in camera coordinates or:
Normal = transformation * vec4( normal, 0.0f );
if the light direction is in global world coordinates (which is more likely the case)...
Also setting shader output variable like gl_Position should be the last line of your shader or you risk GL implementation optimize out the rest of code after it on some implementations.
btw. IIRC mat3(transpose(inverse(transformation))) is the same as mat3(transformation) in case the transformation matrix contains no non-uniform scaling, but as Rabbid76 pointed out it has its purpose...
One last thing I can think of is wrong normal direction (opposite or inconsistent) in such case I would try exchange:
max(dot(normalize(Normal), normalize(directionalLight.direction)), 0.0f);
with:
abs(dot(normalize(Normal), normalize(directionalLight.direction)));
If it helps you have to check the normals or just negate the dot result...
For more info see:
OpenGL - vertex normals in OBJ ... however there I use w=1 as the matrix used has no offset so it does not matter
Understanding 4x4 homogenous transform matrices
Related
i have a single vao that contains a character set of a font.
Each character is drawn with a set of indices belonging to it.
The idea was to write a couple of chars and bind the vao only one time.
Everything works fine - except the positioning of the glyphs.
The loop is like
glGetUniformLocations()
glBindVertexArray( vao )
for( i=0; i< lg; i++ )
{
glUniforMatrix4fV(); // send translation matrix to shader
glDrawElements( part of the indexbuffer );
trans_mx = glm::translate(); // compute the translation matrix
}
Vertexshader is:
#version 450 core
layout( location = 0 ) in vec3 vx; // the vertex_buffer in modelspace
layout( location = 1 ) in vec4 vx_col; // color of each vertex
out vec4 fragment_color;
uniform mat4 mvp_mx;
uniform mat4 trans_mx;
void main()
{
// NOTE(review): the outer vec4(...) wrapper is redundant —
// trans_mx * vec4(vx, 1.0f) already yields a vec4.
gl_Position = mvp_mx * vec4( trans_mx * vec4( vx, 1.0f ) );
fragment_color = vx_col;
}
The translation works - but not between the draw calls.
All chars are drawn at the same position. which is translated.
For me it seems as if the translation matrix will not be updated.
??????
this is the code from the function that draws the chars
// Draws the characters of `str` one glyph at a time from a shared VAO,
// selecting each glyph's index-buffer segment by character code.
lg = strlen( str );
vao = sfs->vao;
_sys.mvp_mx = _sys.proj_mx * _sys.view_mx * vao->model_mx; // compute global mvp
sh_s = &__sh_list[ vao->shd_ind ]; // pointer to our shader_struct
sh_s->get_uniform_loc(); // get uniform locations for this shader
glBindVertexArray( vao->id ); // bind vao
glUseProgram( sh_s->prg_id ); // use the shader
for( c1 = 0; c1 < lg; c1++ )
{
sh_s->send_uniform(); // send uniforms to the shader
i_seg = &vao->ind_b->seg[ str[ c1 ] ]; // segment for this character
glDrawElements( i_seg->prim, i_seg->count, i_seg->type, ( void* )i_seg->offset );
// NOTE(review): this recomputes the SAME constant matrix on every pass
// (and only after the draw call), so every glyph gets an identical
// translation — see the accepted answer below for the fix.
_sys.trans_mx = glm::translate( glm::mat4( 1.0f ), glm::vec3( 10.0f, 0.0f, 0.0f ) );
}
glBindVertexArray( 0 ); // unbind vao
glUseProgram( 0 ); // unbind shader
The expression
for( c1 = 0; c1 < lg; c1++ )
{
.....
_sys.trans_mx = glm::translate(glm::mat4(1.0f), glm::vec3(10.0f, 0.0f, 0.0f));
}
will ongoing calculate the same matrix.
If you want to calculate a continuously changing matrix in the loop, then you have to continually change the matrix based on its current value:
for( c1 = 0; c1 < lg; c1++ )
{
.....
_sys.trans_mx = glm::translate(_sys.trans_mx, glm::vec3(10.0f, 0.0f, 0.0f));
}
Or you calculate the matrix dependent on the control variable of the loop
for( c1 = 0; c1 < lg; c1++ )
{
.....
_sys.trans_mx = glm::translate(glm::mat4(1.0f), glm::vec3(10.0f * c1, 0.0f, 0.0f));
}
I have been trying to display a mesh wireframe and pass each edge its own color as a vertex attribute array. For that I have used two vertices in the vertex buffer for each edge of the mesh. I could display the edges correctly. The problem arises with attribute locations 2, 3, 4 in the shader; it seems I am not receiving the values that I loaded into the VBO. And when I reorder the vertex attributes, say I put the 'eColor' attribute at location 0, I start getting the correct value in the fragment shader. I want to know why this is happening. I have executed the same code in different environments — Windows/Mac, with GLFW/Qt — and everywhere I face the same issue. Can anyone please point out my mistake?
Following is the code for vertex attribute binding:
// Interleaved layout of DebugVertexData:
//   position(3 floats) | normal(3) | eColor(3) | underRegion(1) | isSplittable(1)
GL_CHECK( glBindVertexArray( mVAO ) );
GL_CHECK( glUseProgram( mDebugProgram ) );
GL_CHECK( glGenBuffers( 2 , mVBO ) );
GL_CHECK( glBindBuffer( GL_ARRAY_BUFFER , mVBO[ 0 ] ) );
GL_CHECK( glBufferData( GL_ARRAY_BUFFER , mDebugVertexData.size() * sizeof( DebugVertexData ) , mDebugVertexData.data() , GL_DYNAMIC_DRAW ) );
GL_CHECK( glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , mVBO[ 1 ] ) );
GL_CHECK( glBufferData( GL_ELEMENT_ARRAY_BUFFER , mDebugWFIndices.size() * sizeof( GLuint ) , mDebugWFIndices.data() , GL_DYNAMIC_DRAW ) );
// BUG FIX: byte offsets must ACCUMULATE across attributes. The original code
// reset `offset` to the size of a single attribute before each call, so
// attributes 2, 3 and 4 all pointed at the wrong interleaved fields —
// exactly the reported symptom.
size_t offset = 0;
GL_CHECK( glVertexAttribPointer( 0 , 3, GL_FLOAT, GL_FALSE, sizeof( DebugVertexData ) , ( void* )offset ) );
offset += 3 * sizeof( GLfloat ); // past position
GL_CHECK( glVertexAttribPointer( 1 , 3 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( void* )offset ) );
offset += 3 * sizeof( GLfloat ); // past normal
GL_CHECK( glVertexAttribPointer( 2 , 3 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( void* )offset ) );
offset += 3 * sizeof( GLfloat ); // past eColor
GL_CHECK( glVertexAttribPointer( 3 , 1 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( void* )offset ) );
offset += sizeof( GLfloat ); // past underRegion
GL_CHECK( glVertexAttribPointer( 4 , 1 , GL_FLOAT, GL_FALSE, sizeof ( DebugVertexData ) , ( void* )offset ) );
GL_CHECK( glEnableVertexAttribArray(0) );
GL_CHECK( glEnableVertexAttribArray(1) );
GL_CHECK( glEnableVertexAttribArray(2) );
GL_CHECK( glEnableVertexAttribArray(3) );
GL_CHECK( glEnableVertexAttribArray(4) );
// BUG FIX: unbind the VAO BEFORE unbinding GL_ELEMENT_ARRAY_BUFFER — the
// element-buffer binding is part of VAO state, so unbinding it while the
// VAO is still bound detaches the index buffer from the VAO.
GL_CHECK( glBindBuffer( GL_ARRAY_BUFFER , 0 ) );
GL_CHECK( glUseProgram( 0) );
GL_CHECK( glBindVertexArray(0) );
GL_CHECK( glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , 0 ) );
Following is my vertex shader:
#version 410
// Pass-through vertex shader for the wireframe pipeline: positions stay in
// model space, the geometry shader applies mvpMatrix.
layout ( location=0 ) in vec3 position;
layout ( location=1 ) in vec3 normal;
layout ( location=2 ) in vec3 eColor;
layout ( location=3 ) in float underRegion;
layout ( location=4 ) in float isSplittable;
out vec4 vPosition;
out vec3 vNormal;
out vec3 vColor;
out float flag1;
out float flag2;
void main()
{
// Forward all attributes unchanged to the geometry shader.
vPosition = vec4( position , 1 );
vNormal = normal;
flag1 = underRegion;
flag2 = isSplittable;
vColor = eColor;
// gl_Position intentionally not written here — the geometry shader emits it.
// gl_Position = mvpMatrix * vec4( position , 1 );
}
This is geometry shader:
#version 410
// Takes each input edge (line), offsets its endpoints slightly along their
// normals (to lift the wire off the surface), projects them with mvpMatrix,
// and re-emits the line with per-vertex color and a splittable flag.
layout( lines ) in;
layout( line_strip , max_vertices = 2 ) out;
uniform mat4 mvMatrix;
uniform mat4 mvpMatrix;
in vec4[ 2 ] vPosition;
in vec3[ 2 ] vNormal;
in vec3[ 2 ] vColor;
in float[ 2 ] flag1;
in float[ 2 ] flag2;
// Output to the fragment shader
out float isEdgeSplittable;
out vec3 edgeColor;
void main()
{
// Offset is proportional to the edge length so the lift scales with the mesh.
float l = length( vPosition[ 0 ].xyz - vPosition[ 1 ].xyz );
vec4 v1 = vPosition[ 0 ];
vec4 v2 = vPosition[ 1 ];
v1.xyz += vNormal[ 0 ] * l * 0.001;
v2.xyz += vNormal[ 1 ] * l * 0.001;
v1 = mvpMatrix * v1;
v2 = mvpMatrix * v2;
edgeColor = vColor[ 0 ];
gl_Position = v1;
if( flag1[ 0 ] > 0.5 )
{
isEdgeSplittable = 1.0;
}
else
{
isEdgeSplittable = 0.0;
}
EmitVertex();
gl_Position = v2;
// NOTE(review): this also tests flag1[ 0 ] for the SECOND vertex — if the
// flag is per-vertex rather than per-edge, this should be flag1[ 1 ];
// confirm the intent.
if( flag1[ 0 ] > 0.5 )
{
isEdgeSplittable = 1.0;
}
else
{
isEdgeSplittable = 0.0;
}
edgeColor = vColor[ 1 ];
EmitVertex();
EndPrimitive();
}
Following is fragment shader:
#version 410
layout (location = 0) out vec4 color;
in float isEdgeSplittable;
in vec3 edgeColor;
void main()
{
color.xyz = edgeColor;//vec3(0 , 0 , 1) ;//eColor2;
//color.w = 1.0;
if( isEdgeSplittable > 0.5 )
{
color.xyz = vec3( 0 , 0 , 1 );
}
}
Regards
Avanindra
I exported the suzanne model from blender(Monkey head) as a .obj file and I can only see it when I use the RGB values in the fragment shader. e.g. frag_color = vec4( 1.0, 0.0, 0.0, 1.0 ); to make the model red. But it just looks like a deformed texture unless I rotate it
I want to use the normals as colors so that I can see specific details in the face, etc. I bound the normals to vertex position 1.
// Copies per-vertex normals out of the Assimp mesh into a tightly packed
// float array and uploads them as vertex attribute 1.
if ( mesh -> HasNormals() )
{
normals = ( GLfloat * ) malloc( * pointCount * 3 * sizeof( GLfloat ) );
for ( int i = 0; i < * pointCount; i++ )
{
const aiVector3D * vn = &( mesh -> mNormals[ i ] );
normals[ i * 3 ] = ( GLfloat ) vn -> x;
normals[ i * 3 + 1 ] = ( GLfloat ) vn -> y;
normals[ i * 3 + 2 ] = ( GLfloat ) vn -> z;
}
GLuint vbo;
glGenBuffers( 1, &vbo );
glBindBuffer( GL_ARRAY_BUFFER, vbo );
glBufferData( GL_ARRAY_BUFFER, 3 * * pointCount * sizeof( GLfloat ), normals, GL_STATIC_DRAW );
// Tightly packed array: stride 0, offset NULL.
glVertexAttribPointer( 1, 3, GL_FLOAT, GL_FALSE, 0, NULL );
glEnableVertexAttribArray( 1 );
// Safe to free: glBufferData has already copied the data into GL-owned memory.
free( normals );
}
And I bound 1 to vertex_normal right after attaching the shaders but right before linking.
glAttachShader( program, vertShader );
glAttachShader( program, fragShader );
// glBindAttribLocation must be called BEFORE glLinkProgram — the bindings
// only take effect at link time.
glBindAttribLocation( program, 0, "vertex_position" );
glBindAttribLocation( program, 1, "vertex_normal" );
glLinkProgram( program );
These are my shaders
vertshader.shader
#version 330
in vec3 vertex_position;
in vec3 vertex_normal;
uniform mat4 proj, view, model;
out vec3 normals;
void main()
{
// Pass the raw model-space normal through for visualization.
normals = vertex_normal;
// NOTE(review): the vec3(...) truncation drops the w produced by
// view * model; since these are affine transforms (w stays 1) this is
// equivalent to proj * view * model * vec4(vertex_position, 1.0).
gl_Position = proj * vec4( vec3( view * model * vec4( vertex_position, 1.0 ) ), 1.0 );
}
fragshader.shader
#version 330
in vec3 normals;
out vec4 fragment_color;
void main()
{
// Visualize the interpolated normal directly as an RGB color
// (negative components simply render dark/black).
fragment_color = vec4( normals, 1.0 );
}
But this only outputs a black screen. I know the model is loading because I can color it red like above. I tried importing vertex_normal directly into the frag shader, that didn't work, I also tried normalizing normals and that didn't change the effect neither.
So how can I use the models normals as colors in the fragment shader?
Ok, I found a fix. Apparently it was blenders fault. There is a side panel on what I want to export with my mesh, and Write normals wasn't checked. Thanks to Reto Koradi, I didn't think it was possible for a mesh to be written without normals.
I'm having a bit of an odd problem. I'm trying to render some data with OpenGL on my Windows system. I found a set of tutorials at opengl-tutorial.org which were written for OpenGL 3.3. As my laptop (where I do a great deal of developing) only supports OpenGL 2.1, I proceeded to download the OpenGL 2.1 port of the tutorial. I messed around with it a bit, adding features and refactoring it for scalability, but noticed something odd. Whenever I rendered my data with Vertex Buffer Objects, I got a rather incorrect representation of my data. This is shown below.
http://www.majhost.com/gallery/DagonEcelstraun/Others/HelpNeeded/badrender.png
However, when I specify my data using glVertex3fv and such, I get a much nicer result, again shown below.
http://www.majhost.com/gallery/DagonEcelstraun/Others/HelpNeeded/goodrender.png
The problem occurs both on my Windows 8.1 laptop with Intel i3 integrated graphics and on my Windows 7 desktop with its nVidia GTX 660, so it's not a hardware problem. Does anyone know what may be the issue here?
Loading mesh data:
// Imports the first mesh of the scene with Assimp and flattens it into an
// interleaved array: position(3), normal(3), uv(2)+pad(1), tangent(3) = 12 floats.
const aiScene *scene = aiImportFile( sName.c_str(),
aiProcessPreset_TargetRealtime_MaxQuality | aiProcess_FlipUVs );
const aiMesh *mesh = scene->mMeshes[0];
// NOTE(review): assumes the mesh has normals, UV channel 0 and tangents —
// verify with HasNormals()/HasTextureCoords(0)/HasTangentsAndBitangents().
for( int i = 0; i < mesh->mNumVertices; i++ ) {
meshData.push_back( mesh->mVertices[i][0] );
meshData.push_back( mesh->mVertices[i][1] );
meshData.push_back( mesh->mVertices[i][2] );
meshData.push_back( mesh->mNormals[i][0] );
meshData.push_back( mesh->mNormals[i][1] );
meshData.push_back( mesh->mNormals[i][2] );
meshData.push_back( mesh->mTextureCoords[0][i][0] );
meshData.push_back( mesh->mTextureCoords[0][i][1] );
meshData.push_back( 0 ); // pad: UV stored as 3 components
meshData.push_back( mesh->mTangents[i][0] );
meshData.push_back( mesh->mTangents[i][1] );
meshData.push_back( mesh->mTangents[i][2] );
}
// Flatten the (triangulated) faces into a flat index list.
for( int i = 0; i < mesh->mNumFaces; i++ ) {
for( int j = 0; j < 3; j++ ) {
indices.push_back( mesh->mFaces[i].mIndices[j] );
}
}
Sending data to the graphics card for the first time (called right after previous code):
// One-time upload of the interleaved vertex data and the 16-bit index list.
glGenBuffers( 1, &glVertData );
glBindBuffer( GL_ARRAY_BUFFER, glVertData );
glBufferData( GL_ARRAY_BUFFER, meshData.size() * sizeof( GLfloat ), &meshData[0], GL_STATIC_DRAW );
// Generate a buffer for the indices as well
glGenBuffers( 1, &glIndexes );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, glIndexes );
glBufferData( GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW );
Rendering the mesh:
//Tell the shader to use our data
//bindVerts, bindUvs, bindNorms, and bindTangents refer to attribute variables in my shader
//vertexPosition_modelspace, vertexUV, vertexNormal_modelspace, and vertexTangent_modelspace, respectively.
this->verts = bindVerts;
this->uvs = bindUvs;
this->norms = bindNorms;
this->tangents = bindTangents;
glEnableVertexAttribArray( verts );
glEnableVertexAttribArray( uvs );
glEnableVertexAttribArray( norms );
glEnableVertexAttribArray( tangents );
//Specify how the graphics card should decode our data.
//BUG FIX (per the answer below): stride and offsets are measured in BYTES,
//not floats. A vertex is 12 floats = 12 * sizeof(GLfloat) = 48 bytes, and
//each attribute offset is its float index times sizeof(GLfloat).
glBindBuffer( GL_ARRAY_BUFFER, glVertData );
// 1rst attribute buffer : vertices (floats 0..2)
glVertexAttribPointer( verts, 3, GL_FLOAT, GL_FALSE, 12 * sizeof( GLfloat ), (void*) 0 );
// 2nd attribute buffer : normals (floats 3..5)
glVertexAttribPointer( norms, 3, GL_FLOAT, GL_FALSE, 12 * sizeof( GLfloat ), (void*) ( 3 * sizeof( GLfloat ) ) );
//3rd attribute buffer : UVs (floats 6..8, third component is padding)
glVertexAttribPointer( uvs, 3, GL_FLOAT, GL_FALSE, 12 * sizeof( GLfloat ), (void*) ( 6 * sizeof( GLfloat ) ) );
//4th attribute buffer: tangents (floats 9..11)
glVertexAttribPointer( tangents, 3, GL_FLOAT, GL_FALSE, 12 * sizeof( GLfloat ), (void*) ( 9 * sizeof( GLfloat ) ) );
// Index buffer
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, glIndexes );
//rendering the mesh with VBOs:
//BUG FIX: draw with the same primitive type as the immediate-mode path —
//the index list describes triangles, not lines.
glDrawElements( GL_TRIANGLES, indices.size(), GL_UNSIGNED_SHORT, (void*) 0 );
//specifying the vertex data individually:
glBegin( GL_TRIANGLES );
int ind;
for( size_t i = 0; i < indices.size(); i++ ) { // size_t: matches indices.size()
ind = indices[i] * 12;
glNormal3fv( &meshData[ind + 3] );
glTexCoord2fv( &meshData[ind + 6] );
glVertex3fv( &meshData[ind] );
}
glEnd();
//clean up after the render
glDisableVertexAttribArray( verts );
glDisableVertexAttribArray( uvs );
glDisableVertexAttribArray( norms );
glDisableVertexAttribArray( tangents );
My vertex shader:
#version 130
// Input vertex data, different for all executions of this shader.
attribute vec3 vertexPosition_modelspace;
attribute vec3 vertexUV;
attribute vec3 vertexNormal_modelspace;
attribute vec3 vertexTangent_modelspace;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
out vec4 ShadowCoord;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightInvDirection_worldspace;
uniform mat4 DepthBiasMVP;
uniform sampler2D normalMap;
attribute vec3 vTangent;
void main() {
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP * vec4( vertexPosition_modelspace, 1 );
// BUG FIX: POSITIONS must be transformed with w = 1 so the matrices'
// translation column is applied; w = 0 is only correct for DIRECTIONS.
// The three position transforms below originally used w = 0.
ShadowCoord = DepthBiasMVP * vec4( vertexPosition_modelspace, 1 );
// Position of the vertex, in worldspace : M * position
Position_worldspace = ( M * vec4( vertexPosition_modelspace, 1 ) ).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
EyeDirection_cameraspace = vec3( 0, 0, 0 ) - ( V * M * vec4( vertexPosition_modelspace, 1 ) ).xyz;
// Vector that goes from the vertex to the light, in camera space.
// (w = 0 is correct here: the light direction is a direction, not a point.)
LightDirection_cameraspace = ( V * vec4( LightInvDirection_worldspace, 0 ) ).xyz;
// UV of the vertex. No special space for this one.
UV = vertexUV.st;
// Normal of the the vertex, in camera space
// Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
Normal_cameraspace = ( V * M * vec4( vertexNormal_modelspace.xyz, 0 ) ).xyz;
}
Fragment shader:
#version 130
// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Position_worldspace;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;
in vec4 ShadowCoord;
out vec4 fragColor;
// Values that stay constant for the whole mesh.
uniform sampler2D diffuse;
uniform mat4 MV;
uniform vec3 LightPosition_worldspace;
uniform sampler2D shadowMap;
//uniform int shadowLevel; //0 is no shadow, 1 is hard shadows, 2 is soft shadows, 3 is PCSS
// Returns a pseudo-random number in [0,1), deterministic in (seed, i).
// Classic sin/fract hash used for per-fragment shadow-sample jittering.
float random( vec3 seed, int i ) {
vec4 seed4 = vec4( seed, i );
float dot_product = dot( seed4, vec4( 12.9898, 78.233, 45.164, 94.673 ) );
return fract( sin( dot_product ) * 43758.5453 );
}
// Integer modulo for non-negative a, positive b.
// BUG FIX: the original returned a - (a / b), which is NOT the remainder
// (e.g. mod(40, 32) gave 39, indexing past the 32-entry Poisson disk).
int mod( int a, int b ) {
return a - (a / b) * b;
}
void main() {
int shadowLevel = 1; //let's just do hard shadows
// Light emission properties
vec3 LightColor = vec3( 1, 1, 1 );
float LightPower = 1.0f;
// Material properties
vec3 MaterialDiffuseColor = texture( diffuse, UV ).rgb;
vec3 MaterialAmbientColor = vec3( 0.1, 0.1, 0.1 ) * MaterialDiffuseColor;
vec3 MaterialSpecularColor = vec3( 0.3, 0.3, 0.3 );
vec3 n = normalize( Normal_cameraspace );
vec3 l = normalize( LightDirection_cameraspace );
// Lower clamp of 0.2 keeps back-facing surfaces faintly lit.
float cosTheta = clamp( dot( n, l ), 0.2, 1 );
// Eye vector (towards the camera)
vec3 E = normalize( EyeDirection_cameraspace );
// Direction in which the triangle reflects the light
vec3 R = reflect( -l, n );
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
// - Looking into the reflection -> 1
// - Looking elsewhere -> < 1
float cosAlpha = clamp( dot( E, R ), 0, 1 );
float visibility = 1.0;
//variable bias (slope-scaled to reduce shadow acne on grazing surfaces)
float bias = 0.005 * tan( acos( cosTheta ) );
bias = clamp( bias, 0, 0.01 );
// dFragment to the light
float dFragment = ( ShadowCoord.z-bias ) / ShadowCoord.w;
float dBlocker = 0;
float penumbra = 1;
float wLight = 5.0;
// PCSS blocker search: estimate the penumbra width from average blocker depth.
if( shadowLevel == 3 ) {
// Sample the shadow map 8 times
// NOTE(review): the comment above disagrees with the loop — it runs 16
// iterations; confirm which count is intended.
float count = 0;
float temp;
float centerBlocker = texture( shadowMap, ShadowCoord.xy).r;
float scale = (wLight * (dFragment - centerBlocker)) / dFragment;
for( int i = 0; i < 16; i++ ) {
temp = texture( shadowMap, ShadowCoord.xy + (scale * poissonDisk( i ) / 50.0) ).r;
if( temp < dFragment ) {
dBlocker += temp;
count += 1;
}
}
if( count > 0 ) {
dBlocker /= count;
penumbra = wLight * (dFragment - dBlocker) / dFragment;
}
}
// Hard shadow: single depth comparison.
if( shadowLevel == 1 ) {
if( texture( shadowMap, ShadowCoord.xy).r < dFragment ) {
visibility -= 0.8;
}
} else if( shadowLevel > 1 ) {
// Soft shadow: 32 jittered Poisson-disk samples, each darkening by `sub`.
float iterations = 32;
float sub = 0.8f / iterations;
for( int i = 0; i < iterations; i++ ) {
int index = mod( int( 32.0 * random( gl_FragCoord.xyy, i ) ), 32 );
if( texture( shadowMap, ShadowCoord.xy + (penumbra * poissonDisk( index ) / 250.0) ).r < dFragment ) {
visibility -= sub;
}
}
}
visibility = min( visibility, cosTheta );
//MaterialDiffuseColor = vec3( 0.8, 0.8, 0.8 );
// NOTE(review): fragColor.a is never written (undefined alpha) — confirm
// blending is disabled, or set it explicitly.
fragColor.rgb = MaterialAmbientColor +
visibility * MaterialDiffuseColor * LightColor * LightPower +
visibility * MaterialSpecularColor * LightColor * LightPower * pow( cosAlpha, 5 );
}
Note that poissonDisk( int ind ) returns a vec2 with a magnitude of no more than 1 which is in a poisson disk distribution. Even though I'm using shader version 130, I used a function and not an array because the array runs rather slowly on my laptop.
I do bind that shader before I do any rendering. I also make sure to upload the correct variables to all of my uniforms, but I didn't show that to save space since I know it's working correctly.
Does anyone know what's causing this incorrect render?
Well, first of all, stop drawing the VBO using GL_LINES. Use the same primitive mode for immediate mode and VBO drawing.
Also, since when is 3*4 = 3? The address (offset) in your VBO vertex pointers should be the number of elements multiplied by the size of the data type when using an interleaved data structure. GL_FLOAT is 4 bytes, if you have a 3-component vertex position this means that the offset to the next field in your VBO is 3*4 = (void *)12, not (void *)3. This process must continue for each additional vertex array pointer, they all use incorrect offsets.
Likewise, the stride of your VBO should be 12 * sizeof (GLfloat) = 48, not 12.
In my OpenGL app, it won't let me draw a line greater then ten pixels wide. Is there a way to make it draw more than ten pixels?
// Draws a single colored line segment in immediate mode.
// NOTE(review): glLineWidth is clamped to the implementation-defined
// GL_ALIASED_LINE_WIDTH_RANGE (often capped around 10) — widths beyond that
// are silently clamped, which is why wider lines don't appear.
void OGL_Renderer::drawLine(int x, int y, int x2, int y2, int r, int g, int b, int a, int line_width)
{
glColor4ub(r, g, b, a);
glLineWidth((GLfloat)line_width);
glBegin(GL_LINES);
glVertex2i(x, y);
glVertex2i(x2, y2);
glEnd();
// Restore the default width so later draws are unaffected.
glLineWidth(1.0f);
}
I recommend to use a Shader, which generates triangle primitives along a line strip (or even a line loop).
The task is to generate thick line strip, with as less CPU and GPU overhead as possible. That means to avoid computation of polygons on the CPU as well as geometry shaders (or tessellation shaders).
Each segment of the line consist of a quad represented by 2 triangle primitives respectively 6 vertices.
0 2 5
+-------+ +
| / / |
| / / |
| / / |
+ +-------+
1 3 4
Between the line segments the miter hast to be found and the quads have to be cut to the miter.
+----------------+
| / |
| segment 1 / |
| / |
+--------+ |
| segment 2
| |
| |
+-------+
Create an array with the corners points of the line strip. The first and the last point define the start and end tangents of the line strip. So you need to add 1 point before the line and one point after the line. Of course it would be easy, to identify the first and last element of the array by comparing the index to 0 and the length of the array, but we don't want to do any extra checks in the shader.
If a line loop has to be draw, then the last point has to be add to the array head and the first point to its tail.
The array of points is stored to a Shader Storage Buffer Object. We use the benefit, that the last variable of the SSBO can be an array of variable size. In older versions of OpenGL (or OpenGL ES) a Uniform Buffer Object or even a Texture can be used.
The shader doesn't need any vertex coordinates or attributes. All we have to know is the index of the line segment. The coordinates are stored in the buffer. To find the index we make use of the the index of the vertex currently being processed (gl_VertexID).
To draw a line strip with N points (N-1 segments), 6*(N-1) vertices have to be processed.
We have to create an "empty" Vertex Array Object (without any vertex attribute specification):
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
And to draw 2*(N-1) triangle (6*(N-1) vertices):
glDrawArrays(GL_TRIANGLES, 0, 6*(N-1));
For the coordinate array in the SSBO, the data type vec4 is used (Please believe me, you don't want to use vec3):
layout(std430, binding = 0) buffer TVertex
{
vec4 vertex[];
};
Compute the index of the line segment, where the vertex coordinate belongs too and the index of the point in the 2 triangles:
int line_i = gl_VertexID / 6;
int tri_i = gl_VertexID % 6;
Since we are drawing N-1 line segments, but the number of elements in the array is N+2, the elements from vertex[line_i] to vertex[line_i+3] can be accessed for each vertex which is processed in the vertex shader.
vertex[line_i+1] and vertex[line_i+2] are the start respectively end coordinate of the line segment. vertex[line_i] and vertex[line_i+3] are required to compute the miter.
The thickness of the line should be set in pixel unit (uniform float u_thickness). The coordinates have to be transformed from model space to window space. For that the resolution of the viewport has to be known (uniform vec2 u_resolution). Don't forget the perspective divide. The drawing of the line will even work at perspective projection.
vec4 va[4];
for (int i=0; i<4; ++i)
{
va[i] = u_mvp * vertex[line_i+i];
va[i].xyz /= va[i].w;
va[i].xy = (va[i].xy + 1.0) * 0.5 * u_resolution;
}
The miter and the start and end tangents are calculated from the vectors between the points. It would be a waste of performance to test the points in the vertex shader for equality or for vectors of zero length. It is up to the vertex setup to take care of a proper list of points.
However the miter calculation even works if the predecessor and successor point of a point are equal. In this case the end of the line is cut normal to the line segment or tangent:
vec2 v_line = normalize(va[2].xy - va[1].xy);
vec2 nv_line = vec2(-v_line.y, v_line.x);
vec2 v_pred = normalize(va[1].xy - va[0].xy);
vec2 v_succ = normalize(va[3].xy - va[2].xy);
vec2 v_miter1 = normalize(nv_line + vec2(-v_pred.y, v_pred.x));
vec2 v_miter2 = normalize(nv_line + vec2(-v_succ.y, v_succ.x));
In the final vertex shader we just need to calculate either v_miter1 or v_miter2 dependent on the tri_i. With the miter, the normal vector to the line segment and the line thickness (u_thickness), the vertex coordinate can be computed:
vec4 pos;
if (tri_i == 0 || tri_i == 1 || tri_i == 3)
{
vec2 v_pred = normalize(va[1].xy - va[0].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_pred.y, v_pred.x));
pos = va[1];
pos.xy += v_miter * u_thickness * (tri_i == 1 ? -0.5 : 0.5) / dot(v_miter, nv_line);
}
else
{
vec2 v_succ = normalize(va[3].xy - va[2].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_succ.y, v_succ.x));
pos = va[2];
pos.xy += v_miter * u_thickness * (tri_i == 5 ? 0.5 : -0.5) / dot(v_miter, nv_line);
}
Finally the window coordinates have to be transformed back to clip space coordinates. Transform from window space to normalized device space. The perspective divide has to be reversed:
pos.xy = pos.xy / u_resolution * 2.0 - 1.0;
pos.xyz *= pos.w;
The shader can generate the following polygons (rendered with glPolygonMode(GL_FRONT_AND_BACK, GL_LINE))
(with default mode - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL))
For the following simple demo program I've used the GLFW API for creating a window, GLEW for loading OpenGL and GLM -OpenGL Mathematics for the math. I don't provide the code for the function CreateProgram, which just creates a program object, from the vertex shader and fragment shader source code:
#include <vector>
#include <string>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <gl/gl_glew.h>
#include <GLFW/glfw3.h>
// Vertex shader source: expands each segment of a polyline (stored as vec4
// points in an SSBO at binding 0) into a screen-space quad of u_thickness
// pixels, computing miter joints from the neighbouring points. Indexed purely
// by gl_VertexID — no vertex attributes needed.
std::string vertShader = R"(
#version 460
layout(std430, binding = 0) buffer TVertex
{
vec4 vertex[];
};
uniform mat4 u_mvp;
uniform vec2 u_resolution;
uniform float u_thickness;
void main()
{
int line_i = gl_VertexID / 6;
int tri_i = gl_VertexID % 6;
vec4 va[4];
for (int i=0; i<4; ++i)
{
va[i] = u_mvp * vertex[line_i+i];
va[i].xyz /= va[i].w;
va[i].xy = (va[i].xy + 1.0) * 0.5 * u_resolution;
}
vec2 v_line = normalize(va[2].xy - va[1].xy);
vec2 nv_line = vec2(-v_line.y, v_line.x);
vec4 pos;
if (tri_i == 0 || tri_i == 1 || tri_i == 3)
{
vec2 v_pred = normalize(va[1].xy - va[0].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_pred.y, v_pred.x));
pos = va[1];
pos.xy += v_miter * u_thickness * (tri_i == 1 ? -0.5 : 0.5) / dot(v_miter, nv_line);
}
else
{
vec2 v_succ = normalize(va[3].xy - va[2].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_succ.y, v_succ.x));
pos = va[2];
pos.xy += v_miter * u_thickness * (tri_i == 5 ? 0.5 : -0.5) / dot(v_miter, nv_line);
}
pos.xy = pos.xy / u_resolution * 2.0 - 1.0;
pos.xyz *= pos.w;
gl_Position = pos;
}
)";
// Fragment shader source: plain solid-white output.
std::string fragShader = R"(
#version 460
out vec4 fragColor;
void main()
{
fragColor = vec4(1.0);
}
)";
// Uploads the given vertex array into a freshly generated Shader Storage
// Buffer Object (left bound to GL_SHADER_STORAGE_BUFFER) and returns the
// buffer object's name.
GLuint CreateSSBO(std::vector<glm::vec4> &varray)
{
    GLuint buffer_object = 0;
    glGenBuffers(1, &buffer_object);
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer_object);
    // One vec4 per point; GL_STATIC_DRAW because the data never changes.
    const auto byte_count = varray.size() * sizeof(*varray.data());
    glBufferData(GL_SHADER_STORAGE_BUFFER, byte_count, varray.data(), GL_STATIC_DRAW);
    return buffer_object;
}
// Demo entry point.
// Creates a GLFW window, builds the thick-line shader program, uploads two
// polylines (a closed quad and a circle) to SSBOs, and renders them each
// frame; every line segment is expanded to 6 vertices (2 triangles) by the
// vertex shader.
int main(void)
{
    if ( glfwInit() == 0 )
        return 0;

    GLFWwindow *window = glfwCreateWindow( 800, 600, "GLFW OGL window", nullptr, nullptr );
    if ( window == nullptr )
    {
        glfwTerminate();
        return 0;   // fixed: was the typo "retturn 0;"
    }
    glfwMakeContextCurrent(window);

    if ( glewInit() != GLEW_OK )
    {
        glfwTerminate();   // release GLFW resources when the GL loader fails
        return 0;
    }

    GLuint program = CreateProgram(vertShader, fragShader);
    GLint loc_mvp  = glGetUniformLocation(program, "u_mvp");
    GLint loc_res  = glGetUniformLocation(program, "u_resolution");
    GLint loc_thi  = glGetUniformLocation(program, "u_thickness");

    glUseProgram(program);
    glUniform1f(loc_thi, 20.0);

    // Polyline 1: closed quad. The point lists are padded with one extra
    // point before the start and after the end so the shader can compute
    // the miter at every drawn corner.
    glm::vec4 p0(-1.0f, -1.0f, 0.0f, 1.0f);
    glm::vec4 p1(1.0f, -1.0f, 0.0f, 1.0f);
    glm::vec4 p2(1.0f, 1.0f, 0.0f, 1.0f);
    glm::vec4 p3(-1.0f, 1.0f, 0.0f, 1.0f);
    std::vector<glm::vec4> varray1{ p3, p0, p1, p2, p3, p0, p1 };
    GLuint ssbo1 = CreateSSBO(varray1);

    // Polyline 2: circle sampled every 8 degrees, with the same one-point
    // padding at both ends (-8 and 368 degrees).
    // Local constant instead of M_PI: M_PI is not guaranteed by <cmath>
    // (MSVC requires _USE_MATH_DEFINES).
    constexpr double kPi = 3.14159265358979323846;
    std::vector<glm::vec4> varray2;
    for (int u = -8; u <= 368; u += 8)
    {
        double a = u * kPi / 180.0;
        double c = cos(a), s = sin(a);
        varray2.emplace_back(glm::vec4((float)c, (float)s, 0.0f, 1.0f));
    }
    GLuint ssbo2 = CreateSSBO(varray2);

    // A bound VAO is still required in a core profile, even though all
    // vertex data is fetched from the SSBO inside the vertex shader.
    GLuint vao;
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    //glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    // fixed: was "glm::mat4(project);" — a vexing-parse declaration that may
    // leave the matrix uninitialized with newer GLM defaults.
    glm::mat4 project(1.0f);
    int vpSize[2]{0, 0};
    while (!glfwWindowShouldClose(window))
    {
        // Rebuild the projection and viewport only when the framebuffer
        // size actually changes.
        int w, h;
        glfwGetFramebufferSize(window, &w, &h);
        if (w != vpSize[0] || h != vpSize[1])
        {
            vpSize[0] = w; vpSize[1] = h;
            glViewport(0, 0, vpSize[0], vpSize[1]);
            float aspect = (float)w/(float)h;
            project = glm::ortho(-aspect, aspect, -1.0f, 1.0f, -10.0f, 10.0f);
            glUniform2f(loc_res, (float)w, (float)h);
        }
        glClear(GL_COLOR_BUFFER_BIT);

        // Draw the quad outline on the left.
        glm::mat4 modelview1( 1.0f );
        modelview1 = glm::translate(modelview1, glm::vec3(-0.6f, 0.0f, 0.0f) );
        modelview1 = glm::scale(modelview1, glm::vec3(0.5f, 0.5f, 1.0f) );
        glm::mat4 mvp1 = project * modelview1;
        glUniformMatrix4fv(loc_mvp, 1, GL_FALSE, glm::value_ptr(mvp1));
        glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo1);
        // N points minus the 2 padding points; N-1 segments, 6 vertices each.
        GLsizei N1 = (GLsizei)varray1.size()-2;
        glDrawArrays(GL_TRIANGLES, 0, 6*(N1-1));

        // Draw the circle on the right.
        glm::mat4 modelview2( 1.0f );
        modelview2 = glm::translate(modelview2, glm::vec3(0.6f, 0.0f, 0.0f) );
        modelview2 = glm::scale(modelview2, glm::vec3(0.5f, 0.5f, 1.0f) );
        glm::mat4 mvp2 = project * modelview2;
        glUniformMatrix4fv(loc_mvp, 1, GL_FALSE, glm::value_ptr(mvp2));
        glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo2);
        GLsizei N2 = (GLsizei)varray2.size()-2;
        glDrawArrays(GL_TRIANGLES, 0, 6*(N2-1));

        glfwSwapBuffers(window);
        glfwPollEvents();
    }
    glfwTerminate();
    return 0;
}
You could try drawing a quad. Make it as long as you want your line to be, and as tall as the line width you need, then rotate and position it where the line would go.
Ah, now that I understood what you meant:
draw a one by one square.
calc the length and orientation of the line
stretch it to the length in x
translate to startpos and rotate to line_orientation
or:
get the vector of the line: v = (x2 - x1, y2 - y1)
normalize v: n
get the orthogonal (normal) of the vector: o (easy in 2D)
add and subtract o from the line's end and start point to get 4 corner points
draw a quad with these points.
It makes sense that you can't. From the glLineWidth reference:
The range of supported widths and the size difference between supported widths within the range can be queried by calling glGet with arguments GL_LINE_WIDTH_RANGE and GL_LINE_WIDTH_GRANULARITY.