Point Sprite Alpha Blending Issue - c++

I'm trying to render a load of stars in 3D space using point sprites. My texture image is definitely 'good' as far as the alpha channel is concerned. It renders perfectly fine as a quad but when I render a lot of point sprites, I can see the square border of the image overwriting some of the images.
The above image shows the star being rendered nicely over the top of my cube. In the bottom right I would expect to see a continuous trail of star images in the 'flower' pattern that they've been drawn in.
What am I doing wrong?
Fragment shader:
#version 140
#extension GL_ARB_explicit_attrib_location : enable
precision mediump float;
// Star sprite texture, bound to the texture unit named by this sampler.
uniform sampler2D uTexture;
void main( void )
{
// gl_PointCoord runs 0..1 across the point sprite, so the whole texture
// (alpha channel included) is mapped onto each point.
gl_FragColor = texture2D( uTexture, gl_PointCoord );
}
Vertex shader:
#version 140
#extension GL_ARB_explicit_attrib_location : enable
precision mediump float;
// Each vertex carries a single angle; its position is derived from it.
layout (location = 0) in float aTheta;
// Rose-curve parameter controlling the petal count of the flower pattern.
uniform float uK;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main( void )
{
// Polar rose (rhodonea) curve evaluated in the z = 0 plane.
float x = cos( uK * aTheta ) * sin( aTheta );
float y = cos( uK * aTheta ) * cos( aTheta );
gl_Position = projection * view * model * vec4( x, y, 0.0, 1.0 );
// Fixed sprite size in pixels; requires GL_PROGRAM_POINT_SIZE enabled.
gl_PointSize = 64.0;
}
Code:
void CPoint::Render( void )
{
g_PointProgram.UseProgram();
glEnable( GL_PROGRAM_POINT_SIZE );
glEnable( GL_POINT_SPRITE );
// Set the alpha blending function
glEnable( GL_BLEND );
glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
glBlendEquation( GL_FUNC_ADD );
glBindBuffer( GL_ARRAY_BUFFER, m_VBO );
glm::mat4 Model = glm::mat4( 1.0 );
Model = glm::translate( Model, glm::vec3( 2.0f, 0.0f, 4.0f ) );
glm::mat4 Projection = g_Camera.GetProjection();
glm::mat4 View = g_Camera.GetView();
g_PointProgram.SetUint( "uTexture", g_ImgStar.m_Texture );
g_PointProgram.SetMatrix4fv( "model", Model );
g_PointProgram.SetMatrix4fv( "view", View );
g_PointProgram.SetMatrix4fv( "projection", Projection );
g_PointProgram.SetFloat( "uK", m_Emitter.k );
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, g_ImgStar.m_Texture );
glEnable( GL_TEXTURE_2D );
// Attributes
glVertexAttribPointer( 0, // 1st attribute array (only have 1)
1, // One theta angle per particle
GL_FLOAT, // Data is floating point type
GL_FALSE, // No fixed point scaling
sizeof( Particle ), // No gaps in data
(void*) 0 ); // Start from "theta" offset within bound buffer
glEnableVertexAttribArray( 0 );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableVertexAttribArray( 0 );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glDisable( GL_TEXTURE_2D );
}

Perhaps depth culling is playing tricks on you? Try glDepthMask(false) while rendering the sprites

Related

OpenGL: How do I sort points depending on the camera distance?

I have a structure made of 100,000 spheres as point-sprites using OpenGL. I have an issue when I rotate the structure on its centre axis.
The point-sprites are rendered in the order given by their array, which means the last ones overlap the first point-sprites created, without taking the depth in three-dimensional space into account.
How can I sort and rearrange in real-time the order of the point-sprites to keep always the three-dimensional perspective? I guess the idea is to read the camera position against the particles and then sort the array to always show closer particles first.
Is it possible to be fixed using shaders?
Here is my shader:
shader.frag
#version 120
uniform sampler2D tex;
// Per-particle color forwarded from the vertex shader.
varying vec4 inColor;
//uniform vec3 lightDir;
void main (void) {
// Modulate the sprite texture by the per-particle color.
gl_FragColor = texture2D(tex, gl_TexCoord[0].st) * inColor;
}
shader vert
#version 120
// define a few constants here, for faster rendering
attribute float particleRadius;
attribute vec4 myColor;
varying vec4 inColor;
void main(void)
{
vec4 eyeCoord = vec4(gl_ModelViewMatrix * gl_Vertex);
gl_Position = gl_ProjectionMatrix * eyeCoord;
// Shrink the sprite with distance from the eye (simple size attenuation).
float distance = length(eyeCoord);
float attenuation = 700.0 / distance;
gl_PointSize = particleRadius * attenuation;
//gl_PointSize = 1.0 / distance * SIZE;
//gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_FrontColor = gl_Color;
inColor = myColor;
}
draw method:
// Draw all particles as textured point sprites with per-particle color/radius.
// FIX: the original disabled GL_VERTEX_ARRAY twice; the duplicate is removed.
void MyApp::draw(){
    //gl::clear( ColorA( 0.0f, 0.0f, 0.0f, 0.0f ), true );
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
    // SET MATRICES TO WINDOW
    gl::setMatricesWindow( getWindowSize(), false );
    gl::setViewport( getWindowBounds() );
    gl::enableAlphaBlending();
    gl::enable( GL_TEXTURE_2D );
    gl::enable(GL_ALPHA_TEST);
    glEnable(GL_DEPTH_TEST);
    gl::color( ColorA( 1.0f, 1.0f, 1.0f, 1.0f ) );
    mShader.bind();
    // store current OpenGL state
    glPushAttrib( GL_POINT_BIT | GL_ENABLE_BIT );
    // enable point sprites and initialize it
    gl::enable( GL_POINT_SPRITE_ARB );
    glPointParameterfARB( GL_POINT_FADE_THRESHOLD_SIZE_ARB, -1.0f );
    glPointParameterfARB( GL_POINT_SIZE_MIN_ARB, 0.1f );
    glPointParameterfARB( GL_POINT_SIZE_MAX_ARB, 200.0f );
    // allow vertex shader to change point size
    gl::enable( GL_VERTEX_PROGRAM_POINT_SIZE );
    // per-particle color attribute
    GLint thisColor = mShader.getAttribLocation( "myColor" );
    glEnableVertexAttribArray(thisColor);
    glVertexAttribPointer(thisColor,4,GL_FLOAT,true,0,theColors);
    // per-particle radius attribute
    GLint particleRadiusLocation = mShader.getAttribLocation( "particleRadius" );
    glEnableVertexAttribArray(particleRadiusLocation);
    glVertexAttribPointer(particleRadiusLocation, 1, GL_FLOAT, true, 0, mRadiuses);
    glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE);
    glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, mPositions);
    mTexture.enableAndBind();
    glDrawArrays( GL_POINTS, 0, mNumParticles );
    mTexture.unbind();
    // FIX: disable the vertex array exactly once (was duplicated).
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableVertexAttribArrayARB(thisColor);
    glDisableVertexAttribArrayARB(particleRadiusLocation);
    // unbind shader
    mShader.unbind();
    // restore OpenGL state
    glPopAttrib();
}
You have two different blending cases in void MyApp::draw()
additive - (src + dst)
Order independent
alpha - (src * src.a) + (dst * (1.0 - src.a))
Order dependent
The first blending function would not cause the issues you are discussing, so I am assuming that mRoom.isPowerOn() == false and that we are dealing with alpha blending.
To solve order dependency issues with the latter case you need to transform your points into eye-space and sort using their z coordinates. The problem here is that this is not something that can be easily solved in GLSL - you need to sort the data before your vertex shader runs (so the most straight-forward approach involves doing this on the CPU). GPU-based solutions are possible and may be necessary to do this in real-time given the huge number of data points involved, but you should start out by doing this on the CPU and figure out where to go from there.
When implementing the sort, keep in mind that point sprites are always screen aligned (uniform z value in eye-space), so you do not have to worry about intersection (a point sprite will either be completely in-front of, behind, or parallel to any other point sprite it overlaps). This makes sorting them a lot simpler than other types of geometry, which may have to be split at points of intersection and drawn twice for proper ordering.

OpenGL 4.0 Cubemap issues

Been reading 'OpenGL 4.0 Shading Language Cookbook'. But I've run into a wall with the cubemap tutorial.
The issue is that model I'm drawing appears completely grey. As if it's not getting any data from the samplerCube texture.
All my code seems to be correct. I've looked at other tutorials and it's the same thing.
Don't know if my Intel HD Graphics 4000 is responsible, but I have made certain that I do have the GL_ARB_texture_cube_map extension.
I'm using the DevIL library for loading images from file, which it seems to do just fine, but from what I can tell something is going wrong in transferring the data to OpenGL.
I'm posting the loading where I get the data from the files. All files are loading correctly as well.
I'm also posting the drawing code, where I bind the texture to the pipeline.
And I'm also posting my vertex and fragment shader just in case, but they do appear to be working as they should.
Any ideas?
Loading code
uint TARGETS[6] =
{
GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
string EXTS[6] =
{
"posx",
"negx",
"posy",
"negy",
"posz",
"negz"
};
// Create & bind cubemap texture
glGenTextures( 1, &cubemap );
glBindTexture( GL_TEXTURE_CUBE_MAP, cubemap );
// Load each of the six faces from textures/cubemap_<ext>.png via DevIL.
for( int i = 0; i < 6; i++ )
{
string file = "textures/cubemap_" + EXTS[i] + ".png";
uint image = ilGenImage();
// Load with DevIL
ilBindImage( image );
if( !ilLoadImage( file.c_str() ) )
{
cout << "ERROR: Failed to load image " << endl;
return false;
}
// Fetch info from DevIL
int width = ilGetInteger( IL_IMAGE_WIDTH );
int height = ilGetInteger( IL_IMAGE_HEIGHT );
uint format = ilGetInteger( IL_IMAGE_FORMAT );
uint type = ilGetInteger( IL_IMAGE_TYPE );
// Send data to OpenGL; the face target for this iteration comes from TARGETS[i]
glTexImage2D(
TARGETS[i],
0,
GL_RGBA,
width,
height,
0,
format,
type,
ilGetData() );
// Error check
if( !ErrorCheck("Failed to bind a side of the cubemap!") )
return false;
// Get rid of DevIL data (already copied to the GL texture)
ilDeleteImage( image );
}
// Parameters: bilinear filtering, clamp on all three texture axes
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE );
Draw code
// Update
glfwPollEvents();
UpdateTime();
// Clear back buffer for new frame
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
// Bind shader
shader->Bind();
// Cubemap: sampler uniform holds the texture unit (0); bind the cube map there
shader->SetUniform( "cubemapTexture", 0 );
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_CUBE_MAP, cubemap );
// Bind model
if( model->Bind() )
{
static float angle = 0;
angle += 25.0f * deltaTime;
// Matrices: world = T * Rz * Rx * S (degrees converted to radians)
mat4 world =
translate( vec3( 0.0f, 0.0f, 0.0f) ) *
rotateZ( angle * PI / 180 ) *
rotateX( angle * PI / 180 ) *
scale( vec3( 1.0f, 1.0f, 1.0f) );
mat4 view = ViewMatrix(
cameraPosition,
cameraTarget,
vec3( 0.0f, 0.0f, 1.0f) );
mat4 proj = ProjectionMatrix(
fov,
(float)windowX,
(float)windowY,
nearPlane,
farPlane );
// Uniforms
shader->SetUniform( "uWorld", world );
shader->SetUniform( "uView", view );
shader->SetUniform( "uProj", proj );
shader->SetUniform( "materialColor", vec3( 0.5f, 0.5f, 0.5f ) );
shader->SetUniform( "drawSkybox", false );
shader->SetUniform( "world_cameraPosition", cameraPosition );
shader->SetUniform( "reflectFactor", 0.5f );
// Draw
glDrawElements( GL_TRIANGLES, model->GetIndexCount(), GL_UNSIGNED_SHORT, NULL );
}
// Put the new image on the screen
glfwSwapBuffers( window );
Vertex Shader
#version 400
layout(location=0) in vec3 vertex_position;
layout(location=1) in vec3 vertex_normal;
layout(location=2) in vec4 vertex_tangent;
layout(location=3) in vec2 vertex_texCoords;
out vec2 texCoords;
// Direction used to sample the cube map in the fragment shader.
out vec3 reflectDir;
uniform mat4 uWorld;
uniform mat4 uView;
uniform mat4 uProj;
uniform bool drawSkybox;
uniform vec3 world_cameraPosition;
void main()
{
if( drawSkybox )
{
// Skybox: sample straight along the vertex position.
reflectDir = vertex_position;
}
else
{
// Reflective object: mirror the view direction around the world-space normal.
vec3 world_pos = vec3( uWorld * vec4(vertex_position,1.0) );
vec3 world_norm = vec3( uWorld * vec4(vertex_normal,0.0) );
vec3 world_view = normalize( world_cameraPosition - world_pos );
reflectDir = reflect( -world_view, world_norm );
}
gl_Position = uProj * uView * uWorld * vec4(vertex_position,1.0);
texCoords = vertex_texCoords;
}
Fragment shader
#version 400
out vec4 fragColor;
in vec2 texCoords;
in vec3 reflectDir;
uniform samplerCube cubemapTexture;
uniform vec3 materialColor;
uniform bool drawSkybox;
uniform float reflectFactor;
void main()
{
// Sample the cube map along the interpolated reflection direction.
vec3 color = texture( cubemapTexture, reflectDir ).rgb;
if( drawSkybox )
{
fragColor = vec4( color, 1.0 );
}
else
{
// Blend the base material color with the reflected environment.
fragColor = vec4( mix( materialColor, color, reflectFactor ), 1.0 );
}
}
Your cube map texture is not texture complete. All 6 sides need to be specified for a cube map texture to be complete. From the specs:
Additionally, a cube map texture is cube complete if the following conditions all hold true: [..] The level_base arrays of each of the six texture images making up the cube map have identical, positive, and square dimensions.
Your code does not specify an image for NEGATIVE_X:
uint TARGETS[6] =
{
GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, // <-- NEGATIVE_X is missing here...
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, // <-- ...and NEGATIVE_Y appears twice
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
Using this table, the image for NEGATIVE_Y is specified twice, but it's missing NEGATIVE_X. It should be:
// Corrected order: all six distinct faces, +X/-X, +Y/-Y, +Z/-Z.
uint TARGETS[6] =
{
GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
Instead of enumerating the 6 targets, you can also use GL_TEXTURE_CUBE_MAP_POSITIVE_X + i for i in the range 0..5 to address the 6 targets.

Opengl error C0000 compiling error

I've a problem with opengl shader compiling. The problem is that when I run the program, I obtain this error:
Vertex info
0(1) : error c0000: syntax error, unexpected '' at token ''
the same message is for the fragment object. At the end I obtain a 'program not validated' error. here's my initialization shader code:
// Plain aggregate holding the GL object handles created by init_shaders().
struct ShadeState {
int gl_program_id = 0; // OpenGL program handle
int gl_vertex_shader_id = 0; // OpenGL vertex shader handle
int gl_fragment_shader_id = 0; // OpenGL fragment shader handle
};
// initialize the shaders
// Compile, attach, and link the two GLSL shaders, storing all handles in
// state. Aborts via the glcommon.h error helpers on any GL or compile error.
void init_shaders(ShadeState* state) {
    // load shader code from files
    auto vertex_shader_code = load_text_file("shade_vertex.glsl");
    auto fragment_shader_code = load_text_file("shade_fragment.glsl");
    // glShaderSource takes const GLchar** — keep the pointers const instead of
    // casting constness away from c_str()
    const GLchar* vertex_shader_codes = vertex_shader_code.c_str();
    const GLchar* fragment_shader_codes = fragment_shader_code.c_str();
    // create shaders
    state->gl_vertex_shader_id = glCreateShader(GL_VERTEX_SHADER);
    state->gl_fragment_shader_id = glCreateShader(GL_FRAGMENT_SHADER);
    // load shader code onto the GPU (NULL lengths => NUL-terminated strings)
    glShaderSource(state->gl_vertex_shader_id, 1, &vertex_shader_codes, NULL);
    glShaderSource(state->gl_fragment_shader_id, 1, &fragment_shader_codes, NULL);
    // compile shaders
    glCompileShader(state->gl_vertex_shader_id);
    glCompileShader(state->gl_fragment_shader_id);
    // check if shaders are valid (helpers from glcommon.h)
    error_if_glerror();
    error_if_shader_not_valid(state->gl_vertex_shader_id);
    error_if_shader_not_valid(state->gl_fragment_shader_id);
    // create program
    state->gl_program_id = glCreateProgram();
    // attach shaders
    glAttachShader(state->gl_program_id, state->gl_vertex_shader_id);
    glAttachShader(state->gl_program_id, state->gl_fragment_shader_id);
    // bind vertex attribute locations to match the shade_vertex inputs;
    // this must happen before linking to take effect
    glBindAttribLocation(state->gl_program_id, 0, "vertex_pos"); // first attribute in shade_vertex
    glBindAttribLocation(state->gl_program_id, 1, "vertex_norm"); // second attribute in shade_vertex
    // link program
    glLinkProgram(state->gl_program_id);
    // check if program is valid (helpers from glcommon.h)
    error_if_glerror();
    error_if_program_not_valid(state->gl_program_id);
}
How can I resolve this?
EDIT
shade_vertex.glsl
#version 120
attribute vec3 vertex_pos; // vertex position (in mesh coordinate frame)
attribute vec3 vertex_norm; // vertex normal (in mesh coordinate frame)
uniform mat4 mesh_frame; // mesh frame (as a matrix)
uniform mat4 camera_frame_inverse; // inverse of the camera frame (as a matrix)
uniform mat4 camera_projection; // camera projection
varying vec3 pos; // [to fragment shader] vertex position (in world coordinate)
varying vec3 norm; // [to fragment shader] vertex normal (in world coordinate)
// main function
void main() {
// compute pos and normal in world space (w = 0 for the normal so the
// translation part of mesh_frame is ignored for directions)
pos = (mesh_frame * vec4(vertex_pos, 1.0)).xyz;
norm = (mesh_frame * vec4(vertex_norm, 0.0)).xyz;
// BUGFIX: a vertex shader must write gl_Position; project the vertex with
// mesh_frame, camera_frame_inverse and camera_projection
gl_Position = camera_projection * camera_frame_inverse * mesh_frame * vec4(vertex_pos, 1.0);
}
shade_fragment.glsl
#version 120
varying vec3 pos; // [from vertex shader] position in world space
varying vec3 norm; // [from vertex shader] normal in world space (need normalization)
uniform vec3 camera_pos; // camera position (center of the camera frame)
uniform vec3 ambient; // scene ambient
uniform int lights_num; // number of lights
uniform vec3 light_pos[16]; // light positions
uniform vec3 light_intensity[16]; // light intensities
uniform vec3 material_kd; // material kd
uniform vec3 material_ks; // material ks
uniform float material_n; // material n
// main
void main() {
// re-normalize normals
// use faceforward to ensure the normals points toward us
// accumulate ambient
// BUGFIX: the declaration below was missing its terminating semicolon,
// which caused the "unexpected token" compile error
vec3 c = vec3(0,0,0);
// foreach light
// compute point light color at pos
// compute light direction at pos
// compute view direction using camera_pos and pos
// compute h
// accumulate blinn-phong model
// output final color by setting gl_FragColor
gl_FragColor = vec4(c,1);
}
GLSL requires that a newline (\n) follow the #version directive.
I suspect your load_text_file() function is either not preserving newlines in the source text files or the text files themselves are missing newlines.
Other issues:
Your vertex shader needs to write to gl_Position.
Your fragment shader is missing a semicolon after vec3 c = vec3(0,0,0)
These (updated) shaders compile on my system:
#include <GL/glew.h>
#include <GL/glut.h>
#include <iostream>
struct Program
{
static GLuint Load( const char* vert, const char* geom, const char* frag )
{
GLuint prog = glCreateProgram();
if( vert ) AttachShader( prog, GL_VERTEX_SHADER, vert );
if( geom ) AttachShader( prog, GL_GEOMETRY_SHADER, geom );
if( frag ) AttachShader( prog, GL_FRAGMENT_SHADER, frag );
glLinkProgram( prog );
CheckStatus( prog );
return prog;
}
private:
static void CheckStatus( GLuint obj )
{
GLint status = GL_FALSE;
if( glIsShader(obj) ) glGetShaderiv( obj, GL_COMPILE_STATUS, &status );
if( glIsProgram(obj) ) glGetProgramiv( obj, GL_LINK_STATUS, &status );
if( status == GL_TRUE ) return;
GLchar log[ 1 << 15 ] = { 0 };
if( glIsShader(obj) ) glGetShaderInfoLog( obj, sizeof(log), NULL, log );
if( glIsProgram(obj) ) glGetProgramInfoLog( obj, sizeof(log), NULL, log );
std::cerr << log << std::endl;
exit( -1 );
}
static void AttachShader( GLuint program, GLenum type, const char* src )
{
GLuint shader = glCreateShader( type );
glShaderSource( shader, 1, &src, NULL );
glCompileShader( shader );
CheckStatus( shader );
glAttachShader( program, shader );
glDeleteShader( shader );
}
};
// Stringify raw GLSL source and prefix a "#version <n>" directive followed by
// the newline that GLSL requires after #version.
#define GLSL(version, shader) "#version " #version "\n" #shader
const char* vert = GLSL
(
120,
attribute vec3 vertex_pos; // vertex position (in mesh coordinate frame)
attribute vec3 vertex_norm; // vertex normal (in mesh coordinate frame)
uniform mat4 mesh_frame; // mesh frame (as a matrix)
uniform mat4 camera_frame_inverse; // inverse of the camera frame (as a matrix)
uniform mat4 camera_projection; // camera projection
varying vec3 pos; // [to fragment shader] vertex position (in world coordinate)
varying vec3 norm; // [to fragment shader] vertex normal (in world coordinate)
// main function
void main() {
// compute pos and normal in world space and set up variables for fragment shader (use mesh_frame)
// project vertex position to gl_Position using mesh_frame, camera_frame_inverse and camera_projection
gl_Position = vec4( 0, 0, 0, 1 );
}
);
const char* frag = GLSL
(
120,
varying vec3 pos; // [from vertex shader] position in world space
varying vec3 norm; // [from vertex shader] normal in world space (need normalization)
uniform vec3 camera_pos; // camera position (center of the camera frame)
uniform vec3 ambient; // scene ambient
uniform int lights_num; // number of lights
uniform vec3 light_pos[16]; // light positions
uniform vec3 light_intensity[16]; // light intensities
uniform vec3 material_kd; // material kd
uniform vec3 material_ks; // material ks
uniform float material_n; // material n
// main
void main() {
// re-normalize normals
// use faceforward to ensure the normals points toward us
// accumulate ambient
vec3 c = vec3(0,0,0);
// foreach light
// compute point light color at pos
// compute light direction at pos
// compute view direction using camera_pos and pos
// compute h
// accumulate blinn-phong model
// output final color by setting gl_FragColor
gl_FragColor = vec4(c,1);
}
);
// Minimal GLUT display callback: clear to black and present the back buffer.
void display()
{
glClearColor( 0, 0, 0, 1 );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glutSwapBuffers();
}
// Create a GLUT window, build the embedded shaders, and enter the main loop.
int main(int argc, char **argv)
{
glutInit( &argc, argv );
glutInitDisplayMode( GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE );
glutInitWindowSize( 600, 600 );
glutCreateWindow( "GLUT" );
glewInit();
// Compile/link the embedded shaders; aborts with a log on failure.
GLuint prog = Program::Load( vert, NULL, frag );
glutDisplayFunc( display );
glutMainLoop();
return 0;
}
I resolved it with genpfault's solution. I had to write gl_Position and add the semicolon. Thanks to you all!

Use normals as colors in OpenGL using assimp

I exported the suzanne model from blender(Monkey head) as a .obj file and I can only see it when I use the RGB values in the fragment shader. e.g. frag_color = vec4( 1.0, 0.0, 0.0, 1.0 ); to make the model red. But it just looks like a deformed texture unless I rotate it
I want to use the normals as colors so that I can see specific details in the face, etc. I bound the normals to vertex position 1.
// Upload per-vertex normals to a fresh VBO bound to attribute location 1.
if ( mesh -> HasNormals() )
{
normals = ( GLfloat * ) malloc( * pointCount * 3 * sizeof( GLfloat ) );
for ( int i = 0; i < * pointCount; i++ )
{
const aiVector3D * vn = &( mesh -> mNormals[ i ] );
normals[ i * 3 ] = ( GLfloat ) vn -> x;
normals[ i * 3 + 1 ] = ( GLfloat ) vn -> y;
normals[ i * 3 + 2 ] = ( GLfloat ) vn -> z;
}
GLuint vbo;
glGenBuffers( 1, &vbo );
glBindBuffer( GL_ARRAY_BUFFER, vbo );
glBufferData( GL_ARRAY_BUFFER, 3 * * pointCount * sizeof( GLfloat ), normals, GL_STATIC_DRAW );
glVertexAttribPointer( 1, 3, GL_FLOAT, GL_FALSE, 0, NULL );
glEnableVertexAttribArray( 1 );
// Safe to free: glBufferData has already copied the data to the GPU.
free( normals );
}
And I bound 1 to vertex_normal right after attaching the shaders but right before linking.
// Attribute locations must be bound BEFORE glLinkProgram to take effect.
glAttachShader( program, vertShader );
glAttachShader( program, fragShader );
glBindAttribLocation( program, 0, "vertex_position" );
glBindAttribLocation( program, 1, "vertex_normal" );
glLinkProgram( program );
These are my shaders
vertshader.shader
#version 330
in vec3 vertex_position;
in vec3 vertex_normal;
uniform mat4 proj, view, model;
// Raw (model-space) normal forwarded to the fragment shader as a color.
out vec3 normals;
void main()
{
normals = vertex_normal;
gl_Position = proj * vec4( vec3( view * model * vec4( vertex_position, 1.0 ) ), 1.0 );
}
fragshader.shader
#version 330
in vec3 normals;
out vec4 fragment_color;
void main()
{
// NOTE(review): components of a unit normal can be negative and are clamped
// to black on output; remap with 0.5 * normals + 0.5 to visualize all faces.
fragment_color = vec4( normals, 1.0 );
}
But this only outputs a black screen. I know the model is loading because I can color it red like above. I tried importing vertex_normal directly into the frag shader, that didn't work, I also tried normalizing normals and that didn't change the effect neither.
So how can I use the models normals as colors in the fragment shader?
Ok, I found a fix. Apparently it was Blender's fault. There is a side panel for what I want to export with my mesh, and "Write normals" wasn't checked. Thanks to Reto Koradi; I didn't think it was possible for a mesh to be written without normals.

OpenGL Vertex Buffer incorrect render

I'm having a bit of an odd problem. I'm trying to render some data with OpenGL on my Windows system. I found a set of tutorials at opengl-tutorial.org which were written for OpenGL 3.3. As my laptop (where I do a great deal of developing) only supports OpenGL 2.1, I proceeded to download the OpenGL 2.1 port of the tutorial. I messed around with it a bit, adding features and refactoring it for scalability, but noticed something odd. Whenever I rendered my data with Vertex Buffer Objects, I got a rather incorrect representation of my data. This is shown below.
http://www.majhost.com/gallery/DagonEcelstraun/Others/HelpNeeded/badrender.png
However, when I specify my data using glVertex3fv and such, I get a much nicer result, again shown below.
http://www.majhost.com/gallery/DagonEcelstraun/Others/HelpNeeded/goodrender.png
The problem occurs both on my Windows 8.1 laptop with Intel i3 integrated graphics and on my Windows 7 desktop with its nVidia GTX 660, so it's not a hardware problem. Does anyone know what may be the issue here?
Loading mesh data:
// Import the first mesh and flatten it into interleaved 12-float vertices:
// position(3) | normal(3) | uv(3, third component padded with 0) | tangent(3).
const aiScene *scene = aiImportFile( sName.c_str(),
aiProcessPreset_TargetRealtime_MaxQuality | aiProcess_FlipUVs );
const aiMesh *mesh = scene->mMeshes[0];
for( int i = 0; i < mesh->mNumVertices; i++ ) {
meshData.push_back( mesh->mVertices[i][0] );
meshData.push_back( mesh->mVertices[i][1] );
meshData.push_back( mesh->mVertices[i][2] );
meshData.push_back( mesh->mNormals[i][0] );
meshData.push_back( mesh->mNormals[i][1] );
meshData.push_back( mesh->mNormals[i][2] );
meshData.push_back( mesh->mTextureCoords[0][i][0] );
meshData.push_back( mesh->mTextureCoords[0][i][1] );
meshData.push_back( 0 );
meshData.push_back( mesh->mTangents[i][0] );
meshData.push_back( mesh->mTangents[i][1] );
meshData.push_back( mesh->mTangents[i][2] );
}
// Triangulated faces: exactly three indices per face.
for( int i = 0; i < mesh->mNumFaces; i++ ) {
for( int j = 0; j < 3; j++ ) {
indices.push_back( mesh->mFaces[i].mIndices[j] );
}
}
Sending data to the graphics card for the first time (called right after previous code):
// One-time upload of the interleaved vertex data and index list to the GPU.
glGenBuffers( 1, &glVertData );
glBindBuffer( GL_ARRAY_BUFFER, glVertData );
glBufferData( GL_ARRAY_BUFFER, meshData.size() * sizeof( GLfloat ), &meshData[0], GL_STATIC_DRAW );
// Generate a buffer for the indices as well
glGenBuffers( 1, &glIndexes );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, glIndexes );
glBufferData( GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW );
Rendering the mesh:
//Tell the shader to use our data
//bindVerts, bindUvs, bindNorms, and bindTangents refer to attribute variables in my shader
//vertexPosition_modelspace, vertexUV, vertexNormal_modelspace, and vertexTangent_modelspace, respectively.
this->verts = bindVerts;
this->uvs = bindUvs;
this->norms = bindNorms;
this->tangents = bindTangents;
glEnableVertexAttribArray( verts );
glEnableVertexAttribArray( uvs );
glEnableVertexAttribArray( norms );
glEnableVertexAttribArray( tangents );
//Specify how the graphics card should decode our data
// 1rst attribute buffer : vertices
glBindBuffer( GL_ARRAY_BUFFER, glVertData );
glVertexAttribPointer( verts, 3, GL_FLOAT, GL_FALSE, 12, (void*) 0 );
// 2nd attribute buffer : normals
glVertexAttribPointer( norms, 3, GL_FLOAT, GL_FALSE, 12, (void*) 3 );
//3rd attribute buffer : UVs
glVertexAttribPointer( uvs, 3, GL_FLOAT, GL_FALSE, 12, (void*) 6 );
//4th attribute buffer: tangents
glVertexAttribPointer( tangents, 3, GL_FLOAT, GL_FALSE, 12, (void*) 9 );
// Index buffer
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, glIndexes );
//rendering the mesh with VBOs:
glDrawElements( GL_LINES, indices.size(), GL_UNSIGNED_SHORT, (void*) 0 );
//specifying the vertex data individually:
glBegin( GL_TRIANGLES );
int ind;
for( int i = 0; i < indices.size(); i++ ) {
ind = indices[i] * 12;
glNormal3fv( &meshData[ind + 3] );
glTexCoord2fv( &meshData[ind + 6] );
glVertex3fv( &meshData[ind] );
}
glEnd();
//clean up after the render
glDisableVertexAttribArray( verts );
glDisableVertexAttribArray( uvs );
glDisableVertexAttribArray( norms );
glDisableVertexAttribArray( tangents );
My vertex shader:
#version 130
// Input vertex data, different for all executions of this shader.
//it doesn't work, so we'll just get rid of it
attribute vec3 vertexPosition_modelspace;
attribute vec3 vertexUV;
attribute vec3 vertexNormal_modelspace;
attribute vec3 vertexTangent_modelspace;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
out vec4 ShadowCoord;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightInvDirection_worldspace;
uniform mat4 DepthBiasMVP;
uniform sampler2D normalMap;
attribute vec3 vTangent;
void main() {
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP * vec4( vertexPosition_modelspace, 1 );
// NOTE(review): the position transforms below use w = 0, which discards the
// matrices' translation component; point transforms normally use w = 1 —
// confirm this is intended.
ShadowCoord = DepthBiasMVP * vec4( vertexPosition_modelspace, 0 );
// Position of the vertex, in worldspace : M * position
Position_worldspace = ( M * vec4( vertexPosition_modelspace, 0 ) ).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
EyeDirection_cameraspace = vec3( 0, 0, 0 ) - ( V * M * vec4( vertexPosition_modelspace, 0 ) ).xyz;
// Vector that goes from the vertex to the light, in camera space
LightDirection_cameraspace = ( V * vec4( LightInvDirection_worldspace, 0 ) ).xyz;
// UV of the vertex. No special space for this one.
UV = vertexUV.st;
// Normal of the the vertex, in camera space
// Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
Normal_cameraspace = ( V * M * vec4( vertexNormal_modelspace.xyz, 0 ) ).xyz;
}
Fragment shader:
#version 130
// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Position_worldspace;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;
in vec4 ShadowCoord;
out vec4 fragColor;
// Values that stay constant for the whole mesh.
uniform sampler2D diffuse;
uniform mat4 MV;
uniform vec3 LightPosition_worldspace;
uniform sampler2D shadowMap;
//uniform int shadowLevel; //0 is no shadow, 1 is hard shadows, 2 is soft shadows, 3 is PCSS
// Returns a pseudo-random number in [0, 1) derived from a vec3 seed and an
// int — the classic fract(sin(dot(...))) hash used for shadow-sample jitter.
float random( vec3 seed, int i ) {
vec4 seed4 = vec4( seed, i );
float dot_product = dot( seed4, vec4( 12.9898, 78.233, 45.164, 94.673 ) );
return fract( sin( dot_product ) * 43758.5453 );
}
// Integer modulo: remainder of a divided by b (for non-negative a, b > 0).
// BUGFIX: the original returned a - (a / b), which is not the remainder;
// the correct expression is a - (a / b) * b.
int mod( int a, int b ) {
return a - (a / b) * b;
}
void main() {
int shadowLevel = 1; //let's just do hard shadows
// Light emission properties
vec3 LightColor = vec3( 1, 1, 1 );
float LightPower = 1.0f;
// Material properties
vec3 MaterialDiffuseColor = texture( diffuse, UV ).rgb;
vec3 MaterialAmbientColor = vec3( 0.1, 0.1, 0.1 ) * MaterialDiffuseColor;
vec3 MaterialSpecularColor = vec3( 0.3, 0.3, 0.3 );
vec3 n = normalize( Normal_cameraspace );
vec3 l = normalize( LightDirection_cameraspace );
// Diffuse term, floored at 0.2 so back-facing surfaces are not fully black.
float cosTheta = clamp( dot( n, l ), 0.2, 1 );
// Eye vector (towards the camera)
vec3 E = normalize( EyeDirection_cameraspace );
// Direction in which the triangle reflects the light
vec3 R = reflect( -l, n );
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
// - Looking into the reflection -> 1
// - Looking elsewhere -> < 1
float cosAlpha = clamp( dot( E, R ), 0, 1 );
float visibility = 1.0;
//variable bias (slope-scaled to reduce shadow acne)
float bias = 0.005 * tan( acos( cosTheta ) );
bias = clamp( bias, 0, 0.01 );
// dFragment to the light
float dFragment = ( ShadowCoord.z-bias ) / ShadowCoord.w;
float dBlocker = 0;
float penumbra = 1;
float wLight = 5.0;
// PCSS path: estimate the average blocker depth to size the penumbra.
if( shadowLevel == 3 ) {
// Sample the shadow map 8 times
float count = 0;
float temp;
float centerBlocker = texture( shadowMap, ShadowCoord.xy).r;
float scale = (wLight * (dFragment - centerBlocker)) / dFragment;
for( int i = 0; i < 16; i++ ) {
temp = texture( shadowMap, ShadowCoord.xy + (scale * poissonDisk( i ) / 50.0) ).r;
if( temp < dFragment ) {
dBlocker += temp;
count += 1;
}
}
if( count > 0 ) {
dBlocker /= count;
penumbra = wLight * (dFragment - dBlocker) / dFragment;
}
}
// Hard shadows: a single shadow-map depth comparison.
if( shadowLevel == 1 ) {
if( texture( shadowMap, ShadowCoord.xy).r < dFragment ) {
visibility -= 0.8;
}
} else if( shadowLevel > 1 ) {
// Soft shadows: jittered Poisson-disk sampling of the shadow map.
float iterations = 32;
float sub = 0.8f / iterations;
for( int i = 0; i < iterations; i++ ) {
int index = mod( int( 32.0 * random( gl_FragCoord.xyy, i ) ), 32 );
if( texture( shadowMap, ShadowCoord.xy + (penumbra * poissonDisk( index ) / 250.0) ).r < dFragment ) {
visibility -= sub;
}
}
}
visibility = min( visibility, cosTheta );
//MaterialDiffuseColor = vec3( 0.8, 0.8, 0.8 );
fragColor.rgb = MaterialAmbientColor +
visibility * MaterialDiffuseColor * LightColor * LightPower +
visibility * MaterialSpecularColor * LightColor * LightPower * pow( cosAlpha, 5 );
}
Note that poissonDisk( int ind ) returns a vec2 with a magnitude of no more than 1 which is in a poisson disk distribution. Even though I'm using shader version 130, I used a function and not an array because the array runs rather slowly on my laptop.
I do bind that shader before I do any rendering. I also make sure to upload the correct variables to all of my uniforms, but I didn't show that to save space since I know it's working correctly.
Does anyone know what's causing this incorrect render?
Well, first of all, stop drawing the VBO using GL_LINES. Use the same primitive mode for immediate mode and VBO drawing.
Also, since when is 3*4 = 3? The address (offset) in your VBO vertex pointers should be the number of elements multiplied by the size of the data type when using an interleaved data structure. GL_FLOAT is 4 bytes, if you have a 3-component vertex position this means that the offset to the next field in your VBO is 3*4 = (void *)12, not (void *)3. This process must continue for each additional vertex array pointer, they all use incorrect offsets.
Likewise, the stride of your VBO should be 12 * sizeof (GLfloat) = 48, not 12.