Silhouette-Outlined shader - opengl
I'm trying to implement GLSL shader which would highlight the outer edges of rendered 3D mesh. The problem is that I do not have access to the OpenGL client side code so this must be done only in GLSL shaders.
My first attempt was to use/adapt this shader from Unity and do it in OpenGL GLSL. Here is how it should look:
And here is what I got:
I'm not sure if I am computing the values correctly, but as you can see the output is nowhere near my expectations.
Here is the ogre material
// Ogre material: two-pass setup for the outlined chassis mesh.
material Chassis
{
technique
{
// pass 1: back faces culled; scene_blend zero one keeps the destination
// color unchanged (zero*src + one*dst), so this pass effectively lays
// down depth only
pass standard
{
cull_software back
scene_blend zero one
}
// pass 2: front faces culled, standard alpha blending; runs the
// outline vertex/fragment programs below
pass psssm
{
cull_software front
scene_blend src_alpha one_minus_src_alpha
vertex_program_ref reflection_cube_specularmap_normalmap_vs100
{
// auto-bound parameters feeding the vertex shader uniforms
param_named_auto modelViewProjectionMatrix worldviewproj_matrix
param_named_auto normalMatrix inverse_transpose_world_matrix
param_named_auto modelView worldview_matrix
param_named_auto camera_world_position camera_position
param_named_auto inverse_projection_matrix inverse_projection_matrix
param_named_auto projection_matrix projection_matrix
param_named_auto p_InverseModelView inverse_worldview_matrix
}
fragment_program_ref reflection_cube_specularmap_normalmap_fs100
{
}
}
}
}
Here is the vertex shader
// Vertex shader of the (not working) Unity-style outline attempt.
// NOTE(review): the local modelView declared in main() shadows the
// modelView uniform of the same name, so the uniform is never used here
// - confirm which one was intended.
#version 140
#define lowp
#define mediump
#define highp
in vec4 vertex;
in vec3 normal;
uniform mat4 normalMatrix;
uniform mat4 modelViewProjectionMatrix;
uniform mat4 modelView;
uniform vec3 camera_world_position;
uniform mat4 projection_matrix;
uniform mat4 inverse_projection_matrix;
void main()
{
vec4 pos = modelViewProjectionMatrix * vertex;
// reconstruct modelview as P^-1 * MVP (shadows the modelView uniform above)
mat4 modelView = inverse_projection_matrix * modelViewProjectionMatrix;
// normal through inverse-transpose modelview; w=0 transforms it as a direction
vec4 norm = inverse(transpose(modelView)) * vec4(normal, 0.0);
// screen-space extrusion direction scaled by the projection diagonal terms
vec2 offset = vec2( norm.x * projection_matrix[0][0], norm.y * projection_matrix[1][1] );
// NOTE(review): pos is in clip space here; multiplying the offset by pos.z
// makes the extrusion grow with depth - presumably 0.18 is the outline
// width factor; verify this math against the original Unity shader
pos.xy += offset * pos.z * 0.18;
gl_Position = pos;
}
EDIT: I have added the material script which ogre uses and I have added the vertex shader code.
I assume single complex 3D mesh. I would do this with 2 pass rendering:
clear screen
Let's use (0,0,0) as the clear color.
render mesh
disable depth output and test (or clear the depth buffer afterwards). Do not use shading; just fill with some predefined color, for example (1,1,1). Let's do this for a simple cube:
read the frame buffer and use it as a texture
So either use an FBO and render to texture for #1,#2 or use glReadPixels instead and load the result back to the GPU as a texture (I know it is slower, but it also works on Intel). For more info see both answers here:
OpenGL Scale Single Pixel Line
clear screen with background color
render
so either render GL_QUAD covering whole screen or render your mesh with shading and what ever you want. You need to pass also the texture from previous step into GLSL.
In fragment render as usual ... but at the end also add this:
Scan all texels around current fragment screen position up to distance equal to outline thickness in the texture from previous step. If any black pixel found in it override outputted color with your outline color. You can even modulate it with the smallest distance to black color.
This is very similar to this:
How to implement 2D raycasting light effect in GLSL
but much simpler. Here result:
I took this example Analysis of a shader in VR of mine and converted it to this:
Fragment:
// Fragment
// Two-pass outline: in pass 1 (thickness==0) the mesh is rendered flat
// white on a black background; in pass 2 (thickness>0) each fragment scans
// the pass-1 texture in a circular neighborhood and turns red if any
// background (black) texel is found within the outline radius.
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location =64) uniform vec3 lt_pnt_pos;// point light source position [GCS]
layout(location =67) uniform vec3 lt_pnt_col;// point light source color&strength
layout(location =70) uniform vec3 lt_amb_col;// ambient light source color&strength
in vec3 LCS_pos; // fragment position [LCS]
in vec3 pixel_pos; // fragment position [GCS]
in vec3 pixel_col; // fragment surface color
in vec3 pixel_nor; // fragment surface normal [GCS]
out vec4 col;
// outline
uniform sampler2D txr; // texture from previous pass
uniform int thickness; // [pixels] outline thickness
uniform float xs,ys; // [pixels] texture/screen resolution
void main()
{
// standard rendering
float li;
vec3 c,lt_dir;
lt_dir=normalize(lt_pnt_pos-pixel_pos); // vector from fragment to point light source in [GCS]
li=dot(pixel_nor,lt_dir);
if (li<0.0) li=0.0; // clamp diffuse term to front-facing light
c=pixel_col*(lt_amb_col+(lt_pnt_col*li));
// outline effect
if (thickness>0) // thickness effect in second pass
{
int i,j,r=thickness;
float xx,yy,rr,x,y,dx,dy;
dx=1.0/xs; // texel size
dy=1.0/ys;
x=gl_FragCoord.x*dx; // fragment position in texture coordinates <0,1>
y=gl_FragCoord.y*dy;
rr=thickness*thickness; // squared scan radius
// scan a (2r+1)x(2r+1) texel neighborhood centered on this fragment
for (yy=y-(float(thickness)*dy),i=-r;i<=r;i++,yy+=dy)
for (xx=x-(float(thickness)*dx),j=-r;j<=r;j++,xx+=dx)
if ((i*i)+(j*j)<=rr) // restrict the square scan to a circle of radius r
if ((texture(txr,vec2(xx,yy)).r)<0.01) // black texel = background from pass 1
{
c=vec3(1.0,0.0,0.0); // outline color
// push both counters past their end condition so the outer loop exits too
i=r+r+1;
j=r+r+1;
break;
}
}
else c=vec3(1.0,1.0,1.0); // render with white in first pass
// output color
col=vec4(c,1.0);
}
The Vertex shader is without change:
// Vertex
// Standard transform chain; identical in both outline passes. Outputs the
// local position, world position, color and world-space normal to the
// fragment shader.
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location = 0) in vec3 pos;
layout(location = 2) in vec3 nor;
layout(location = 3) in vec3 col;
layout(location = 0) uniform mat4 m_model; // model matrix
layout(location =16) uniform mat4 m_normal; // model matrix with origin=(0,0,0)
layout(location =32) uniform mat4 m_view; // inverse of camera matrix
layout(location =48) uniform mat4 m_proj; // projection matrix
out vec3 LCS_pos; // fragment position [LCS]
out vec3 pixel_pos; // fragment position [GCS]
out vec3 pixel_col; // fragment surface color
out vec3 pixel_nor; // fragment surface normal [GCS]
void main()
{
LCS_pos=pos; // untransformed model-local position
pixel_col=col;
pixel_pos=(m_model*vec4(pos,1)).xyz; // world-space position
pixel_nor=(m_normal*vec4(nor,1)).xyz; // w=1 is harmless: m_normal carries zero translation
gl_Position=m_proj*m_view*m_model*vec4(pos,1);
}
And CPU side code looks like this:
//---------------------------------------------------------------------------
#include <vcl.h>
#pragma hdrstop
#include "Unit1.h"
#include "gl_simple.h"
//---------------------------------------------------------------------------
#pragma package(smart_init)
#pragma resource "*.dfm"
TForm1 *Form1;
//---------------------------------------------------------------------------
GLfloat lt_pnt_pos[3]={+2.5,+2.5,+2.5}; // point light position [GCS]; uploaded to uniform location 64
GLfloat lt_pnt_col[3]={0.8,0.8,0.8}; // point light color&strength (uniform location 67)
GLfloat lt_amb_col[3]={0.2,0.2,0.2}; // ambient light color&strength (uniform location 70)
GLuint txrid=0; // texture object holding the first-pass render
GLfloat animt=0.0; // animation parameter advanced in Timer1Timer and shown in the caption
//---------------------------------------------------------------------------
// https://stackoverflow.com/q/46603878/2521214
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Renders one frame using the two-pass outline technique:
// pass 1 (thickness=0): mesh drawn flat white on the black clear color,
// pass 2 (thickness=5): normal shading + outline, sampling the pass-1 image.
void gl_draw()
{
// load values into shader
GLint i,id;
GLfloat m[16];
glUseProgram(prog_id);
GLfloat x,y,z,d=0.25; // NOTE(review): unused leftovers from the base example
id=glGetUniformLocation(prog_id,"txr"); glUniform1i(id,0); // texture unit 0
id=glGetUniformLocation(prog_id,"xs"); glUniform1f(id,xs); // screen resolution
id=glGetUniformLocation(prog_id,"ys"); glUniform1f(id,ys);
id=64; glUniform3fv(id,1,lt_pnt_pos); // explicit locations match the shader layout()s
id=67; glUniform3fv(id,1,lt_pnt_col);
id=70; glUniform3fv(id,1,lt_amb_col);
glGetFloatv(GL_MODELVIEW_MATRIX,m);
id=0; glUniformMatrix4fv(id,1,GL_FALSE,m); // m_model = fixed-pipeline modelview
m[12]=0.0; m[13]=0.0; m[14]=0.0; // zero translation for the normal matrix
id=16; glUniformMatrix4fv(id,1,GL_FALSE,m); // m_normal
for (i=0;i<16;i++) m[i]=0.0; m[0]=1.0; m[5]=1.0; m[10]=1.0; m[15]=1.0; // identity
id=32; glUniformMatrix4fv(id,1,GL_FALSE,m); // m_view = identity (view folded into modelview)
glGetFloatv(GL_PROJECTION_MATRIX,m);
id=48; glUniformMatrix4fv(id,1,GL_FALSE,m); // m_proj
// draw VAO cube (no outline)
id=glGetUniformLocation(prog_id,"thickness"); glUniform1i(id,0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
vao_draw(); // render cube
// copy frame buffer to CPU memory and then back to GPU as texture
BYTE *map=new BYTE[xs*ys*4]; // 4 B/pixel gives slack; GL_RGB read needs only 3
glReadPixels(0,0,xs,ys,GL_RGB,GL_UNSIGNED_BYTE,map); // framebuffer -> map[]
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D,txrid);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, xs, ys, 0, GL_RGB, GL_UNSIGNED_BYTE, map); // map[] -> texture txrid
delete[] map;
// draw VAO cube (outline)
id=glGetUniformLocation(prog_id,"thickness"); glUniform1i(id,5); // 5 px outline
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
vao_draw(); // render cube
glDisable(GL_TEXTURE_2D);
// turn off shader
glUseProgram(0);
// rotate the cube to see animation
glMatrixMode(GL_MODELVIEW);
// glRotatef(1.0,0.0,1.0,0.0);
// glRotatef(1.0,1.0,0.0,0.0);
glFlush();
SwapBuffers(hdc);
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Form constructor: creates the GL context and outline texture, loads and
// compiles the GLSL shaders from disk, dumps the compile log into mm_log
// and builds the cube VAO.
// Fix: the original read the whole shader file into fixed 4096-byte stack
// buffers and then wrote buffer[siz]=0, overflowing the buffer for any
// shader file of 4096 bytes or more; the read size is now clamped.
__fastcall TForm1::TForm1(TComponent* Owner):TForm(Owner)
{
gl_init(Handle);
// texture object that will hold the first-pass render for the outline scan
glGenTextures(1,&txrid);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D,txrid);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE,GL_COPY);
glDisable(GL_TEXTURE_2D);
// load shader sources; clamp the size so the terminating NUL always fits
// and an over-sized shader file cannot overflow the stack buffers
int hnd,siz; char vertex[4096],fragment[4096];
hnd=FileOpen("normal_shading.glsl_vert",fmOpenRead);
siz=FileSeek(hnd,0,2); FileSeek(hnd,0,0);
if (siz>=(int)sizeof(vertex)) siz=sizeof(vertex)-1;
if (siz<0) siz=0; // FileOpen/FileSeek failure yields a negative size
FileRead(hnd,vertex,siz); vertex[siz]=0; FileClose(hnd);
hnd=FileOpen("normal_shading.glsl_frag",fmOpenRead);
siz=FileSeek(hnd,0,2); FileSeek(hnd,0,0);
if (siz>=(int)sizeof(fragment)) siz=sizeof(fragment)-1;
if (siz<0) siz=0;
FileRead(hnd,fragment,siz); fragment[siz]=0; FileClose(hnd);
glsl_init(vertex,fragment);
// hnd=FileCreate("GLSL.txt"); FileWrite(hnd,glsl_log,glsl_logs); FileClose(hnd);
// split the compile log on CR/LF and add each line to the memo
int i0,i;
mm_log->Lines->Clear();
for (i=i0=0;i<glsl_logs;i++)
if ((glsl_log[i]==13)||(glsl_log[i]==10))
{
glsl_log[i]=0; // temporarily terminate the line in place
mm_log->Lines->Add(glsl_log+i0);
glsl_log[i]=13; // restore a break (original CR vs LF value is not kept)
for (;((glsl_log[i]==13)||(glsl_log[i]==10))&&(i<glsl_logs);i++);
i0=i;
}
if (i0<glsl_logs) mm_log->Lines->Add(glsl_log+i0); // trailing unterminated line
vao_init();
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Releases GL resources on shutdown.
// NOTE(review): glDeleteTextures runs before gl_exit() but glsl_exit() and
// vao_exit() run after it; if gl_exit() destroys the GL context those later
// cleanups act on a dead context - confirm the teardown order in gl_simple.h.
void __fastcall TForm1::FormDestroy(TObject *Sender)
{
glDeleteTextures(1,&txrid);
gl_exit();
glsl_exit();
vao_exit();
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Window resize handler: resizes the GL viewport (leaving room for the log
// memo) and sets up the initial camera offset and cube orientation.
// NOTE(review): the translate/rotate calls post-multiply the current
// matrices, so repeated resizes accumulate unless gl_resize() resets
// GL_PROJECTION/GL_MODELVIEW - confirm against gl_simple.h.
void __fastcall TForm1::FormResize(TObject *Sender)
{
gl_resize(ClientWidth,ClientHeight-mm_log->Height);
glMatrixMode(GL_PROJECTION);
glTranslatef(0,0,-15.0); // move the scene away from the camera
glMatrixMode(GL_MODELVIEW);
glRotatef(-15.0,0.0,1.0,0.0);
glRotatef(-125.0,1.0,0.0,0.0);
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Repaint handler: redraws the whole scene.
void __fastcall TForm1::FormPaint(TObject *Sender)
{
gl_draw();
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Animation timer: redraws and advances the animation parameter.
// NOTE(review): animt is displayed in the caption but is not uploaded to the
// outline shaders in gl_draw() - it looks like a leftover of the highlight
// demo this code was derived from.
void __fastcall TForm1::Timer1Timer(TObject *Sender)
{
gl_draw();
animt+=0.02; if (animt>1.5) animt=-0.5; // wrap the parameter
Caption=animt; // show the current value for debugging
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Mouse wheel: zooms by translating the projection matrix along z and
// redraws immediately.
// NOTE(review): Handled is never set to true - confirm whether default
// wheel processing should still run after this handler.
void __fastcall TForm1::FormMouseWheel(TObject *Sender, TShiftState Shift, int WheelDelta, TPoint &MousePos, bool &Handled)
{
GLfloat dz=2.0;
if (WheelDelta<0) dz=-dz; // wheel down = zoom out
glMatrixMode(GL_PROJECTION);
glTranslatef(0,0,dz);
gl_draw();
}
//---------------------------------------------------------------------------
As usual the code is using/based on this:
complete GL+GLSL+VAO/VBO C++ example
[Notes]
In case you have multiple objects, use a different color for each object in #2. Then in #5 scan for any color different than the one in the texel at the current position, instead of scanning for black.
Also this can be done on 2D image instead of using mesh. You just need to know the background color. So you can use pre-renderd/grabed/screenshoted images for this too.
You can add discard and or change the final if logic to change behaviour (like you want just outline and no mesh inside etc ...). Or you can add the outline color to render color instead of assigning it directly to get the impression of highlight ... instead of coloring
see a),b),c) options in modified fragment:
// Fragment
// Variant of the outline fragment shader with three selectable behaviours:
// a) replace the fragment color with the outline color,
// b) add the outline color (highlight effect),
// c) discard non-outline fragments so only the outline remains.
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location =64) uniform vec3 lt_pnt_pos;// point light source position [GCS]
layout(location =67) uniform vec3 lt_pnt_col;// point light source color&strength
layout(location =70) uniform vec3 lt_amb_col;// ambient light source color&strength
in vec3 LCS_pos; // fragment position [LCS]
in vec3 pixel_pos; // fragment position [GCS]
in vec3 pixel_col; // fragment surface color
in vec3 pixel_nor; // fragment surface normal [GCS]
out vec4 col;
// outline
uniform sampler2D txr; // texture from previous pass
uniform int thickness; // [pixels] outline thickness
uniform float xs,ys; // [pixels] texture/screen resolution
void main()
{
// standard rendering
float li;
vec3 c,lt_dir;
lt_dir=normalize(lt_pnt_pos-pixel_pos); // vector from fragment to point light source in [GCS]
li=dot(pixel_nor,lt_dir);
if (li<0.0) li=0.0; // clamp diffuse term to front-facing light
c=pixel_col*(lt_amb_col+(lt_pnt_col*li));
// outline effect
if (thickness>0) // thickness effect in second pass
{
int i,j,r=thickness;
float xx,yy,rr,x,y,dx,dy;
dx=1.0/xs; // texel size
dy=1.0/ys;
x=gl_FragCoord.x*dx; // fragment position in texture coordinates <0,1>
y=gl_FragCoord.y*dy;
rr=thickness*thickness; // squared scan radius
for (yy=y-(float(thickness)*dy),i=-r;i<=r;i++,yy+=dy)
for (xx=x-(float(thickness)*dx),j=-r;j<=r;j++,xx+=dx)
if ((i*i)+(j*j)<=rr) // restrict the square scan to a circle
if ((texture(txr,vec2(xx,yy)).r)<0.01) // black texel = background from pass 1
{
c =vec3(1.0,0.0,0.0); // a) assign outline color
// c+=vec3(1.0,0.0,0.0); // b) add outline color
i=r+r+1; // push counters past the end to exit both loops
j=r+r+1;
r=0; // flag "outline found" for option c) below
break;
}
// if (r!=0) discard; // c) do not render inside
}
else c=vec3(1.0,1.0,1.0); // render with white in first pass
// output color
col=vec4(c,1.0);
}
[Edit1] single pass approach for smooth edges
As you can not access client side code this approach will work in shader only. For smooth (curved) edged shapes the surface normal is near perpendicular to camera view axis (z). So dot between them is near zero. This can be exploited directly ... Here update of the shaders:
Vertex
// Vertex
// Single-pass outline: besides the standard outputs this computes the
// surface normal in camera space so the fragment shader can detect
// silhouette fragments (normal nearly perpendicular to the view axis).
// Fix: the modelview matrix was composed as m_model*m_view, which is the
// wrong order relative to the gl_Position chain (m_proj*m_view*m_model);
// it is now m_view*m_model, and the normal is transformed with w=0 so it
// is treated as a direction.
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location = 0) in vec3 pos;
layout(location = 2) in vec3 nor;
layout(location = 3) in vec3 col;
layout(location = 0) uniform mat4 m_model; // model matrix
layout(location =16) uniform mat4 m_normal; // model matrix with origin=(0,0,0)
layout(location =32) uniform mat4 m_view; // inverse of camera matrix
layout(location =48) uniform mat4 m_proj; // projection matrix
out vec3 pixel_pos; // fragment position [GCS]
out vec3 pixel_col; // fragment surface color
out vec3 pixel_nor; // fragment surface normal [GCS]
out vec3 view_nor; // surface normal in camera [LCS]
void main()
{
pixel_col=col;
pixel_pos=(m_model*vec4(pos,1)).xyz;
pixel_nor=(m_normal*vec4(nor,1)).xyz;
mat4 m;
m=m_view*m_model; // modelview: local -> world -> camera, matching gl_Position below
m[3].xyz=vec3(0.0,0.0,0.0); // zero the translation so m transforms directions only
view_nor=(m*vec4(nor,0.0)).xyz; // w=0: object local normal to camera local normal
gl_Position=m_proj*m_view*m_model*vec4(pos,1);
}
Fragment
// Fragment
// Single-pass outline for smooth shapes: a fragment is part of the
// silhouette when its camera-space normal is nearly perpendicular to the
// view axis (z), i.e. the |cos| between them is close to zero.
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location =64) uniform vec3 lt_pnt_pos;// point light source position [GCS]
layout(location =67) uniform vec3 lt_pnt_col;// point light source color&strength
layout(location =70) uniform vec3 lt_amb_col;// ambient light source color&strength
in vec3 pixel_pos; // fragment position [GCS]
in vec3 pixel_col; // fragment surface color
in vec3 pixel_nor; // fragment surface normal [GCS]
out vec4 col;
// outline
in vec3 view_nor; // surface normal in camera [LCS]
void main()
{
// standard rendering
float li;
vec3 c,lt_dir;
lt_dir=normalize(lt_pnt_pos-pixel_pos); // vector from fragment to point light source in [GCS]
li=dot(pixel_nor,lt_dir);
if (li<0.0) li=0.0; // clamp diffuse term to front-facing light
c=pixel_col*(lt_amb_col+(lt_pnt_col*li));
// outline effect
// dot with (0,0,1) extracts view_nor.z = cosine to the view axis; the 0.5
// threshold controls outline thickness (0.0 = none, 1.0 = whole object)
if (abs(dot(view_nor,vec3(0.0,0.0,1.0)))<=0.5) c=vec3(1.0,0.0,0.0);
// output color
col=vec4(c,1.0);
}
Here preview:
As you can see it works properly for smooth objects, but for sharp edges like those on a cube it does not work at all... You can use the same combinations (a,b,c) as in the previous approach.
The m holds modelview matrix with origin set to (0,0,0). That enables it for vector conversion (no translation). For more info see Understanding 4x4 homogenous transform matrices.
The 0.5 threshold on the dot product result controls the thickness of the outline. 0.0 means no outline and 1.0 means the whole object is outlined.
Related
Cannot apply blending to cube located behind half-transparent textured surface
Following the tutorial from learnopengl.com about rendering half-transparent windows glasses using blending, I tried to apply that principle to my simple scene (where we can navigate the scene using the mouse) containing: Cube: 6 faces, each having 2 triangles, constructed using two attributes (position and color) defined in its associated vertex shader and passed to its fragment shader. Grass: 2D Surface (two triangles) to which a png texture was applied using a sampler2D uniform (the background of the png image is transparent). Window: A half-transparent 2D surface based on the same shaders (vertex and fragment) as the grass above. Both textures were downloaded from learnopengl.com The issue I'm facing is that when it comes to the Grass, I can see it through the Window but not the Cube! My code is structured as follows (I left the rendering of the window to the very last on purpose): // enable depth test & blending glEnable(GL_DEPTH_TEST); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_DST_ALPHA); while (true): glClearColor(background.r, background.g, background.b, background.a); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); cube.draw(); grass.draw(); window.draw(); Edit: I'll share below the vertex and fragment shaders used to draw the two textured surfaces (grass and window): #version 130 in vec2 position; in vec2 texture_coord; // opengl tranformation matrices uniform mat4 model; // object coord -> world coord uniform mat4 view; // world coord -> camera coord uniform mat4 projection; // camera coord -> ndc coord out vec2 texture_coord_vert; void main() { gl_Position = projection * view * model * vec4(position, 0.0, 1.0); texture_coord_vert = texture_coord; } #version 130 in vec2 texture_coord_vert; uniform sampler2D texture2d; out vec4 color_out; void main() { vec4 color = texture(texture2d, texture_coord_vert); // manage transparency if (color.a == 0.0) discard; color_out = color; } And the ones used to render the colored cube: #version 130 
in vec3 position; in vec3 color; // opengl tranformation matrices uniform mat4 model; // object coord -> world coord uniform mat4 view; // world coord -> camera coord uniform mat4 projection; // camera coord -> ndc coord out vec3 color_vert; void main() { gl_Position = projection * view * model * vec4(position, 1.0); color_vert = color; } #version 130 in vec3 color_vert; out vec4 color_out; void main() { color_out = vec4(color_vert, 1.0); } P.S: My shader programs uses GLSL v1.30, because my internal GPU didn't seem to support later versions. Regarding the piece of code that does the actual drawing, I basically have one instance of a Renderer class for each type of geometry (one shared by both textured surfaces, and one for the cube). This class manages the creation/binding/deletion of VAOs and binding/deletion of VBOs (creation of VBOs made outside the class so I can share vertexes with similar shapes). Its constructor takes as an argument the shader program and the vertex attributes. I'll try to show the relevant piece of code below Renderer::Renderer(Program program, vector attributes) { vao.bind(); vbo.bind(); define_attributes(attributes); vao.unbind(); vbo.unbind(); } Renderer::draw(Uniforms uniforms) { vao.bind(); program.use(); set_uniforms(unfiorms); glDrawArrays(GL_TRIANGLES, 0, n_vertexes); vao.unbind(); program.unuse(); }
Your blend function depends on the target's alpha channel (GL_ONE_MINUS_DST_ALPHA): glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_DST_ALPHA); dest = src * src_alpha + dest * (1-dest_alpha) If the alpha channel of the cube is 0.0, the color of the cube is not mixed with the color of the window. The traditional alpha blending function depends only on the source alpha channel: glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); dest = src * src_alpha + dest * (1-src_alpha) See also glBlendFunc and Blending
Deferred MSAA Artifacting
This is the process I go through to render the scene: Bind MSAA x4 GBuffer (4 Color Attachments, Position, Normal, Color and Unlit Color (skybox only. I also have a Depth component/Texture). Draw SkyBox Draw Geo Blit all Color and Depth Components to a Single Sample FBO Apply Lighting (I use the depth texture to check if it should be lit by checking if depth texture value is less than 1). Render Quad And this is what is happening: As you can see I get these white and black artefacts around the edge instead of smooth edge. (Good to note that if I remove the lighting and just render the texture without lighting, I don't get this and it smooths correctly). Here is my shader (it has SSAO implemented but that seem to not effect this). #version 410 core in vec2 Texcoord; out vec4 outColor; uniform sampler2D texFramebuffer; uniform sampler2D ssaoTex; uniform sampler2D gPosition; uniform sampler2D gNormal; uniform sampler2D gAlbedo; uniform sampler2D gAlbedoUnlit; uniform sampler2D gDepth; uniform mat4 View; struct Light { vec3 Pos; vec3 Color; float Linear; float Quadratic; float Radius; }; const int MAX_LIGHTS = 32; uniform Light lights[MAX_LIGHTS]; uniform vec3 viewPos; uniform bool SSAO; void main() { vec3 color = texture(gAlbedo, Texcoord).rgb; vec3 colorUnlit = texture(gAlbedoUnlit, Texcoord).rgb; vec3 pos = texture(gPosition, Texcoord).rgb; vec3 norm = normalize(texture( gNormal, Texcoord)).rgb; vec3 depth = texture(gDepth, Texcoord).rgb; float ssaoValue = texture(ssaoTex, Texcoord).r; // then calculate lighting as usual vec3 lighting; if(SSAO) { lighting = vec3(0.3 * color.rgb * ssaoValue); // hard-coded ambient component } else { lighting = vec3(0.3 * color.rgb); // hard-coded ambient component } vec3 posWorld = pos.rgb; vec3 viewDir = normalize(viewPos - posWorld); for(int i = 0; i < MAX_LIGHTS; ++i) { vec4 lightPos = View * vec4(lights[i].Pos,1.0); vec3 normLight = normalize(lightPos.xyz); float distance = length(lightPos.xyz - posWorld); if(distance < 
lights[i].Radius) { // diffuse vec3 lightDir = normalize(lightPos.xyz - posWorld); vec3 diffuse = max(dot(norm.rgb, lightDir), 0.0) * color.rgb * lights[i].Color; float attenuation = 1.0 / (1.0 + lights[i].Linear * distance + lights[i].Quadratic * distance * distance); lighting += (diffuse*attenuation); } } if(depth.r >= 1) { outColor = vec4(colorUnlit, 1.0); } else { outColor = vec4(lighting, 1.0); } } So the last if statement checks if it is in the depth texture, if it is then apply lighting, if it is not then just draw the skybox (this is so lighting is not applied to the skybox). I have spent a few days trying to work this out, changing ways of checking if it should be light by comparing normals, position and depth, changing the formats to a higher res (e.g. using RGB16F instead of RGB8 etc.) but I can't figure out what is causing it and doing lighting per sample (using texel fetch) would be way to intensive. Any Ideas?
This question is a bit old now but I thought I would say how I solved my issue. I run basic Sobel Filter in my shader which I use to do screen-space outlines, but in addition I also check if MSAA is enabled and if so compute lighting per texel around the edge pixels!
Analysis of a shader in VR
I would like to create a shader like that that takes world coordinates and creates waves. I would like to analyse the video and know the steps that are required. I'm not looking for codes, I'm just looking for ideas on how to implement that using GLSL or HLSL or any other language. Here low quality and fps GIF in case link broke. Here is the fragment shader: #version 330 core // Interpolated values from the vertex shaders in vec2 UV; in vec3 Position_worldspace; in vec3 Normal_cameraspace; in vec3 EyeDirection_cameraspace; in vec3 LightDirection_cameraspace; // highlight effect in float pixel_z; // fragment z coordinate in [LCS] uniform float animz; // highlight animation z coordinate [GCS] // Ouput data out vec4 color; vec3 c; // Values that stay constant for the whole mesh. uniform sampler2D myTextureSampler; uniform mat4 MV; uniform vec3 LightPosition_worldspace; void main(){ // Light emission properties // You probably want to put them as uniforms vec3 LightColor = vec3(1,1,1); float LightPower = 50.0f; // Material properties vec3 MaterialDiffuseColor = texture( myTextureSampler, UV ).rgb; vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * MaterialDiffuseColor; vec3 MaterialSpecularColor = vec3(0.3,0.3,0.3); // Distance to the light float distance = length( LightPosition_worldspace - Position_worldspace ); // Normal of the computed fragment, in camera space vec3 n = normalize( Normal_cameraspace ); // Direction of the light (from the fragment to the light) vec3 l = normalize( LightDirection_cameraspace ); // Cosine of the angle between the normal and the light direction, // clamped above 0 // - light is at the vertical of the triangle -> 1 // - light is perpendicular to the triangle -> 0 // - light is behind the triangle -> 0 float cosTheta = clamp( dot( n,l ), 0,1 ); // Eye vector (towards the camera) vec3 E = normalize(EyeDirection_cameraspace); // Direction in which the triangle reflects the light vec3 R = reflect(-l,n); // Cosine of the angle between the Eye 
vector and the Reflect vector, // clamped to 0 // - Looking into the reflection -> 1 // - Looking elsewhere -> < 1 float cosAlpha = clamp( dot( E,R ), 0,1 ); c = // Ambient : simulates indirect lighting MaterialAmbientColor + // Diffuse : "color" of the object MaterialDiffuseColor * LightColor * LightPower * cosTheta / (distance*distance) + // Specular : reflective highlight, like a mirror MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha,5) / (distance*distance); float z; z=abs(pixel_z-animz); // distance to animated z coordinate z*=1.5; // scale to change highlight width if (z<1.0) { z*=0.5*3.1415926535897932384626433832795; // z=<0,M_PI/2> 0 in the middle z=0.5*cos(z); color+=vec3(0.0,z,z); } color=vec4(c,1.0); } here is the vertex shader: #version 330 core // Input vertex data, different for all executions of this shader. layout(location = 0) in vec3 vertexPosition_modelspace; layout(location = 1) in vec2 vertexUV; layout(location = 2) in vec3 vertexNormal_modelspace; // Output data ; will be interpolated for each fragment. out vec2 UV; out vec3 Position_worldspace; out vec3 Normal_cameraspace; out vec3 EyeDirection_cameraspace; out vec3 LightDirection_cameraspace; out float pixel_z; // fragment z coordinate in [LCS] // Values that stay constant for the whole mesh. uniform mat4 MVP; uniform mat4 V; uniform mat4 M; uniform vec3 LightPosition_worldspace; void main(){ pixel_z=vertexPosition_modelspace.z; // Output position of the vertex, in clip space : MVP * position gl_Position = MVP * vec4(vertexPosition_modelspace,1); // Position of the vertex, in worldspace : M * position Position_worldspace = (M * vec4(vertexPosition_modelspace,1)).xyz; // Vector that goes from the vertex to the camera, in camera space. // In camera space, the camera is at the origin (0,0,0). 
vec3 vertexPosition_cameraspace = ( V * M * vec4(vertexPosition_modelspace,1)).xyz; EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace; // Vector that goes from the vertex to the light, in camera space. M is ommited because it's identity. vec3 LightPosition_cameraspace = ( V * vec4(LightPosition_worldspace,1)).xyz; LightDirection_cameraspace = LightPosition_cameraspace + EyeDirection_cameraspace; // Normal of the the vertex, in camera space Normal_cameraspace = ( V * M * vec4(vertexNormal_modelspace,0)).xyz; // Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not. // UV of the vertex. No special space for this one. UV = vertexUV; }
there are 2 approaches I can think of for this: 3D reconstruction based so you need to reconstruct the 3D scene from motion (not an easy task and way of my cup of tea). then you simply apply modulation to the selected mesh texture based on u,v texture mapping coordinates and time of animation. Describe such topic will not fit in SO answer so you should google some CV books/papers on the subject instead. Image processing based you simply segmentate the image based on color continuity/homogenity. So you group neighboring pixels that have similar color and intensity (growing regions). When done try to fake surface 3D reconstruction based on intensity gradients similar to this: Turn any 2D image into 3D printable sculpture with code and after that create u,v mapping where one axis is depth. When done then just apply your sin-wave effect modulation to color. I would divide this into 2 stages. 1st pass will segmentate (I would chose CPU side for this) and second for the effect rendering (on GPU). As this is form of augmented reality you should also read this: Augment reality like zookazam btw what is done on that video is neither of above options. They most likely have the mesh for that car already in vector form and use silhouette matching to obtain its orientation on image ... and rendered as usual ... so it would not work for any object on the scene but only for that car ... Something like this: How to get the transformation matrix of a 3d model to object in a 2d image [Edit1] GLSL highlight effect I took this example: complete GL+GLSL+VAO/VBO C++ example And added the highlight to it like this: On CPU side I added animz variable it determines the z coordinate in object local coordinate system LCS where the highlight is actually placed. and I animate it in timer between min and max z value of rendered mesh (cube) +/- some margin so the highlight does not teleport at once from one to another side of object... 
// global float animz=-1.0; // in timer animz+=0.05; if (animz>1.5) animz=-1.5; // my object z = <-1,+1> 0.5 is margin // render id=glGetUniformLocation(prog_id,"animz"); glUniform1f(id,animz); Vertex shader I just take the vertex z coordinate and pass it without transform into the fragment shader out float pixel_z; // fragment z coordinate in [LCS] pixel_z=pos.z; Fragment shader After computing the target color c (by standard rendering) I compute the distance between pixel_z and animz and if it is small then I modulate c with a sine wave dependent on the distance. // highlight effect float z; z=abs(pixel_z-animz); // distance to animated z coordinate z*=1.5; // scale to change highlight width if (z<1.0) { z*=0.5*3.1415926535897932384626433832795; // z=<0,M_PI/2> 0 in the middle z=0.5*cos(z); c+=vec3(0.0,z,z); } Here are the full GLSL shaders... Vertex: #version 400 core #extension GL_ARB_explicit_uniform_location : enable layout(location = 0) in vec3 pos; layout(location = 2) in vec3 nor; layout(location = 3) in vec3 col; layout(location = 0) uniform mat4 m_model; // model matrix layout(location =16) uniform mat4 m_normal; // model matrix with origin=(0,0,0) layout(location =32) uniform mat4 m_view; // inverse of camera matrix layout(location =48) uniform mat4 m_proj; // projection matrix out vec3 pixel_pos; // fragment position [GCS] out vec3 pixel_col; // fragment surface color out vec3 pixel_nor; // fragment surface normal [GCS] // highlight effect out float pixel_z; // fragment z coordinate in [LCS] void main() { pixel_z=pos.z; pixel_col=col; pixel_pos=(m_model*vec4(pos,1)).xyz; pixel_nor=(m_normal*vec4(nor,1)).xyz; gl_Position=m_proj*m_view*m_model*vec4(pos,1); } Fragment: #version 400 core #extension GL_ARB_explicit_uniform_location : enable layout(location =64) uniform vec3 lt_pnt_pos;// point light source position [GCS] layout(location =67) uniform vec3 lt_pnt_col;// point light source color&strength layout(location =70) uniform vec3 lt_amb_col;// ambient light source color&strength in vec3 
pixel_pos; // fragment position [GCS] in vec3 pixel_col; // fragment surface color in vec3 pixel_nor; // fragment surface normal [GCS] out vec4 col; // highlight effect in float pixel_z; // fragment z coordinate in [LCS] uniform float animz; // highlight animation z coordinate [GCS] void main() { // standard rendering float li; vec3 c,lt_dir; lt_dir=normalize(lt_pnt_pos-pixel_pos); // vector from fragment to point light source in [GCS] li=dot(pixel_nor,lt_dir); if (li<0.0) li=0.0; c=pixel_col*(lt_amb_col+(lt_pnt_col*li)); // highlight effect float z; z=abs(pixel_z-animz); // distance to animated z coordinate z*=1.5; // scale to change highlight width if (z<1.0) { z*=0.5*3.1415926535897932384626433832795; // z=<0,M_PI/2> 0 in the middle z=0.5*cos(z); c+=vec3(0.0,z,z); } col=vec4(c,1.0); } And preview: This approach does not require textures nor u,v mapping. [Edit2] highlight with start point There are many ways how to implement it. I chose distance from the start point as a highlight parameter. So the highlight will grow from the point in all directions. Here preview for two different touch point locations: The white bold cross is the location of touch point rendered for visual check. 
Here the code: Vertex: // Vertex #version 400 core #extension GL_ARB_explicit_uniform_location : enable layout(location = 0) in vec3 pos; layout(location = 2) in vec3 nor; layout(location = 3) in vec3 col; layout(location = 0) uniform mat4 m_model; // model matrix layout(location =16) uniform mat4 m_normal; // model matrix with origin=(0,0,0) layout(location =32) uniform mat4 m_view; // inverse of camera matrix layout(location =48) uniform mat4 m_proj; // projection matrix out vec3 LCS_pos; // fragment position [LCS] out vec3 pixel_pos; // fragment position [GCS] out vec3 pixel_col; // fragment surface color out vec3 pixel_nor; // fragment surface normal [GCS] void main() { LCS_pos=pos; pixel_col=col; pixel_pos=(m_model*vec4(pos,1)).xyz; pixel_nor=(m_normal*vec4(nor,1)).xyz; gl_Position=m_proj*m_view*m_model*vec4(pos,1); } Fragment: // Fragment #version 400 core #extension GL_ARB_explicit_uniform_location : enable layout(location =64) uniform vec3 lt_pnt_pos;// point light source position [GCS] layout(location =67) uniform vec3 lt_pnt_col;// point light source color&strength layout(location =70) uniform vec3 lt_amb_col;// ambient light source color&strength in vec3 LCS_pos; // fragment position [LCS] in vec3 pixel_pos; // fragment position [GCS] in vec3 pixel_col; // fragment surface color in vec3 pixel_nor; // fragment surface normal [GCS] out vec4 col; // highlight effect uniform vec3 touch; // highlight start point [GCS] uniform float animt; // animation parameter <0,1> or -1 for off uniform float size; // highlight size void main() { // standard rendering float li; vec3 c,lt_dir; lt_dir=normalize(lt_pnt_pos-pixel_pos); // vector from fragment to point light source in [GCS] li=dot(pixel_nor,lt_dir); if (li<0.0) li=0.0; c=pixel_col*(lt_amb_col+(lt_pnt_col*li)); // highlight effect float t=length(LCS_pos-touch)/size; // distance from start point if (t<=animt) { t*=0.5*3.1415926535897932384626433832795; // z=<0,M_PI/2> 0 in the middle t=0.75*cos(t); 
c+=vec3(0.0,t,t); } col=vec4(c,1.0); } You control this with uniforms: uniform vec3 touch; // highlight start point [GCS] uniform float animt; // animation parameter <0,1> or -1 for off uniform float size; // max distance of any point of object from touch point
OpenGL 3.3 deferred shading not working
I've set up an OpenGL environment with deferred shading following this tutorial but I can't make the second shader output on my final buffer. I can see that the first shader (the one that doesn't use lights) is working properly because with gDEBugger I can see that the output buffers are correct, but the second shader really can't display anything. I've also tried to make the second shader output a single color for all the scene just to see if it was displaying something, but nothing is visible (the screen should be completely red but it isn't). The first pass shader (the one I use to create the buffers for the GBuffer) is working so I'm not adding its code or how I created and implemented my GBuffer, but if you need I'll add them, just tell me. I think the problem is when I tell OpenGL to output on the FrameBuffer 0 (my video). This is how I enable OpenGL to write to the FrameBuffer 0: glEnable(GL_BLEND); m_MotoreGrafico->glBlendEquation(GL_FUNC_ADD); glBlendFunc(GL_ONE, GL_ONE); // Abilito la scrittura sul buffer finale m_MotoreGrafico->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); m_gBuffer.BindForReading(); glClear(GL_COLOR_BUFFER_BIT); // Imposto le matrici dello shader SetUpOGLProjectionViewMatrix(1); // Passo le texture del GBuffer allo shader pActiveShader->setUniform1i(_T("gPositionMap"), m_gBuffer.GetPositionTexture()); pActiveShader->setUniform1i(_T("gColorMap"), m_gBuffer.GetDiffuseTexture()); pActiveShader->setUniform1i(_T("gNormalMap"), m_gBuffer.GetNormalTexture()); // Passo variabili necessarie allo shader float dimensioneFinestra[2], posizioneCamera[3]; dimensioneFinestra[0] = m_nLarghezzaFinestra; dimensioneFinestra[1] = m_nAltezzaFinestra; m_MotoreGrafico->GetActiveCameraPosition(posizioneCamera); pActiveShader->setUniform2f(_T("gScreenSize"), dimensioneFinestra); pActiveShader->setUniform3f(_T("gCameraPos"), posizioneCamera); pActiveShader->setUniform1i(_T("gUsaLuci"), 0); // Disegno le luci float coloreLuce[3], posizioneLuce[3], direzioneLuce[3], 
vUpLuce[3], vRightLuce[3], intensita; for(int i = 0; i < GetDocument()->m_RTL.GetNLights(); i++) { CRTLuce* pRTLuce = GetDocument()->m_RTL.GetRTLightAt(i); ... m_MotoreGrafico->glBindVertexArray(pRTLuce->GetRTLuce()->GetVBO()->getVBAIndex()); glDrawArrays(GL_TRIANGLES, 0, pRTLuce->GetRTLuce()->GetNVertPerShader()); } The function m_gBuffer.BindForReading() is like this (but I think it doesn't matter for my problem): for (unsigned int i = 0 ; i < ARRAY_SIZE_IN_ELEMENTS(m_textures); i++) { m_pMotoreGrafico->glActiveTexture(GL_TEXTURE0 + i); glBindTexture(GL_TEXTURE_2D, m_textures[GBUFFER_TEXTURE_TYPE_POSITION + i]); } So far my GBuffer is working (it creates the textures) and my first shader is also working (it's drawing the textures of my GBuffer). The problem then is that I can't reset OpenGL to draw in my video. The first 4 textures are the ones created with the first-pass shader. This is my back buffer (after the second-pass shader) And this is my front buffer (after the second-pass shader) This is my second-pass fragment shader code (it outputs only red) out vec4 outputColor; void main() { outputColor = vec4(1.0, 0.0, 0.0, 1.0); } Does anyone have an idea of what I'm doing wrong? 
Second-pass vertex shader code: #version 330 uniform struct Matrici { mat4 projectionMatrix; mat4 modelMatrix; mat4 viewMatrix; } matrices; layout (location = 0) in vec3 inPosition; void main() { vec4 vEyeSpacePosVertex = matrices.viewMatrix * matrices.modelMatrix * vec4(inPosition, 1.0); gl_Position = matrices.projectionMatrix * vEyeSpacePosVertex; } Second-pass fragment shader code: #version 330 uniform struct MDLight { vec3 vColor; vec3 vPosition; vec3 vDirection; float fAmbientIntensity; float fStrength; int bOn; float fConeCosine; float fAltezza; float fLarghezza; vec3 vUp; vec3 vRight; } gLuce; uniform float gSpecularIntensity; uniform float gSpecularPower; uniform sampler2D gPositionMap; uniform sampler2D gColorMap; uniform sampler2D gNormalMap; uniform vec3 gCameraPos; uniform vec2 gScreenSize; uniform int gLightType; uniform int gUsaLuci; vec2 CalcTexCoord() { return gl_FragCoord.xy / gScreenSize; } out vec4 outputColor; void main() { vec2 TexCoord = CalcTexCoord(); vec4 Color = texture(gColorMap, TexCoord); outputColor = vec4(1.0, 0.0, 0.0, 1.0); }
How do I get textures to work in OpenGL?
I'm using the tutorials on http://arcsynthesis.org/gltut/ to learn OpenGL, it's required, I have to use it. Mostly I want to apply the textures from Tutorial 15 onto objects in tutorial 7 (world with UBO). For now it seemed like the textures only work when mipmaps are turned on. This comes with a downside: The only mipmap used is the one with an index of zero, and that's the 1 colored 1x1 pixel one. I tried setting the minimum level of a mipmap higher or turning off mipmaps entirely, but even that doesn't fix thing, because then everything turns pitch black. Now I'll list the most important parts of my program EDIT: I guess I'll add more details... The vertex shader has something like this: #version 330 layout(location = 0) in vec4 position; layout(location = 1) in vec4 color; layout(location = 2) in vec3 normal; //Added these later layout(location = 5) in vec2 texCoord; out vec2 colorCoord; smooth out vec4 interpColor; out vec3 vertexNormal; out vec3 modelSpacePosition; out vec3 cameraSpacePosition; uniform mat4 worldToCameraMatrix; uniform mat4 modelToWorldMatrix; uniform mat3 normalModelToCameraMatrix; uniform vec3 dirToLight; uniform vec4 lightIntensity; uniform vec4 ambientIntensity; uniform vec4 baseColor; uniform mat4 cameraToClipMatrix; void main() { vertexNormal = normal; vec3 normCamSpace = normalize(normalModelToCameraMatrix * vertexNormal); cameraSpacePosition = normCamSpace; float cosAngIncidence = dot(normCamSpace, dirToLight); cosAngIncidence = clamp(cosAngIncidence, 0, 1); modelSpacePosition.x = position.x; modelSpacePosition.y = position.y; modelSpacePosition.z = position.z; vec4 temp = modelToWorldMatrix * position; temp = worldToCameraMatrix * temp; gl_Position = cameraToClipMatrix * temp; interpColor = ((lightIntensity * cosAngIncidence) + (ambientIntensity)) * baseColor; colorCoord= texCoord ; } The fragment shader like this: #version 330 in vec3 vertexNormal; in vec3 modelSpacePosition; smooth in vec4 interpColor; uniform vec3 
modelSpaceLightPos; uniform vec4 lightIntensity2; uniform vec4 ambientIntensity2; out vec4 outputColor; //Added later in vec2 colorCoord; uniform sampler2D colorTexture; void main() { vec3 lightDir2 = normalize(modelSpacePosition - modelSpaceLightPos); float cosAngIncidence2 = dot(normalize(vertexNormal), lightDir2); cosAngIncidence2 = clamp(cosAngIncidence2, 0, 1); float light2DistanceSqr = dot(modelSpacePosition - modelSpaceLightPos, modelSpacePosition - modelSpaceLightPos); //added vec4 texture2 = texture(colorTexture, colorCoord); outputColor = ((ambientIntensity2 + (interpColor*2))/4) + ((((interpColor) * lightIntensity2/200 * cosAngIncidence2) + (ambientIntensity2* interpColor )) /( ( sqrt(light2DistanceSqr) + light2DistanceSqr)/200 )); //No outputColor for texture testing outputColor = texture2 ; } } Those were both shaders. And here are the parts added to the .cpp: #include <glimg/glimg.h> #include "../framework/directories.h" [...] const int g_colorTexUnit = 0; GLuint g_checkerTexture = 0; And here's the loader for the texture: void LoadCheckerTexture() { try { std::string filename(LOCAL_FILE_DIR); filename += "checker.dds"; std::auto_ptr<glimg::ImageSet> pImageSet(glimg::loaders::dds::LoadFromFile(filename.c_str())); glGenTextures(1, &g_checkerTexture); glBindTexture(GL_TEXTURE_2D, g_checkerTexture); glimg::SingleImage image = pImageSet->GetImage(0, 0, 0); glimg::Dimensions dims = image.GetDimensions(); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, dims.width, dims.height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, image.GetImageData()); glBindTexture(GL_TEXTURE_2D, 0); } catch(std::exception &e) { printf("%s\n", e.what()); throw; } } Naturally I've got this in void init(): LoadCheckerTexture(); And then when rendering the object: glActiveTexture(GL_TEXTURE0 + g_colorTexUnit); glBindTexture(GL_TEXTURE_2D,g_checkerTexture); g_pLeftMesh->Render(); glBindSampler(g_colorTexUnit, 0); glBindTexture(GL_TEXTURE_2D, 0); With all of this, I get put pitch black for 
everything, however, when I change the outputColor equation into "texture + outputColor;", everything looks normal. I have no idea what I'm doing wrong here. A friend tried to help me, we removed some unnecessary stuff, but we got nothing running.
Ok guys, I've worked on this whole thing, and did manage to somehow get it running. First off I had to add samplers: GLuint g_samplers; //Add Later void CreateSamplers() { glGenSamplers(1, &g_samplers); glSamplerParameteri(g_samplers, GL_TEXTURE_WRAP_S, GL_REPEAT); glSamplerParameteri(g_samplers, GL_TEXTURE_WRAP_T, GL_REPEAT); //Linear mipmap Nearest glSamplerParameteri(g_samplers, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glSamplerParameteri(g_samplers, GL_TEXTURE_MIN_FILTER, GL_NEAREST); } I also added this to the file-loading code: glimg::OpenGLPixelTransferParams xfer = glimg::GetUploadFormatType(pImageSet->GetFormat(), 0); glimg::SingleImage image = pImageSet->GetImage(0, 0, 0); glimg::Dimensions dims = image.GetDimensions(); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dims.width, dims.height, 0, xfer.format, xfer.type, image.GetImageData()); The xfer variable does get the format and type adjusted to the dds. Also the render code got turned into this: //Added necessary glActiveTexture(GL_TEXTURE0 + g_colorTexUnit); glBindTexture(GL_TEXTURE_2D,g_checkerTexture); glBindSampler(g_colorTexUnit, g_samplers); g_pLeftMesh->Render(); glBindSampler(g_colorTexUnit, 0); glBindTexture(GL_TEXTURE_2D, 0); And of course at the end of init() I needed to add the CreateSamplers thing: //Added this later LoadCheckerTexture(); CreateSamplers(); I'm sorry for all the trouble with all this, but I guess OpenGL really is just this confusing and it was just dumb luck that I got it right. Just posting this so that people know
Your failure to add textures may be caused by: Have you added texture coordinates to objects? (this is the most probable cause, because you are adding textures to a non-textured tutorial), add textures to VAO. Did you add uniform textureunit (Sampler2D)? (it must be uniform, else texturing will not work properly) Is your texture loaded, bound, enabled (GL_TEXTURE_2D) ? Is your active texture unit 0? If not change layout/multitexture coords or set active texture 0 These two codes are simple texturing shaders (texture unit 0) no special things (like light,blend,bump,...): tm_l2g is transformation local obj space -> world space (Modelview) tm_g2s is transformation world space -> screen space (Projection) pos are vertex coordinates txr are texture coordinates col are colors Do not forget to change uniform names and layout locations to yours. Vertex: //------------------------------------------------------------------ #version 420 core //------------------------------------------------------------------ uniform mat4x4 tm_l2g; uniform mat4x4 tm_g2s; layout(location=0) in vec3 pos; layout(location=1) in vec4 col; layout(location=2) in vec2 txr; out smooth vec4 pixel_col; out smooth vec2 pixel_txr; //------------------------------------------------------------------ void main(void) { vec4 p; p.xyz=pos; p.w=1.0; p=tm_l2g*p; p=tm_g2s*p; gl_Position=p; pixel_col=col; pixel_txr=txr; } //------------------------------------------------------------------ fragment: //------------------------------------------------------------------ #version 420 core //------------------------------------------------------------------ in smooth vec4 pixel_col; in smooth vec2 pixel_txr; uniform sampler2D txr_texture0; out layout(location=0) vec4 frag_col; //------------------------------------------------------------------ void main(void) { vec4 col; col=texture(txr_texture0,pixel_txr.st); frag_col=col*pixel_col; } //------------------------------------------------------------------ [edit1] CPU old style 
OpenGL render code (initializations are not included its only render code they can be found here) //------------------------------------------------------------------ // set modelview,projection,textures,bind GLSL programs... GLfloat a=10.0,z=0.0; glColor3f(1.0,1.0,1.0); glBegin(GL_QUADS); // textured quad glTexCoord2f(0.0,0.0); glVertex3f(-a,-a,z); glTexCoord2f(0.0,1.0); glVertex3f(-a,+a,z); glTexCoord2f(1.0,1.0); glVertex3f(+a,+a,z); glTexCoord2f(1.0,0.0); glVertex3f(+a,-a,z); // reverse order quad to be shore that at least one passes by CULL_FACE glTexCoord2f(1.0,0.0); glVertex3f(+a,-a,z); glTexCoord2f(1.0,1.0); glVertex3f(+a,+a,z); glTexCoord2f(0.0,1.0); glVertex3f(-a,+a,z); glTexCoord2f(0.0,0.0); glVertex3f(-a,-a,z); glEnd(); //------------------------------------------------------------------ [edit2] ok here goes VAO/VBO render code,... //------------------------------------------------------------------------------ // enum of VBO locations (it is also your layout location) I use enums for simple in code changes enum _vbo_enum { _vbo_pos=0, // glVertex _vbo_col, // glColor _vbo_tan, // glNormal _vbo_unused0, // unused (at least i dont see anything at this location in your code) _vbo_unused1, // unused (at least i dont see anything at this location in your code) _vbo_txr, // glTexCoord _vbos }; //------------------------------------------------------------------------------ // 'global' names and size for OpenGL mesh in VAO/VBO ... similar ot texture names/handles GLuint vao[1],vbo[_vbos],num_pnt=0; //------------------------------------------------------------------------------ void VAO_init_cube() // call this before VAO use,...but after OpenGL init ! 
{ //[1] first you need some model to render (mesh), here is a simple cube // size,position of cube - change it that it is visible in your scene const GLfloat a=1.0,x=0.0,y=0.0,z=0.0; // cube points 3f x,y,z GLfloat mesh_pos[]= { x-a,y-a,z-a,x-a,y+a,z-a,x+a,y+a,z-a,x+a,y-a,z-a, x-a,y-a,z+a,x-a,y+a,z+a,x+a,y+a,z+a,x+a,y-a,z+a, x-a,y-a,z-a,x-a,y-a,z+a,x+a,y-a,z+a,x+a,y-a,z-a, x-a,y+a,z-a,x-a,y+a,z+a,x+a,y+a,z+a,x+a,y+a,z-a, x-a,y-a,z-a,x-a,y+a,z-a,x-a,y+a,z+a,x-a,y-a,z+a, x+a,y-a,z-a,x+a,y+a,z-a,x+a,y+a,z+a,x+a,y-a,z+a, }; // cube colors 3f r,g,b GLfloat mesh_col[]= { 0.0,0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0,1.0,0.0,0.0, 0.0,0.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0, 0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,0.0,0.0, 0.0,1.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0, 1.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,0.0,1.0, }; // cube normals 3f x,y,z GLfloat mesh_tan[]= { -0.6,-0.6,-0.6,-0.6,+0.6,-0.6,+0.6,+0.6,-0.6,+0.6,-0.6,-0.6, -0.6,-0.6,+0.6,-0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,-0.6,+0.6, -0.6,-0.6,-0.6,-0.6,-0.6,+0.6,+0.6,-0.6,+0.6,+0.6,-0.6,-0.6, -0.6,+0.6,-0.6,-0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,-0.6, -0.6,-0.6,-0.6,-0.6,+0.6,-0.6,-0.6,+0.6,+0.6,-0.6,-0.6,+0.6, +0.6,-0.6,-0.6,+0.6,+0.6,-0.6,+0.6,+0.6,+0.6,+0.6,-0.6,+0.6, }; // cube texture coords 2f s,t GLfloat mesh_txr[]= { 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, }; // init VAO/VBO glGenVertexArrays(1,vao); // allocate 1 x VAO glGenBuffers(_vbos,vbo); // allocate _vbos x VBO // copy mesh to VAO/VBO ... 
after this you do not need the mesh anymore GLint i,sz,n; // n = number of numbers per 1 entry glBindVertexArray(vao[0]); num_pnt=sizeof(mesh_pos)/(sizeof(GLfloat)*3); // num of all points in mesh i=_OpenGLVAOgfx_pos; n=3; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_pos,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); i=_OpenGLVAOgfx_col; n=3; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_col,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); i=_OpenGLVAOgfx_tan; n=3; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_tan,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); i=_OpenGLVAOgfx_txr; n=2; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_txr,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); glBindVertexArray(0); } //------------------------------------------------------------------------------ void VAO_draw() // call this to draw your mesh,... need to enable and bind textures,... 
before use { glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glBindVertexArray(vao[0]); glEnableVertexAttribArray(_vbo_pos); glEnableVertexAttribArray(_vbo_col); glEnableVertexAttribArray(_vbo_tan); glDisableVertexAttribArray(_vbo_unused0); glEnableVertexAttribArray(_vbo_txr); glDrawArrays(GL_QUADS,0,num_pnt); glDisableVertexAttribArray(_vbo_pos); glDisableVertexAttribArray(_vbo_col); glDisableVertexAttribArray(_vbo_tan); glDisableVertexAttribArray(_vbo_unused0); glDisableVertexAttribArray(_vbo_unused1); glDisableVertexAttribArray(_vbo_txr); glBindVertexArray(0); } //------------------------------------------------------------------------------ void VAO_exit() // clean up ... call this when you do not need VAO/VBO anymore { glDisableVertexAttribArray(_vbo_pos); glDisableVertexAttribArray(_vbo_col); glDisableVertexAttribArray(_vbo_tan); glDisableVertexAttribArray(_vbo_unused0); glDisableVertexAttribArray(_vbo_unused1); glDisableVertexAttribArray(_vbo_txr); glBindVertexArray(0); glDeleteVertexArrays(1,vao); glDeleteBuffers(_vbos,vbo); } //------------------------------------------------------------------------------ [edit3] if you are win32/64 user you can try my IDE for GLSL It is very simple and easy to use, but cannot change texture/attrib locations. Press [F1] for help,... [F9] for run [F10] for return to normal OpenGL mode. Also txt-editor is little buggy sometimes but it is enough for my purpose. GLSL IDE