Recognize any logic errors? Any suggestions to improve this code or to enhance the results?
Image: Before & After
Video: Short video demonstrating current bump mapping results!
The normal and tangent vectors being sent to the vertex shader are correct.
Method in fragment shader:
vec3 processPointLight(in vec3 norm, in vec3 pos, in int lightIndex)
{
    float distance = length(lights[lightIndex].Position.xyz - pos.xyz);
    //Light Attenuation Computation
    float attenuation = 1.0f / (
        (lights[lightIndex].AttenuationConst +
         lights[lightIndex].AttenuationLinear * distance +
         lights[lightIndex].AttenuationQuad * distance * distance));
    vec3 textureDifResult;
    vec3 textureSpecResult;
    if (Toggle_Textures)
    {
        vec3 textureNrmResult;
        textureDifResult  = textureLod(Texture0, ((fs_in.textureCoord.xy * (vec2(1024) / vec2(7168))) + (vec2(1024) * (fs_in.textureUnits.x + 0)) / vec2(7168)), 0).rgb;
        textureNrmResult  = textureLod(Texture0, ((fs_in.textureCoord.xy * (vec2(1024) / vec2(7168))) + (vec2(1024) * (fs_in.textureUnits.x + 1)) / vec2(7168)), 0).rgb;
        textureSpecResult = textureLod(Texture0, ((fs_in.textureCoord.xy * (vec2(1024) / vec2(7168))) + (vec2(1024) * (fs_in.textureUnits.x + 2)) / vec2(7168)), 0).rgb;
        // Transform normal vector to range [-1,1]
        norm = (textureNrmResult * 2.0f - vec3(1.0f));
    }
    else
    {
        textureDifResult  = ModelColor.rgb;
        textureSpecResult = ModelColor.rgb;
    }
    // viewPoint, normal, tangent and bitangent are in view space, so we need to
    // transform the light's position to view space too. Special thanks to pleup!
    vec3 L = normalize(mat3(fs_in.mv_Matrix) * lights[lightIndex].Position.xyz) - fs_in.viewPoint;
    vec3 lightDir = normalize(vec3(dot(L, fs_in.tangent.xyz), dot(L, fs_in.bitangent.xyz), dot(L, norm)));
    vec3 reflection = reflect(-L, norm);
    // Light Diffuse Computation
    vec3 diffuse = (max(0.0f, dot(norm, lightDir)) * lights[lightIndex].Diffuse * textureDifResult);
    // Light Ambient Computation
    vec3 ambient = lights[lightIndex].Ambient;
    // Light Specular Computation
    vec3 specular = vec3(0.0f, 0.0f, 0.0f);
    //if (dot(lightDir, norm) > 0.0f)
    specular = pow(max(0.0f, dot(reflect(-lightDir, norm), fs_in.eyeDir.xyz)), 20.0) * textureSpecResult * lights[lightIndex].Specular * lights[lightIndex].SpecularPower;
    return min((ambient) + attenuation * (diffuse + specular), vec3(1.0));
}
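For reference, here is a minimal sketch (not drop-in code) of the conventional tangent-space flow. Names with no counterpart above (lightWorldPos, ViewMatrix, normalMapSample) are assumptions: if the light position is stored in world space, it should go through the view matrix alone (the model-view matrix would additionally bake in the model transform), the full mat4 should be used so the translation survives, and the TBN rows should use the interpolated geometric normal rather than the normal-map sample:
vec3 N = normalize(normalMapSample * 2.0 - 1.0);                 // tangent-space normal from the map
vec3 lightViewPos = (ViewMatrix * vec4(lightWorldPos, 1.0)).xyz; // full mat4 so the translation is kept
vec3 L = lightViewPos - fs_in.viewPoint;                         // light vector in view space
vec3 lightDir = normalize(vec3(dot(L, fs_in.tangent.xyz),        // rotate L into tangent space
                               dot(L, fs_in.bitangent.xyz),
                               dot(L, fs_in.normal.xyz)));       // geometric normal here, not N
float diff = max(dot(N, lightDir), 0.0);                         // everything below is tangent space
float spec = pow(max(dot(reflect(-lightDir, N), normalize(fs_in.eyeDir.xyz)), 0.0), 20.0);
In the code above, by contrast, lightDir's z-component is built with dot(L, norm) where norm is already the normal-map sample, so the diffuse dot product compares the mapped normal against a vector expressed in an inconsistent basis.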
Complete Vertex Shader:
#version 420 core
layout(location=0) in vec4 in_Position;
layout(location=1) in vec4 in_TexCoord;
layout(location=2) in vec4 in_Normal;
layout(location=3) in vec4 in_Tangent;
layout(location=4) in uvec4 in_TextureUnits;
// per frame
uniform mat4 ViewMatrix;
uniform mat4 ProjectionMatrix;
uniform vec4 EyePosition;
// per item
uniform mat4 ModelMatrix;
uniform mat4 ModelMatrixOrientation;
out VS_OUT
{
    vec4 eyePosition;
    vec4 worldPosition;
    vec4 normal;
    vec4 tangent;
    vec4 bitangent;
    vec4 eyeDir;
    vec3 textureCoord;
    flat uvec4 textureUnits;
    vec3 viewPoint;
    mat4 mv_Matrix;
} vs_out;
void main()
{
    gl_Position = ModelMatrix * in_Position;
    vs_out.worldPosition = ModelMatrix * in_Position;
    vs_out.eyePosition = ProjectionMatrix * ViewMatrix * ModelMatrix * in_Position;
    vs_out.textureCoord = vec3(in_TexCoord.x, in_TexCoord.y, in_TexCoord.z);
    vs_out.textureUnits = in_TextureUnits;
    gl_Position = ProjectionMatrix * ViewMatrix * gl_Position;
    vs_out.mv_Matrix = ViewMatrix * ModelMatrix;
    // vertex position in view space..
    vec4 P = vs_out.mv_Matrix * in_Position;
    vs_out.viewPoint = P.xyz;
    // Calculate normal (N) and tangent (T) vectors in view space from incoming object space vectors.
    vs_out.normal = normalize(vec4(mat3(vs_out.mv_Matrix) * in_Normal.xyz, 1.0f));
    vs_out.tangent = normalize(vec4(mat3(vs_out.mv_Matrix) * in_Tangent.xyz, 1.0f));
    // Calculate the bitangent vector (B) from the normal and tangent vectors.
    vs_out.bitangent = vec4(cross(vs_out.normal.xyz, vs_out.tangent.xyz), 1.0f);
    // The view vector is the vector from the point of interest to the viewer,
    // which in view space is simply the negative of the position.
    // Calculate that and multiply it by the TBN matrix.
    vec3 V = -P.xyz;
    vs_out.eyeDir = normalize(vec4(dot(V, vs_out.tangent.xyz), dot(V, vs_out.bitangent.xyz), dot(V, vs_out.normal.xyz), 1.0f));
    return;
}
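Two small caveats on the TBN construction above, sketched under the usual conventions: normalize() on a vec4 whose w is 1.0 divides the xyz part by a length that includes w, so it is safer to normalize the vec3 first and widen afterwards; and if the tangent generator stores a handedness sign in in_Tangent.w (a common convention, assumed here), the bitangent should be flipped by it:
vec3 N = normalize(mat3(vs_out.mv_Matrix) * in_Normal.xyz);  // normalize the vec3, not a vec4 with w = 1
vec3 T = normalize(mat3(vs_out.mv_Matrix) * in_Tangent.xyz);
T = normalize(T - dot(T, N) * N);                            // Gram-Schmidt: keep T perpendicular to N
vec3 B = cross(N, T) * in_Tangent.w;                         // assumed: w holds the +/-1 handedness sign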
Light properties:
<LightComponent type="PointLight" isEnabled="1" specularPower="1.0f" offsetX="0.0f" offsetY="40.0f" offsetZ="0.0f" ambientR="0.0f" ambientG="0.0f" ambientB="0.0f" diffuseR="0.3f" diffuseG="0.3f" diffuseB="0.3f" specularR="0.3f" specularG="0.3f" specularB="0.3f" attenConst="0.3f" attenLinear="0.000091427f" attenQuad="0.0000065429f" />
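A quick sanity check on those attenuation constants: with attenConst = 0.3, the shader's 1 / (0.3 + 0.000091427*d + 0.0000065429*d*d) evaluates to about 1 / 0.3 = 3.33 at d = 0 and still about 1 / 0.375 = 2.67 at d = 100, so the "attenuation" factor is actually amplifying the light at short and medium range. A constant term of 1.0 is the usual choice if the factor should only ever dim.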
Texture Manager function that loads the textures:
void cTextureManager::loadTextureMipmap(rapidxml::xml_node<>* textureNode) {
    std::string pathDif = textureNode->first_attribute("pathDif")->value();
    std::string pathNrm = textureNode->first_attribute("pathNrm")->value();
    std::string pathSpec = textureNode->first_attribute("pathSpec")->value();
    FREE_IMAGE_FORMAT imgFormat = FreeImage_GetFileType(pathDif.c_str(), 0); // Get the format (assume all three textures share it)
    // FreeImage_ConvertTo32Bits returns a new bitmap, so unload the originals to avoid leaking them.
    FIBITMAP* loadedDif = FreeImage_Load(imgFormat, pathDif.c_str());
    FIBITMAP* imagenDif = FreeImage_ConvertTo32Bits(loadedDif);
    FreeImage_Unload(loadedDif);
    FIBITMAP* loadedNrm = FreeImage_Load(imgFormat, pathNrm.c_str());
    FIBITMAP* imagenNrm = FreeImage_ConvertTo32Bits(loadedNrm);
    FreeImage_Unload(loadedNrm);
    FIBITMAP* loadedSpec = FreeImage_Load(imgFormat, pathSpec.c_str());
    FIBITMAP* imagenSpec = FreeImage_ConvertTo32Bits(loadedSpec);
    FreeImage_Unload(loadedSpec);
    int width = FreeImage_GetWidth(imagenDif); // Assume all images are the same size.
    int height = FreeImage_GetHeight(imagenDif);
    GLubyte* textureDif = new GLubyte[4 * width * height];
    GLubyte* textureNrm = new GLubyte[4 * width * height];
    GLubyte* textureSpec = new GLubyte[4 * width * height];
    char* pixelsDif = (char*)FreeImage_GetBits(imagenDif);
    char* pixelsNrm = (char*)FreeImage_GetBits(imagenNrm);
    char* pixelsSpec = (char*)FreeImage_GetBits(imagenSpec);
    // FreeImage loads in BGRA order, so swap the red and blue bytes (or upload with GL_BGRA).
    for (int j = 0; j < width * height; j++) {
        textureDif[j * 4 + 0] = pixelsDif[j * 4 + 2];
        textureDif[j * 4 + 1] = pixelsDif[j * 4 + 1];
        textureDif[j * 4 + 2] = pixelsDif[j * 4 + 0];
        textureDif[j * 4 + 3] = pixelsDif[j * 4 + 3];
        textureNrm[j * 4 + 0] = pixelsNrm[j * 4 + 2];
        textureNrm[j * 4 + 1] = pixelsNrm[j * 4 + 1];
        textureNrm[j * 4 + 2] = pixelsNrm[j * 4 + 0];
        textureNrm[j * 4 + 3] = pixelsNrm[j * 4 + 3];
        textureSpec[j * 4 + 0] = pixelsSpec[j * 4 + 2];
        textureSpec[j * 4 + 1] = pixelsSpec[j * 4 + 1];
        textureSpec[j * 4 + 2] = pixelsSpec[j * 4 + 0];
        textureSpec[j * 4 + 3] = pixelsSpec[j * 4 + 3];
    }
    // The bitmaps are no longer needed once the pixels are copied out.
    FreeImage_Unload(imagenDif);
    FreeImage_Unload(imagenNrm);
    FreeImage_Unload(imagenSpec);
    // Despite the name, this counter is a tile index into the atlas, not a mipmap level.
    int tempMipmapLevel = gCurrentMipmapLevel;
    std::string name = textureNode->first_attribute("name")->value();
    gMap_TextureNameToMipmapLevel[name] = tempMipmapLevel;
    gCurrentMipmapLevel += 3;
    // Assume the texture is already bound to the GL_TEXTURE_2D target.
    glTexSubImage2D(GL_TEXTURE_2D,                                      // 2D texture
                    0,                                                  // Level 0
                    (1024 * tempMipmapLevel), (1024 * tempMipmapLevel), // x/y offset: this tile sits on the atlas diagonal
                    1024, 1024,                                         // 1024 x 1024 texels
                    GL_RGBA,                                            // Four channel data
                    GL_UNSIGNED_BYTE,                                   // Unsigned byte data
                    (GLvoid*)textureDif);                               // Pointer to data
    delete[] textureDif;
    glTexSubImage2D(GL_TEXTURE_2D,
                    0,
                    1024 * (tempMipmapLevel + 1), 1024 * (tempMipmapLevel + 1),
                    1024, 1024,
                    GL_RGBA,
                    GL_UNSIGNED_BYTE,
                    (GLvoid*)textureNrm);
    delete[] textureNrm;
    glTexSubImage2D(GL_TEXTURE_2D,
                    0,
                    1024 * (tempMipmapLevel + 2), 1024 * (tempMipmapLevel + 2),
                    1024, 1024,
                    GL_RGBA,
                    GL_UNSIGNED_BYTE,
                    (GLvoid*)textureSpec);
    delete[] textureSpec;
    // int MaxTextureImageUnits;
    // glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &MaxTextureImageUnits);
}
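As a readability aside, the repeated atlas arithmetic in the fragment shader can be factored into one helper, since uv * (1024/7168) + 1024*k/7168 is just (uv + k) * 1024/7168. A sketch, assuming the 1024-texel tiles really are laid out along the diagonal of a 7168-texel atlas as in the loader above:
// Maps a per-tile UV in [0,1] to the atlas UV of the k-th diagonal tile.
vec2 atlasUV(vec2 uv, float k)
{
    const float tileSize  = 1024.0;
    const float atlasSize = 7168.0;
    return (uv + vec2(k)) * (tileSize / atlasSize);
}
// e.g. textureLod(Texture0, atlasUV(fs_in.textureCoord.xy, float(fs_in.textureUnits.x + 1)), 0) for the normal map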
Thanks in advance for any advice!
Related
I am coding a vertex and a fragment shader, trying to distort the surface of some water and then compute Blinn-Phong lighting on the surface. I can successfully displace the vertices with a simple noise function, but how can I find the distorted normals? Since it isn't a linear transformation, I am stuck. Could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
    axis = normalize(axis);
    float s = sin(angle);
    float c = cos(angle);
    float oc = 1.0 - c;
    return mat4(oc * axis.x * axis.x + c,          oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
                oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c,          oc * axis.y * axis.z - axis.x * s, 0.0,
                oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c,          0.0,
                0.0,                               0.0,                               0.0,                               1.0);
}
vec4 rotateLightVec(float deg, vec4 LV) {
    mat4 R = rotationMatrix(vec3(0, 0, 1), deg);
    return R * LV;
}
float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}
vec3 random2(vec3 p) {
    return fract(sin(vec3(dot(p, vec3(127.1, 311.7, 58.24)),
                          dot(p, vec3(269.5, 183.3, 657.3)),
                          dot(p, vec3(420.69, 69.420, 469.20)))) * 43758.5453);
}
void main()
{
    fs_Col = vs_Col;
    fs_UVs = vs_UV;
    fs_Anim = vs_Anim;
    fs_T2O = vs_T2O;
    mat3 invTranspose = mat3(u_ModelInvTr);
    fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);
    vec4 modelposition = u_Model * vs_Pos;
    if (vs_Anim != 0) { // if we want to animate this surface
        // check region in texture to decide which animatable type is drawn
        bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;
        if (water) {
            // define an oscillating time so that model can transition back and forth
            float t = (cos(u_Time * 0.05) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5)/25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        } else if (lava) {
            // define an oscillating time so that model can transition back and forth
            float t = (cos(u_Time * 0.01) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5)/25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        }
    }
    fs_dimVal = random1(modelposition.xyz/100.f);
    fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // Compute the direction in which the light source lies
    fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
    fs_Pos = modelposition;
    gl_Position = u_ViewProj * modelposition; // gl_Position is a built-in variable of OpenGL which is
                                              // used to render the final positions of the geometry's vertices
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}
float random1b(vec3 p) {
    return fract(sin(dot(p, vec3(169.1, 355.7, 195.999))) * 95751.5453);
}
float mySmoothStep(float a, float b, float t) {
    t = smoothstep(0, 1, t);
    return mix(a, b, t);
}
float cubicTriMix(vec3 p) {
    vec3 pFract = fract(p);
    float llb = random1(floor(p) + vec3(0,0,0));
    float lrb = random1(floor(p) + vec3(1,0,0));
    float ulb = random1(floor(p) + vec3(0,1,0));
    float urb = random1(floor(p) + vec3(1,1,0));
    float llf = random1(floor(p) + vec3(0,0,1));
    float lrf = random1(floor(p) + vec3(1,0,1));
    float ulf = random1(floor(p) + vec3(0,1,1));
    float urf = random1(floor(p) + vec3(1,1,1));
    float mixLoBack = mySmoothStep(llb, lrb, pFract.x);
    float mixHiBack = mySmoothStep(ulb, urb, pFract.x);
    float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
    float mixHiFront = mySmoothStep(ulf, urf, pFract.x);
    float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
    float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);
    return mySmoothStep(mixLo, mixHi, pFract.y);
}
float fbm(vec3 p) {
    float amp = 0.5;
    float freq = 4.0;
    float sum = 0.0;
    for (int i = 0; i < 8; i++) {
        sum += cubicTriMix(p * freq) * amp;
        amp *= 0.5;
        freq *= 2.0;
    }
    return sum;
}
void main()
{
    vec4 diffuseColor = texture(textureSampler, fs_UVs);
    bool apply_lambert = true;
    float specularIntensity = 0;
    if (fs_Anim != 0) {
        // check region in texture to decide which animatable type is drawn
        bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;
        if (lava) {
            // slowly gyrate texture and lighten and darken with random dimVal from vert shader
            vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01*u_Time),
                                  fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01*u_Time + 3.14159/2));
            diffuseColor = texture(textureSampler, movingUVs);
            vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
            vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
            diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65*sin(0.02*u_Time));
            apply_lambert = false;
        } else if (water) {
            // blend between 3 different points in texture to create a wavy subtle change over time
            vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
            diffuseColor = texture(textureSampler, fs_UVs);
            vec4 altColor = texture(textureSampler, offsetUVs);
            altColor.x += fs_dimVal * pow(altColor.x + .15, 5);
            altColor.y += fs_dimVal * pow(altColor.y + .15, 5);
            altColor.z += 0.5 * fs_dimVal * pow(altColor.z + .15, 5);
            diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35*sin(0.05*u_Time));
            offsetUVs -= 0.25f/16.f;
            vec4 newColor = texture(textureSampler, offsetUVs);
            diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5*sin(0.025*u_Time)) + fs_dimVal * vec4(0.025);
            diffuseColor.a = 0.7;
            // ----------------------------------------------------
            // Blinn-Phong Shading
            // ----------------------------------------------------
            vec4 lightDir = normalize(fs_LightVec - fs_Pos);
            vec4 viewDir = normalize(fs_CamPos - fs_Pos);
            vec4 halfVec = normalize(lightDir + viewDir);
            float shininess = 400.f;
            // Assign the variable declared at the top of main(); redeclaring it
            // here with 'float' would shadow it, and the specular term would
            // never reach the shading code below.
            specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0);
        }
    }
    // Calculate the diffuse term for Lambert shading
    float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
    // Avoid negative lighting values
    diffuseTerm = clamp(diffuseTerm, 0, 1);
    float ambientTerm = 0.3;
    float lightIntensity = diffuseTerm + ambientTerm; // Add a small float value to the color multiplier
                                                      // to simulate ambient lighting. This ensures that faces that are not
                                                      // lit by our point light are not completely black.
    vec3 col = diffuseColor.rgb;
    // Compute final shaded color
    if (apply_lambert) {
        col = col * lightIntensity + col * specularIntensity;
    }
    // Check the rare, special case where we draw a face between two different transparent blocks as opaque
    if (fs_T2O != 0) {
        out_Col = vec4(col, 1.f);
    } else {
        out_Col = vec4(col, diffuseColor.a);
    }
    // distance fog!
    vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
    float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
    float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
    out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing Blinn-Phong in the fragment shader, but I think it is wrong simply because of the wrong normals. I think this can be done with some sort of tangent and cross product solution, but how can I know the tangent of the surface given that we only know the vertex positions?
I am not using Unity, just bare C++, and most of the answers I am finding online are for Java or Unity, which I do not understand.
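Not an authoritative answer, but the standard trick when the displacement has no easy analytic derivative is to displace two nearby points as well and cross the resulting tangent vectors. A sketch, where displace() is a hypothetical helper that applies exactly the same water offset that main() applies to modelposition:
// Finite-difference normal: displace the point and two neighbors, then cross
// the two surface tangents. n is the undistorted normal, eps a small step.
vec3 displacedNormal(vec3 p, vec3 n)
{
    const float eps = 0.01;
    // Pick two directions tangent to the undisplaced surface:
    vec3 t = normalize(abs(n.y) < 0.99 ? cross(n, vec3(0.0, 1.0, 0.0)) : vec3(1.0, 0.0, 0.0));
    vec3 b = cross(n, t);
    vec3 p0 = displace(p);
    vec3 pT = displace(p + eps * t);
    vec3 pB = displace(p + eps * b);
    return normalize(cross(pT - p0, pB - p0));
}
One caveat: random2 is a hash, so it changes almost arbitrarily between nearby inputs; finite differences only behave well if a smooth noise (the fbm/cubicTriMix already in the fragment shader would do) drives the displacement.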
I am currently trying to implement a Kawase Blur shader in GLES3.2.
I found the appropriate fragment shaders on shadertoy, and implemented them as such:
Down:
#version 100
precision mediump float;
varying mediump vec2 v_texcoord; // is in 0-1
uniform sampler2D tex;
uniform float radius;
uniform vec2 halfpixel;
void main() {
    vec2 uv = v_texcoord * 2.0;
    vec4 sum = texture2D(tex, uv) * 4.0;
    sum += texture2D(tex, uv - halfpixel.xy * radius);
    sum += texture2D(tex, uv + halfpixel.xy * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, -halfpixel.y) * radius);
    sum += texture2D(tex, uv - vec2(halfpixel.x, -halfpixel.y) * radius);
    gl_FragColor = sum / 8.0;
}
up:
#version 100
precision mediump float;
varying mediump vec2 v_texcoord; // is in 0-1
uniform sampler2D tex;
uniform float radius;
uniform vec2 halfpixel;
void main() {
    vec2 uv = v_texcoord / 2.0;
    vec4 sum = texture2D(tex, uv + vec2(-halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, -halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, -halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, -halfpixel.y) * radius) * 2.0;
    gl_FragColor = sum / 12.0;
}
The thought process is simple: the texture I want to blur is in the primary FB. It is first blurred down into the mirror FB, then blurred down further and back up on the mirror FB, and finally rendered later.
The down shader works great, and produces the expected result of a small image in the top-left corner, with streaks throughout the rest of the framebuffer.
Image of shader Down
However, when trying to apply even one pass of the up shader, it starts producing horrible artifacts: blocks of pixels blinking everywhere, and the screen divided into 4 sections vertically, in each of which the original image gets bigger, blurrier, and more glitchy.
Image of shader Up
This result is with 2 passes of the down shader and one of the up shader.
The code:
const float fullVerts[] = {
    1, 0, // top right
    0, 0, // top left
    1, 1, // bottom right
    0, 1, // bottom left
};
// ...
{
    auto drawPass = [&](CShader* pShader) {
        glActiveTexture(GL_TEXTURE0);
        if (pShader == &m_shBLUR2)
            glBindTexture(PMIRRORFB->m_cTex.m_iTarget, PMIRRORFB->m_cTex.m_iTexID);
        glTexParameteri(PMIRRORFB->m_cTex.m_iTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glUseProgram(pShader->program);
        // prep two shaders
        glUniformMatrix3fv(pShader->proj, 1, GL_FALSE, glMatrix);
        glUniform1f(glGetUniformLocation(pShader->program, "radius"), BLURSIZE * (a / 255.f)); // this makes the blursize change with a
        if (pShader == &m_shBLUR1)
            glUniform2f(glGetUniformLocation(m_shBLUR1.program, "halfpixel"), 0.5f / (m_RenderData.pMonitor->vecSize.x / 2.f), 0.5f / (m_RenderData.pMonitor->vecSize.y / 2.f));
        else
            glUniform2f(glGetUniformLocation(m_shBLUR2.program, "halfpixel"), 0.5f / (m_RenderData.pMonitor->vecSize.x * 2.f), 0.5f / (m_RenderData.pMonitor->vecSize.y * 2.f));
        glUniform1i(pShader->tex, 0);
        glVertexAttribPointer(pShader->posAttrib, 2, GL_FLOAT, GL_FALSE, 0, fullVerts);
        glVertexAttribPointer(pShader->texAttrib, 2, GL_FLOAT, GL_FALSE, 0, fullVerts);
        glEnableVertexAttribArray(pShader->posAttrib);
        glEnableVertexAttribArray(pShader->texAttrib);
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
        glDisableVertexAttribArray(pShader->posAttrib);
        glDisableVertexAttribArray(pShader->texAttrib);
    };
    // draw the things.
    // first draw is prim -> mirr
    PMIRRORFB->bind();
    clear(CColor(0, 0, 0, 0));
    glBindTexture(m_mMonitorRenderResources[m_RenderData.pMonitor].primaryFB.m_cTex.m_iTarget, m_mMonitorRenderResources[m_RenderData.pMonitor].primaryFB.m_cTex.m_iTexID);
    drawPass(&m_shBLUR1);
    // now draw from mirror->mirror
    glBindTexture(PMIRRORFB->m_cTex.m_iTarget, PMIRRORFB->m_cTex.m_iTexID);
    for (int i = 1; i < BLURPASSES; ++i) {
        drawPass(&m_shBLUR1); // down
    }
    for (int i = BLURPASSES - 1; i >= 0; --i) {
        drawPass(&m_shBLUR2); // up
    }
}
What's causing the artifacts?
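Not a definitive diagnosis, but two things stand out just from reading the code. The uv * 2.0 in the down shader and uv / 2.0 in the up shader come from dual-Kawase variants in which every down pass renders into a half-size target and every up pass into a double-size one; if each pass here draws a full-screen quad into same-size framebuffers, those scale factors sample the wrong quadrant. Also, the mirror-to-mirror passes appear to sample the mirror FB's texture while rendering into that same framebuffer, which is a feedback loop with undefined results in OpenGL and would explain blinking blocks. A sketch of the up shader for the same-size, ping-pong case:
#version 100
precision mediump float;
varying mediump vec2 v_texcoord;
uniform sampler2D tex;
uniform float radius;
uniform vec2 halfpixel;
void main() {
    vec2 uv = v_texcoord; // no / 2.0: source and destination are assumed the same size
    vec4 sum = texture2D(tex, uv + vec2(-halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, -halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, -halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, -halfpixel.y) * radius) * 2.0;
    gl_FragColor = sum / 12.0;
}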
I have a plane that is created using this method: here's an image showing my way of thinking.
I separate these strips of triangles so that each one can have a unique color while I am still able to use vertex indexing.
My problem is with the normals buffer. I create the normals like this (this is in the algorithm that calculates the vertices):
//Calculating Vertices
for (unsigned int z = 0; z < m_size; z++)
{
    for (unsigned int x = 0; x <= m_size; x++)
    {
        Vertices.push_back(glm::vec3(m_startingPos.x + x * m_sqrWidth, m_startingPos.y, m_startingPos.z + z * m_sqrWidth));
        Vertices.push_back(glm::vec3(m_startingPos.x + x * m_sqrWidth, m_startingPos.y, m_startingPos.z + (z + 1) * m_sqrWidth));
        glm::vec3 TL = glm::vec3(m_startingPos.x + x * m_sqrWidth, m_startingPos.y, m_startingPos.z + z * m_sqrWidth);
        glm::vec3 TR = glm::vec3(m_startingPos.x + (x + 1) * m_sqrWidth, m_startingPos.y, m_startingPos.z + z * m_sqrWidth);
        glm::vec3 BL = glm::vec3(m_startingPos.x + x * m_sqrWidth, m_startingPos.y, m_startingPos.z + (z + 1) * m_sqrWidth);
        glm::vec3 BR = glm::vec3(m_startingPos.x + (x + 1) * m_sqrWidth, m_startingPos.y, m_startingPos.z + (z + 1) * m_sqrWidth);
        //Normals:
        Normals.push_back(glm::normalize(glm::cross(TL - BR, BL - BR)));
        Normals.push_back(glm::normalize(glm::cross(TR - BR, TL - BR)));
        //Color:
        colors.push_back(0.0f); colors.push_back(0.0f); colors.push_back(0.5f);
        colors.push_back(0.0f); colors.push_back(0.5f); colors.push_back(0.0f);
    }
}
So every normal is (0, 1, 0); I know this.
Here I create a buffer and an attribute pointer for the normals (it's all in a class, so unsigned int normalsBuffer is declared in the .h file and initialized to NULL):
glGenBuffers(1, &normalsBuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalsBuffer);
glBufferData(GL_ARRAY_BUFFER, Normals.size() * sizeof(float) * 3, &Normals[0].x, GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
In glVertexAttribPointer the index is set to 2 because vertices take 0 and colors take 1.
So now here's my shader:
#shader vertex
#version 330 core
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 color_in;
layout(location = 2) in vec3 normals_in;
uniform mat4 u_MVP;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
out vec3 FragPos;
out vec3 Normal;
out vec3 color_f;
void main()
{
    color_f = color_in;
    FragPos = vec3(model * vec4(position, 1.0));
    Normal = normals_in;
    Normal = mat3(transpose(inverse(model))) * normals_in;
    gl_Position = projection * view * vec4(FragPos, 1.0);
};
#shader fragment
#version 330 core
out vec4 color;
in flat vec3 Normal;
in vec3 FragPos;
in flat vec3 color_f;
uniform vec4 u_Color;
uniform vec3 lightPos;
uniform vec3 viewPos;
uniform vec3 lightColor;
void main()
{
    vec3 objectColor = color_f;
    // ambient
    float ambientStrength = 0.1;
    vec3 ambient = ambientStrength * lightColor;
    // diffuse
    vec3 norm = normalize(Normal);
    vec3 lightDir = normalize(lightPos - FragPos);
    float diff = max(dot(norm, lightDir), 0.0);
    vec3 diffuse = diff * lightColor;
    // specular
    float specularStrength = 0.5;
    vec3 viewDir = normalize(viewPos - FragPos);
    vec3 reflectDir = reflect(-lightDir, norm);
    float spec = pow(max(dot(viewDir, reflectDir), 0.0), 32);
    vec3 specular = specularStrength * spec * lightColor;
    vec3 result = (ambient + diffuse + specular) * objectColor;
    color = vec4(result, 1.0);
}
Here's an example of how an object with the same shader works and how my plane behaves; it totally lacks diffuse lighting.
If I go into the fragment shader and set vec3 norm = vec3(0.0, 1.0, 0.0), it all works fine, but that's not the way I want to do it.
So I send the color data the same way and it's fine, but sending the normals data doesn't seem to work.
Here's how it looks when I go into the fragment shader and set norm to (0, 1, 0):
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
so you enable the vertex attribute array for the color (index 1), but you set the pointer for index 2. You should enable the vertex attribute array for the normals as well, with glEnableVertexAttribArray(2). Also make sure you got the (void*)0 offset correct: if you are using a struct, use offsetof; it is more reliable, especially if you use a compiler with optimizations.
I've got a basic OpenGL application and I want to use my projection matrix.
This is my matrix:
WorldCoordinates.m[0][0] = 2.0f / Width - 1.0f; WorldCoordinates.m[0][1] = 0; WorldCoordinates.m[0][2] = 0, WorldCoordinates.m[0][3] = 0;
WorldCoordinates.m[1][0] = 0; WorldCoordinates.m[1][1] = 2.0f / Height - 1.0f; WorldCoordinates.m[1][2] = 0, WorldCoordinates.m[1][3] = 0;
WorldCoordinates.m[2][0] = 0; WorldCoordinates.m[2][1] = 0; WorldCoordinates.m[2][2] = 0, WorldCoordinates.m[2][3] = 0;
WorldCoordinates.m[3][0] = 0; WorldCoordinates.m[3][1] = 0; WorldCoordinates.m[3][2] = 0, WorldCoordinates.m[3][3] = 0;
(WorldCoordinates is a Matrix4 struct that contains just one member, m, which is a float[4][4]; Width and Height are two ints.)
I then upload this matrix to my vertex shader like this:
shader.Bind();
glUniformMatrix4fv(glGetUniformLocation(shader.GetProgramID(), "worldCoordinates"), 1, GL_TRUE, &WorldCoordinates.m[0][0]);
(Shader is a class with a Bind() method that just calls glUseProgram.)
This is my vertex shader GLSL:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 Color;
out vec2 TexCoord;
uniform mat4 worldCoordinates;
void main()
{
    gl_Position = worldCoordinates * vec4(position, 1.0f);
    Color = color;
    TexCoord = texCoord;
}
Using this, it doesn't work. But changing the gl_Position call to this:
gl_Position = vec4(vec3(position.x * 1/400 -1, position.y * 1/300 -1, 1.0f), 1.0f);
it renders as expected. Why is that?
This is how you build an orthographic projection matrix:
static void
mat4_ortho(mat4_t m, float left, float right, float bottom, float top, float near, float far)
{
    float rl = right - left;
    float tb = top - bottom;
    float fn = far - near;
    mat4_zero(m);
    m[ 0] = 2.0f / rl;
    m[ 5] = 2.0f / tb;
    m[10] = -2.0f / fn;
    m[12] = -(left + right) / rl;
    m[13] = -(top + bottom) / tb;
    m[14] = -(far + near) / fn;
    m[15] = 1.0f;
}
For your case, you'd set left = 0, right = width, bottom = 0, top = height; near and far don't matter, just set -1.0 and 1.0 for instance: mat4_ortho(m, 0.0f, width, 0.0f, height, -1.0f, 1.0f).
With such a matrix, the vertex coordinates you use for drawing will map 1:1 with the pixels on screen.
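For comparison, note why the hand-built matrix in the question cannot work: 2.0f / Width - 1.0f is a single scalar (about -0.9975 for Width = 800), and with m[2][2] and m[3][3] both 0 every vertex ends up with w = 0. The working fallback line is a scale plus a translation with w = 1, which is exactly what the matrix above encodes (the 2/rl scale on the diagonal, the -1 translation in the last column). Written out per vertex, assuming Width = 800 and Height = 600 to match the 1/400 and 1/300 factors:
gl_Position = vec4(position.x * (2.0 / 800.0) - 1.0, // scale, then translate
                   position.y * (2.0 / 600.0) - 1.0,
                   1.0,
                   1.0);                             // w must be 1 for positions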
I am attempting to reconstruct my fragment's position from a depth value stored in a GL_DEPTH_ATTACHMENT. To do this, I linearize the depth, then multiply it by a ray from the camera position to the corresponding point on the far plane.
This method is the second one described here. In order to get the ray from the camera to the far plane, I retrieve rays to the four corners of the far plane, pass them to my vertex shader, then interpolate them into the fragment shader. I am using the following code to get the rays from the camera to the far plane's corners in world space.
std::vector<float> Camera::GetFlatFarFrustumCorners() {
    // rotation is the orientation of my camera in a quaternion.
    glm::quat inverseRotation = glm::inverse(rotation);
    glm::vec3 localUp = glm::normalize(inverseRotation * glm::vec3(0.0f, 1.0f, 0.0f));
    glm::vec3 localRight = glm::normalize(inverseRotation * glm::vec3(1.0f, 0.0f, 0.0f));
    float farHeight = 2.0f * tan(90.0f / 2) * 100.0f;
    float farWidth = farHeight * aspect;
    // 100.0f is the distance to the far plane. position is the location of the camera in world space.
    glm::vec3 farCenter = position + glm::vec3(0.0f, 0.0f, -1.0f) * 100.0f;
    glm::vec3 farTopLeft = farCenter + (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farTopRight = farCenter + (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));
    glm::vec3 farBottomLeft = farCenter - (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farBottomRight = farCenter - (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));
    return {
        farTopLeft.x, farTopLeft.y, farTopLeft.z,
        farTopRight.x, farTopRight.y, farTopRight.z,
        farBottomLeft.x, farBottomLeft.y, farBottomLeft.z,
        farBottomRight.x, farBottomRight.y, farBottomRight.z
    };
}
Is this a correct way to retrieve the corners of the far plane in world space?
When I use these corners with my shaders, the results are incorrect, and what I get seems to be in view space. These are the shaders I am using:
Vertex Shader:
layout(location = 0) in vec2 vp;
layout(location = 1) in vec3 textureCoordinates;
uniform vec3 farFrustumCorners[4];
uniform vec3 cameraPosition;
out vec2 st;
out vec3 frustumRay;
void main () {
    st = textureCoordinates.xy;
    gl_Position = vec4(vp, 0.0, 1.0);
    frustumRay = farFrustumCorners[int(textureCoordinates.z) - 1] - cameraPosition;
}
Fragment Shader:
in vec2 st;
in vec3 frustumRay;
uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;
uniform vec3 cameraPosition;
uniform vec3 lightPosition;
out vec3 color;
void main () {
    // Far and near distances; used to linearize the depth value.
    float f = 100.0;
    float n = 0.1;
    float depth = (2 * n) / (f + n - (texture(depthTexture, st).x) * (f - n));
    vec3 position = cameraPosition + (normalize(frustumRay) * depth);
    vec3 normal = texture(normalTexture, st).xyz; // .xyz needed: texture() returns a vec4
    float k = 0.00001;
    vec3 distanceToLight = lightPosition - position;
    float distanceLength = length(distanceToLight);
    float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
    float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
    vec3 diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;
    vec3 gamma = vec3(1.0 / 2.2);
    color = pow(texture(colorTexture, st).xyz + diffuse, gamma);
    //color = texture(colorTexture, st);
    //colour.r = (2 * n) / (f + n - texture( tex, st ).x * (f - n));
    //colour.g = (2 * n) / (f + n - texture( tex, st ).y * (f - n));
    //colour.b = (2 * n) / (f + n - texture( tex, st ).z * (f - n));
}
This is what my scene's lighting looks like under these shaders:
I am pretty sure that this is the result of either my reconstructed position being completely wrong, or it being in the wrong space. What is wrong with my reconstruction, and what can I do to fix it?
What you will first want to do is develop a temporary addition to your G-Buffer setup that stores the initial position of each fragment in world/view space (really, whatever space you are trying to reconstruct here). Then write a shader that does nothing but reconstruct these positions from the depth buffer. Set everything up so that half of your screen displays the original G-Buffer and the other half displays your reconstructed position. You should be able to immediately spot discrepancies this way.
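A sketch of that split-screen debug shader, assuming a temporary gPosition texture was added to the G-Buffer and reconstruct() is whatever reconstruction is under test (both names are hypothetical):
// Left half: position as stored in the G-Buffer; right half: amplified error.
vec3 stored = texture(gPosition, st).xyz;
vec3 rebuilt = reconstruct(texture(depthTexture, st).x);
if (st.x < 0.5)
    color = stored;
else
    color = abs(rebuilt - stored) * 100.0; // scale the difference so small errors are visible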
That said, you might want to take a look at an implementation I have used in the past to reconstruct (object space) position from the depth buffer. It basically gets you into view space first, then uses the inverse modelview matrix to go to object space. You can adjust it for world space trivially. It is probably not the most flexible implementation, what with FOV being hard-coded and all, but you can easily modify it to use uniforms instead...
Trimmed down fragment shader:
flat in mat4 inv_mv_mat;
in vec2 uv;
...
float linearZ (float z)
{
#ifdef INVERT_NEAR_FAR
    const float f = 2.5;
    const float n = 25000.0;
#else
    const float f = 25000.0;
    const float n = 2.5;
#endif
    return n / (f - z * (f - n)) * f;
}
vec4 reconstruct_pos (float depth)
{
    depth = linearZ (depth);
    vec4 pos = vec4 (uv * depth, -depth, 1.0);
    vec4 ret = (inv_mv_mat * pos);
    return ret / ret.w;
}
It takes a little additional setup in the vertex shader stage of the deferred shading lighting pass, which looks like this:
#version 150 core
in vec4 vtx_pos;
in vec2 vtx_st;
uniform mat4 modelview_mat; // Matrix used when the G-Buffer was built
uniform mat4 camera_matrix; // Matrix used to stretch the G-Buffer over the viewport
uniform float buffer_res_x;
uniform float buffer_res_y;
out vec2 tex_st;
flat out mat4 inv_mv_mat;
out vec2 uv;
// Hard-Coded 45 degree FOV
//const float fovy = 0.78539818525314331; // NV pukes on the line below!
//const float fovy = radians (45.0);
//const float tan_half_fovy = tan (fovy * 0.5);
const float tan_half_fovy = 0.41421356797218323;
float aspect = buffer_res_x / buffer_res_y;
vec2 inv_focal_len = vec2 (tan_half_fovy * aspect,
tan_half_fovy);
const vec2 uv_scale = vec2 (2.0, 2.0);
const vec2 uv_translate = vec2 (1.0, 1.0);
void main (void)
{
    inv_mv_mat = inverse (modelview_mat);
    tex_st = vtx_st;
    gl_Position = camera_matrix * vtx_pos;
    uv = (vtx_st * uv_scale - uv_translate) * inv_focal_len;
}
Depth range inversion is something you might find useful for deferred shading, normally a perspective depth buffer gives you more precision than you need at close range and not enough far away for quality reconstruction. If you flip things on their head by inverting the depth range you can even things out a little bit while still using the hardware depth buffer. This is discussed in detail here.