OpenGL Matrix Multiplication C++ - c++

So I am trying to multiply rotation and translation matrices together and I can't quite figure out what is going wrong.
If, in the program I multiply a translation matrix by a rotation matrix then send that matrix as a uniform to my shader program I end up with the object becoming 2D then 3D again as it spins [ https://a.pomf.se/xvvrsg.mp4 ] (object on the right).
shader.setUniformMat4("model_matrix", Matrix4::translation(Vector3(10.0f, 0.0f, 0.0f)) * Matrix4::rotation(rotation, Vector3(0.0f, 1.0f, 0.0f)));
(vertex shader)
#version 330 core
layout (location = 0) in vec4 in_position;
layout (location = 1) in vec4 in_normal;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 model_matrix = mat4(1.0);
out vec4 pos;
out vec4 normal;
void main()
{
pos = pr_matrix * vw_matrix * model_matrix * in_position;
normal = in_normal;
gl_Position = pos;
}
However if I send my individual translation and rotation matrices as separate uniforms and then multiply them in the shader to create my model matrix it works as intended [ https://a.pomf.se/jyxpnb.mp4 ] (object on the right).
shader.setUniformMat4("translation_matrix", Matrix4::translation(Vector3(10.0f, 0.0f, 0.0f)));
shader.setUniformMat4("rotation_matrix", Matrix4::rotation(rotation, Vector3(0.0f, 1.0f, 0.0f)));
shader.setUniformMat4("scale_matrix", Matrix4::identity());
(vertex shader)
#version 330 core
layout (location = 0) in vec4 in_position;
layout (location = 1) in vec4 in_normal;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 translation_matrix = mat4(1.0);
uniform mat4 rotation_matrix = mat4(1.0);
uniform mat4 scale_matrix = mat4(1.0);
out vec4 pos;
out vec4 normal;
void main()
{
mat4 model_matrix = translation_matrix * rotation_matrix * scale_matrix;
pos = pr_matrix * vw_matrix * model_matrix * in_position;
normal = in_normal;
gl_Position = pos;
}
This leads me to believe that there must be an error in my multiplication of matrices, this is how I am currently doing it:
Matrix4 &Matrix4::multiply(const Matrix4 &other)
{
elements[0] = elements[0] * other.elements[0] + elements[4] * other.elements[1] + elements[8] * other.elements[2] + elements[12] * other.elements[3];
elements[1] = elements[1] * other.elements[0] + elements[5] * other.elements[1] + elements[9] * other.elements[2] + elements[13] * other.elements[3];
elements[2] = elements[2] * other.elements[0] + elements[6] * other.elements[1] + elements[10] * other.elements[2] + elements[14] * other.elements[3];
elements[3] = elements[3] * other.elements[0] + elements[7] * other.elements[1] + elements[11] * other.elements[2] + elements[15] * other.elements[3];
elements[4] = elements[0] * other.elements[4] + elements[4] * other.elements[5] + elements[8] * other.elements[6] + elements[12] * other.elements[7];
elements[5] = elements[1] * other.elements[4] + elements[5] * other.elements[5] + elements[9] * other.elements[6] + elements[13] * other.elements[7];
elements[6] = elements[2] * other.elements[4] + elements[6] * other.elements[5] + elements[10] * other.elements[6] + elements[14] * other.elements[7];
elements[7] = elements[3] * other.elements[4] + elements[7] * other.elements[5] + elements[11] * other.elements[6] + elements[15] * other.elements[7];
elements[8] = elements[0] * other.elements[8] + elements[4] * other.elements[9] + elements[8] * other.elements[10] + elements[12] * other.elements[11];
elements[9] = elements[1] * other.elements[8] + elements[5] * other.elements[9] + elements[9] * other.elements[10] + elements[13] * other.elements[11];
elements[10] = elements[2] * other.elements[8] + elements[6] * other.elements[9] + elements[10] * other.elements[10] + elements[14] * other.elements[11];
elements[11] = elements[3] * other.elements[8] + elements[7] * other.elements[9] + elements[11] * other.elements[10] + elements[15] * other.elements[11];
elements[12] = elements[0] * other.elements[12] + elements[4] * other.elements[13] + elements[8] * other.elements[14] + elements[12] * other.elements[15];
elements[13] = elements[1] * other.elements[12] + elements[5] * other.elements[13] + elements[9] * other.elements[14] + elements[13] * other.elements[15];
elements[14] = elements[2] * other.elements[12] + elements[6] * other.elements[13] + elements[10] * other.elements[14] + elements[14] * other.elements[15];
elements[15] = elements[3] * other.elements[12] + elements[7] * other.elements[13] + elements[11] * other.elements[14] + elements[15] * other.elements[15];
return *this;
}
I did have a nested loop to do this but I ended up writing it all out while trying to find out an answer to this problem. Bear in mind the matrices are in column major and do not get transposed by OpenGL
The rotation and translation matrices are as follows but I don't believe there is any problem with them:
Matrix4 Matrix4::translation(const Vector3 &translation)
{
Matrix4 result(1.0f);
result.elements[0 + 3 * 4] = translation.x;
result.elements[1 + 3 * 4] = translation.y;
result.elements[2 + 3 * 4] = translation.z;
return result;
}
Matrix4 Matrix4::rotation(float angle, const Vector3 &axis)
{
Matrix4 result(1.0f);
float r = toRadians(angle);
float c = (float)cos(r);
float s = (float)sin(r);
float cFlip = 1.0f - c;
result.elements[0 + 0 * 4] = axis.x * cFlip + c;
result.elements[1 + 0 * 4] = axis.y * axis.x * cFlip + axis.z * s;
result.elements[2 + 0 * 4] = axis.x * axis.z * cFlip - axis.y * s;
result.elements[0 + 1 * 4] = axis.x * axis.y * cFlip - axis.z * s;
result.elements[1 + 1 * 4] = axis.y * cFlip + c;
result.elements[2 + 1 * 4] = axis.y * axis.z * cFlip + axis.x * s;
result.elements[0 + 2 * 4] = axis.x * axis.y * cFlip + axis.y * s;
result.elements[1 + 2 * 4] = axis.y * axis.z * cFlip - axis.x * s;
result.elements[2 + 2 * 4] = axis.z * cFlip + c;
return result;
}
Any ideas on what the problem here could be or how to fix it would be greatly appreciated :^)

At your multiply function, you wrote:
elements[0] = elements[0] * other.elements[0] ...
...
Notice that element[0] got its contents actualized now and then you do:
elements[8] = elements[0] * other.elements[8] ...
which use the new value and not the original one. I guess, you want to make a copy of your original matrix before doing this multiplication

opssss !! i just saw !!!
in your multiply, your output matrice is the first input matrix, so the latest operations are calculated with coefficient of the multiplied matrix !! :
elements[0] = elements[0] * .....
....
elements[4] = elements[0] * ..... /* here element[ 0 ] is the top left
element of the multiplied matix */
moreover, operator* shouldn't modfify (nor return) one of his operand, operator*= is here for that

Related

OpenGL Triangle pipe around line segment

I would like to ask how can I render in geometry shader a triangle pipe from a line segment?
I first compute perpendicular vector "perp" to the line vector "axis".
Then I rotate the "perp" vector few times by "rotate" function.
Since mesh is composed from 8 vertices I am trying to use "triangle_strip".
My current code :
#version 330 core
layout (lines) in;
layout(triangle_strip, max_vertices = 8) out;//triangle_strip
uniform float u_thickness ;
uniform vec2 u_viewportSize ;
uniform bool u_scale_width_by_zoom ;
in gl_PerVertex
{
vec4 gl_Position;
//float gl_PointSize;
//float gl_ClipDistance[];
} gl_in[];
vec4 rotate(vec4 p, float x, float y, float z,float angle )
{
vec3 q;
q[0] = p[0] * (x*x * (1.0 - cos(angle)) + cos(angle))
+ p[1] * (x*y * (1.0 - cos(angle)) + z * sin(angle))
+ p[2] * (x*z * (1.0 - cos(angle)) - y * sin(angle));
q[1] = p[0] * (y*x * (1.0 - cos(angle)) - z * sin(angle))
+ p[1]* (y*y * (1.0 - cos(angle)) + cos(angle))
+ p[2] * (y*z * (1.0 - cos(angle)) + x * sin(angle));
q[2] = p[0] * (z*x * (1.0 - cos(angle)) + y * sin(angle))
+ p[1] * (z*y * (1.0 - cos(angle)) - x * sin(angle))
+ p[2] * (z*z * (1.0 - cos(angle)) + cos(angle));
return vec4(q, 0.0);
}
void main() {
//https://stackoverflow.com/questions/54686818/glsl-geometry-shader-to-replace-gllinewidth
vec4 p1 = gl_in[0].gl_Position;
vec4 p2 = gl_in[1].gl_Position;
//tube
// Specify the axis to rotate about:
vec4 axis = p2-p1;
float x = axis[0];
float y = axis[1];
float z = axis[2];
axis = normalize(axis);
//float length = hypotf(axis[0], hypotf(axis[1], axis[2]));
float length = sqrt((axis[0]*axis[0]) + sqrt(axis[1]*axis[1]+ axis[2]*axis[2]));
float dir_scalar = (axis[0] > 0.0) ? length : -length;
float xt = axis[0] + dir_scalar;
float dot = -axis[1] / (dir_scalar * xt);
vec3 perp_0 = vec3(dot * xt, 1.0f + dot * axis.y, dot * axis.z);
perp_0 = normalize(perp_0);
vec4 perp = vec4(perp_0,0)*u_thickness*0.001;
//side0
vec4 p1_1 = p1+perp;
vec4 p1_2 = p2+perp;
vec4 perp_rot_2=rotate(perp,x,y,z,60.0 * 3.14 / 180.0);
vec4 p2_1 = p1+perp_rot_2;
vec4 p2_2 = p2+perp_rot_2;
vec4 perp_rot_3=rotate(perp,x,y,z, 120.0 * 3.14 / 180.0);
vec4 p3_1 = p1+perp_rot_3;
vec4 p3_2 = p2+perp_rot_3;
gl_Position = p1_1;
EmitVertex();
gl_Position = p1_2;
EmitVertex();
gl_Position = p2_1;
EmitVertex();
gl_Position = p2_2;
EmitVertex();
gl_Position = p3_1;
EmitVertex();
gl_Position = p3_2;
EmitVertex();
gl_Position = p1_1;
EmitVertex();
gl_Position = p1_2;
EmitVertex();
EndPrimitive();
}
It produces wrong results:

Half of normal mapped object is inverted

I have tried to implement normal mapping for 3 days now in vulkan. I pre-calculate the tangents and bitangents in the simple obj loader i made and create the TBN matrix in the shader. For now I'm just trying to get it to work, optimizations is saved for later. I'm very confused for why this problem occurs, I have googled around a lot which led me to check for handedness in the texture coordinates. This hasn't solved my problem though. I'm not sure what could be the problem or why this doesn't look correct. I worked with openGL before and this problem also occurred then. The normal mapping works on another model, but not this one.
Example of the problem
This is how i pre-calculate the tangents and bitangents
for (unsigned int i = 0; i < vertices.size(); i += 3)
{
Vector3f deltapos1 = vertices[i + 1].position - vertices[i].position;
Vector3f deltapos2 = vertices[i + 2].position - vertices[i].position;
Vector2f deltauv1 = vertices[i + 1].uv - vertices[i].uv;
Vector2f deltauv2 = vertices[i + 2].uv - vertices[i].uv;
float f;
Vector3f tangent(1, 0, 0);
Vector3f bitangent(0, 1, 0);
float det = deltauv1.x * deltauv2.y - deltauv1.y * deltauv2.x;
if (det != 0)
{
f = 1.0f / det;
tangent = (deltapos1 * deltauv2.y - deltapos2 * deltauv1.y) * f;
bitangent = (deltapos2 * deltauv1.x - deltapos1 * deltauv2.x) * f;
}
vertices[i].tangent = tangent;
vertices[i + 1].tangent = tangent;
vertices[i + 2].tangent = tangent;
vertices[i].bitangent = bitangent;
vertices[i + 1].bitangent = bitangent;
vertices[i + 2].bitangent = bitangent;
}
for (unsigned int i = 0; i < vertices.size(); i++)
{
vertices[i].tangent = Math::normalize(vertices[i].tangent - vertices[i].normal * Math::dot(vertices[i].tangent, vertices[i].normal));
if (Math::dot(Math::cross(vertices[i].bitangent, vertices[i].tangent), vertices[i].normal) < 0.0f)
vertices[i].bitangent = Math::normalize(Math::cross(vertices[i].normal, vertices[i].tangent)) * -1;
else
vertices[i].bitangent =
Math::normalize(Math::cross(vertices[i].normal, vertices[i].tangent));
}
Vertex shader
void main()
{
UV = vec2(inUv.x, 1.0f - inUv.y);
mat3 nmat = mat3(((ubo.model)));
gl_Position = ubo.mvp * vec4(inPosition, 1.0f);
Normal = normalize(nmat * inNormal);
vec3 T = normalize(nmat * inTangent);
vec3 B = normalize(nmat * inBitangent);
vec3 N = Normal;
//T = normalize(T - N * dot(T, N));
//B = normalize(cross(T, N));
mat3 TBN = mat3(T, B, N);
toModelSpace = TBN;
fragPos = vec3(ubo.model * vec4(inPosition, 1.0f));
viewPos = ubo.cameraPos;
This is how I convert normal map to model space
vec3 normal = normalize(texture(imageBump, UV).rgb * 2.0f - 1.0f);
normal = normalize(toModelSpace * normal);

GLSL Bump Mapping Point Light

Recognize any logic errors? Any suggestions to improve this code or to enhance the results?
Image: Before & After
Video: Short video demonstrating current bump mapping results!
The normal and tangent vectors being sent to the vertex shader are correct.
Method in fragment shader:
vec3 processPointLight(in vec3 norm, in vec3 pos, in int lightIndex)
{
float distance = length(lights[lightIndex].Position.xyz - pos.xyz);
//Light Attenuation Computation
float attenuation = 1.0f / (
(lights[lightIndex].AttenuationConst +
lights[lightIndex].AttenuationLinear * distance +
lights[lightIndex].AttenuationQuad * distance * distance));
vec3 textureDifResult;
vec3 textureSpecResult;
if (Toggle_Textures)
{
vec3 textureNrmResult;
textureDifResult = textureLod(Texture0, ((fs_in.textureCoord.xy * (vec2(1024) / vec2(7168))) + (vec2(1024) * (fs_in.textureUnits.x + 0)) / vec2(7168)), 0).rgb;
textureNrmResult = textureLod(Texture0, ((fs_in.textureCoord.xy * (vec2(1024) / vec2(7168))) + (vec2(1024) * (fs_in.textureUnits.x + 1)) / vec2(7168)), 0).rgb;
textureSpecResult = textureLod(Texture0, ((fs_in.textureCoord.xy * (vec2(1024) / vec2(7168))) + (vec2(1024) * (fs_in.textureUnits.x + 2)) / vec2(7168)), 0).rgb;
// Transform normal vector to range [-1,1]
norm = (textureNrmResult * 2.0f - vec3(1.0f));
}
else
{
textureDifResult = ModelColor.rgb;
textureSpecResult = ModelColor.rgb;
}
//viewPoint, normal, tangent and bitangent are in view space so we need to transform the light's position to view space too.. Special thanks to pleup!
vec3 L =normalize( mat3(fs_in.mv_Matrix) * lights[lightIndex].Position.xyz ) - fs_in.viewPoint;
vec3 lightDir = normalize(vec3(dot(L, fs_in.tangent.xyz), dot(L, fs_in.bitangent.xyz), dot(L, norm)));
vec3 reflection = reflect(-L, norm);
// Light Diffuse Computation
vec3 diffuse = ( max(0.0f, dot( norm ,lightDir )) * lights[lightIndex].Diffuse * textureDifResult);
// Light Ambience Computation
vec3 ambient = lights[lightIndex].Ambient;
// Light Specular Computation
vec3 specular = vec3(0.0f, 0.0f, 0.0f);
//if (dot(lightDir , norm ) > 0.0f)
specular = pow(max(0.0f, dot(reflect(-lightDir, norm), fs_in.eyeDir.xyz )), 20.0) * textureSpecResult * lights[lightIndex].Specular * lights[lightIndex].SpecularPower;
return min((ambient)+attenuation * (diffuse + specular), vec3(1.0));
}
Complete Vertex Shader:
#version 420 core
layout(location=0) in vec4 in_Position;
layout(location=1) in vec4 in_TexCoord;
layout(location=2) in vec4 in_Normal;
layout(location=3) in vec4 in_Tangent;
layout(location=4) in uvec4 in_TextureUnits;
// per frame
uniform mat4 ViewMatrix;
uniform mat4 ProjectionMatrix;
uniform vec4 EyePosition;
// per item
uniform mat4 ModelMatrix;
uniform mat4 ModelMatrixOrientation;
out VS_OUT
{
vec4 eyePosition;
vec4 worldPosition;
vec4 normal;
vec4 tangent;
vec4 bitangent;
vec4 eyeDir;
vec3 textureCoord;
flat uvec4 textureUnits;
vec3 viewPoint;
mat4 mv_Matrix;
} vs_out;
void main()
{
gl_Position = ModelMatrix * in_Position;
vs_out.worldPosition = ModelMatrix * in_Position;
vs_out.eyePosition = ProjectionMatrix * ViewMatrix * ModelMatrix * in_Position;
vs_out.textureCoord = vec3(in_TexCoord.x, in_TexCoord.y, in_TexCoord.z);
vs_out.textureUnits = in_TextureUnits;
gl_Position = ProjectionMatrix * ViewMatrix * gl_Position;
vs_out.mv_Matrix = ViewMatrix * ModelMatrix;
//vertex Position in view space..
vec4 P = vs_out.mv_Matrix * in_Position;
vs_out.viewPoint= P.xyz;
vs_out.normal = normalize(vec4(mat3( vs_out.mv_Matrix) * in_Normal.xyz, 1.0f));
// Calculate normal (N) and tangent (T) vectors in view space from incoming object space vectors.
vs_out.tangent = normalize(vec4(mat3( vs_out.mv_Matrix ) * in_Tangent.xyz, 1.0f) );
// Calculate the bitangent vector (B) from the normal and tangent vectors
vs_out.bitangent = vec4( cross(vs_out.normal.xyz, vs_out.tangent.xyz), 1.0f);
// The view vector is the vector from the point of interest to the viewer, which in view space is simply the negative of the position.
// Calculate that and multiply it by the TBN matrix.
vec3 V = -P.xyz;
vs_out.eyeDir = normalize(vec4(dot(V, vs_out.tangent.xyz), dot(V, vs_out.bitangent.xyz), dot(V, vs_out.normal.xyz),1.0f));
return;
}
Light properties:
<LightComponent type="PointLight" isEnabled="1" specularPower="1.0f" offsetX="0.0f" offsetY="40.0f" offsetZ="0.0f" ambientR="0.0f" ambientG="0.0f" ambientB="0.0f" diffuseR="0.3f" diffuseG="0.3f" diffuseB="0.3f" specularR="0.3f" specularG="0.3f" specularB="0.3f" attenConst="0.3f" attenLinear="0.000091427f" attenQuad="0.0000065429f" />
Texture Manager function that loads the textures:
void cTextureManager::loadTextureMipmap(rapidxml::xml_node<>* textureNode){
std::string pathDif = textureNode->first_attribute("pathDif")->value();
std::string pathNrm = textureNode->first_attribute("pathNrm")->value();
std::string pathSpec = textureNode->first_attribute("pathSpec")->value();
FREE_IMAGE_FORMAT imgFormat = FreeImage_GetFileType(pathDif.c_str(), 0);//Get current format (assume all textures are the same format)
FIBITMAP* imagenDif = FreeImage_Load(imgFormat, pathDif.c_str());
imagenDif = FreeImage_ConvertTo32Bits(imagenDif);
FIBITMAP* imagenNrm = FreeImage_Load(imgFormat, pathNrm.c_str());
imagenNrm = FreeImage_ConvertTo32Bits(imagenNrm);
FIBITMAP* imagenSpec = FreeImage_Load(imgFormat, pathSpec.c_str());
imagenSpec = FreeImage_ConvertTo32Bits(imagenSpec);
int width = FreeImage_GetWidth(imagenDif); // Assume images are the same size..
int height = FreeImage_GetHeight(imagenDif);
GLubyte* textureDif = new GLubyte[4 * width*height];
GLubyte* textureNrm = new GLubyte[4 * width*height];
GLubyte* textureSpec = new GLubyte[4 * width*height];
char* pixelsDif = (char*)FreeImage_GetBits(imagenDif);
char* pixelsNrm = (char*)FreeImage_GetBits(imagenNrm);
char* pixelsSpec = (char*)FreeImage_GetBits(imagenSpec);
//FreeImage loads in BGR format, so you need to swap some bytes(Or use GL_BGR).
for (int j = 0; j<width*height; j++) {
textureDif[j * 4 + 0] = pixelsDif[j * 4 + 2];
textureDif[j * 4 + 1] = pixelsDif[j * 4 + 1];
textureDif[j * 4 + 2] = pixelsDif[j * 4 + 0];
textureDif[j * 4 + 3] = pixelsDif[j * 4 + 3];
textureNrm[j * 4 + 0] = pixelsNrm[j * 4 + 2];
textureNrm[j * 4 + 1] = pixelsNrm[j * 4 + 1];
textureNrm[j * 4 + 2] = pixelsNrm[j * 4 + 0];
textureNrm[j * 4 + 3] = pixelsNrm[j * 4 + 3];
textureSpec[j * 4 + 0] = pixelsSpec[j * 4 + 2];
textureSpec[j * 4 + 1] = pixelsSpec[j * 4 + 1];
textureSpec[j * 4 + 2] = pixelsSpec[j * 4 + 0];
textureSpec[j * 4 + 3] = pixelsSpec[j * 4 + 3];
}
int tempMipmapLevel = gCurrentMipmapLevel;
std::string name = textureNode->first_attribute("name")->value();
gMap_TextureNameToMipmapLevel[name] = tempMipmapLevel;
gCurrentMipmapLevel+= 3;
// Assume the texture is already bound to the GL_TEXTURE_2D target
glTexSubImage2D(GL_TEXTURE_2D, // 2D texture
0, // Level 0
(1024 * tempMipmapLevel), (1024 * tempMipmapLevel), // Offset 0, 0
1024, 1024, // 1024 x 1024 texels
GL_RGBA, // Four channel data
GL_UNSIGNED_BYTE, // Floating point data
(GLvoid*)textureDif); // Pointer to data
delete [] textureDif;
// Assume the texture is already bound to the GL_TEXTURE_2D target
glTexSubImage2D(GL_TEXTURE_2D, // 2D texture
0,
1024 * (tempMipmapLevel+1), 1024 * (tempMipmapLevel+1),
1024, 1024,
GL_RGBA,
GL_UNSIGNED_BYTE,
(GLvoid*)textureNrm);
delete[] textureNrm;
// Assume the texture is already bound to the GL_TEXTURE_2D target
glTexSubImage2D(GL_TEXTURE_2D, // 2D texture
0,
1024 * (tempMipmapLevel +2), 1024 * (tempMipmapLevel+2),
1024, 1024,
GL_RGBA,
GL_UNSIGNED_BYTE,
(GLvoid*)textureSpec);
delete[] textureSpec;
// int MaxTextureImageUnits;
// glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &MaxTextureImageUnits);
}
Thanks in advance for any advice!

GLSL Shader Ported From HLSL Is Not Working

I have this HLSL Shader for blur:
struct VS_INPUT
{
float4 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
float4 Color : TEXCOORD1;
};
struct VS_OUTPUT
{
float4 Position : POSITION0;
float4 Color : COLOR0;
float2 TexCoord : TEXCOORD0;
};
float4x4 al_projview_matrix;
VS_OUTPUT vs_main(VS_INPUT Input)
{
VS_OUTPUT Output;
Output.Position = mul(Input.Position, al_projview_matrix);
Output.Color = Input.Color;
Output.TexCoord = Input.TexCoord;
return Output;
}
Frag
texture al_tex;
sampler2D s = sampler_state {
texture = <al_tex>;
};
int tWidth;
int tHeight;
float blurSize = 5.0;
float4 ps_main(VS_OUTPUT Input) : COLOR0
{
float2 pxSz = float2(1.0 / tWidth,1.0 / tHeight);
float4 outC = 0;
float outA = 0;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-4.0 * pxSz.y * blurSize)).a * 0.05;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-3.0 * pxSz.y * blurSize)).a * 0.09;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-2.0 * pxSz.y * blurSize)).a * 0.12;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-pxSz.y * blurSize)).a * 0.15;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,0)).a * 0.16;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,pxSz.y * blurSize)).a * 0.15;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,2.0 * pxSz.y * blurSize)).a * 0.12;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,3.0 * pxSz.y * blurSize)).a * 0.09;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,4.0 * pxSz.y * blurSize)).a * 0.05;
outC.a = outA;
return outC;
}
There is a similar one for horizontal...
The idea is, I provide tWidth, tHeight for the texture with and height, and use that to get the 'size' of a pixel relative to UV coords.
I then use this to do normal blur by taking a weighted average of neighbors.
I ported this to GLSL:
attribute vec4 al_pos;
attribute vec4 al_color;
attribute vec2 al_texcoord;
uniform mat4 al_projview_matrix;
varying vec4 varying_color;
varying vec2 varying_texcoord;
void main()
{
varying_color = al_color;
varying_texcoord = al_texcoord;
gl_Position = al_projview_matrix * al_pos;
}
Frag
uniform sampler2D al_tex;
varying float blurSize;
varying float tWidth;
varying float tHeight;
varying vec2 varying_texcoord;
varying vec4 varying_color;
void main()
{
vec4 sum = vec4(0.0);
vec2 pxSz = vec2(1.0 / tWidth,1.0 / tHeight);
// blur in x
// take nine samples, with the distance blurSize between them
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-4.0 * pxSz.y * blurSize))* 0.05;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-3.0 * pxSz.y * blurSize))* 0.09;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-2.0 * pxSz.y * blurSize))* 0.12;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-pxSz.y * blurSize))* 0.15;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,0))* 0.16;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,pxSz.y * blurSize))* 0.15;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,2.0 * pxSz.y * blurSize))* 0.12;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,3.0 * pxSz.y * blurSize))* 0.09;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,4.0 * pxSz.y * blurSize))* 0.05;
gl_FragColor = varying_color * sum;
}
This is a little different, but it's the same logic. I convert pixel coords to UV coords, and multiply by the blur factor, same as the hlsl factor. Yet, the glsl one gives me an unblurred, slightly more transparent version of the original.
What could cause this?
In your fragment shader, you have:
varying vec4 varying_color;
[...]
gl_FragColor = varying_color;
so all the texture fetches and calculations you do don't have any effect on the final shader output (and are likely to be completely removed by the compiler). You probably want to output sum or to modify it, e.g. with gl_FragColor = varying_color * sum; or whatever effect you want to achieve.
Another thing: in the frag shader, you define varyings for the texture size, but you don't pass them from the vertex shader. Those should be uniforms (or, in modern GLSL, there is also the textureSize() GLSL function which allows you to directly sccess that values without explicitely passing them).

OpenGL FXAA issues

I am a Computer Science Student and I am currently doing a 2d Game for my Coursework.
I know that it's kind of unnecessary, but I tried to implement the FXAA algorithm for the AA of the game. The shader I am using is this one, because I thought using just a simple frag shader would be alright. I can also be found on Github.
#version 120
#define FXAA_REDUCE_MIN (1.0/128.0)
#define FXAA_REDUCE_MUL (1.0/8.0)
#define FXAA_SPAN_MAX 8.0
uniform sampler2D sampler0;
uniform vec2 resolution;
void main(){
vec2 inverse_resolution=vec2(1.0/resolution.x,1.0/resolution.y);
vec3 rgbNW = texture2D(sampler0, (gl_FragCoord.xy + vec2(-1.0,-1.0)) * inverse_resolution).xyz;
vec3 rgbNE = texture2D(sampler0, (gl_FragCoord.xy + vec2(1.0,-1.0)) * inverse_resolution).xyz;
vec3 rgbSW = texture2D(sampler0, (gl_FragCoord.xy + vec2(-1.0,1.0)) * inverse_resolution).xyz;
vec3 rgbSE = texture2D(sampler0, (gl_FragCoord.xy + vec2(1.0,1.0)) * inverse_resolution).xyz;
vec3 rgbM = texture2D(sampler0, gl_FragCoord.xy * inverse_resolution).xyz;
vec3 luma = vec3(0.299, 0.587, 0.114);
float lumaNW = dot(rgbNW, luma);
float lumaNE = dot(rgbNE, luma);
float lumaSW = dot(rgbSW, luma);
float lumaSE = dot(rgbSE, luma);
float lumaM = dot(rgbM, luma);
float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));
vec2 dir;
dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));
float dirReduce = max((lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL),FXAA_REDUCE_MIN);
float rcpDirMin = 1.0/(min(abs(dir.x), abs(dir.y)) + dirReduce);
dir = min(vec2( FXAA_SPAN_MAX, FXAA_SPAN_MAX),max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX),dir * rcpDirMin)) * inverse_resolution;
vec3 rgbA = 0.5 * (texture2D(sampler0, gl_FragCoord.xy * inverse_resolution + dir * (1.0/3.0 - 0.5)).xyz + texture2D(sampler0, gl_FragCoord.xy * inverse_resolution + dir * (2.0/3.0 - 0.5)).xyz);
vec3 rgbB = rgbA * 0.5 + 0.25 * (texture2D(sampler0, gl_FragCoord.xy * inverse_resolution + dir * - 0.5).xyz + texture2D(sampler0, gl_FragCoord.xy * inverse_resolution + dir * 0.5).xyz);
float lumaB = dot(rgbB, luma);
if((lumaB < lumaMin) || (lumaB > lumaMax)) {
gl_FragColor = vec4(rgbA,1.0);
} else {
gl_FragColor = vec4(rgbB,1.0);
}
}
The problem I ran into is, that whenever I turn the shader on my screen is flipped upside down... How do I solve this? It's probably because of the Orthographic Projection, but I am just a student, so I can't come up with a solution.
It seems to be a texture coordinate problem. Use 1-V instead of V to flip your mirrored image.
vec2 correctedFragCoord;
correctedFragCoord.x = gl_FragCoord.x;
correctedFragCoord.y = resolution.y - gl_FragCoord.y;
Then replace gl_FragCoord by correctedFragCoord in the shader.