This is my fragment shader code, where I am applying a Gaussian blur to a Texture2D image.
vec3 incrementalGaussian;
incrementalGaussian.x = 1.0f / (sqrt(2.0f * pi) * BlurValue);
incrementalGaussian.y = exp(-0.5f / (BlurValue * BlurValue));
incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;

vec4 avgValue = vec4(0.0f, 0.0f, 0.0f, 0.0f);
float coefficientSum = 0.0f;

// Take the central sample first...
avgValue += texture2D(text, TexCoords.st) * incrementalGaussian.x;
coefficientSum += incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;

// Go through the remaining 8 vertical samples (4 on each side of the center)
for (float i = 1.0f; i <= numBlurPixelsPerSide; i++) {
    avgValue += texture2D(text, TexCoords.st - i * 0.01f * blurMultiplyVec) * incrementalGaussian.x;
    avgValue += texture2D(text, TexCoords.st + i * 0.01f * blurMultiplyVec) * incrementalGaussian.x;
    coefficientSum += 2 * incrementalGaussian.x;
    incrementalGaussian.xy *= incrementalGaussian.yz;
}

avgValue.g = avgValue.r;
avgValue.b = avgValue.r;
color = avgValue * vec4(textColor, 1.0) / coefficientSum;
This only applies a horizontal blur. How can I also add a vertical Gaussian blur?
In general, a Gaussian blur is done in two passes. For the vertical blur you have to add a second pass: first do the horizontal blur, then apply the vertical blur to the result.
For the horizontal blur the u-component of the texture coordinate is displaced:
for (float i = 1.0f; i <= numBlurPixelsPerSide; i++) {
    float offset = i * 0.01f * blurMultiplyVec;
    avgValue += texture2D(text, TexCoords.st - vec2(offset, 0.0)) * incrementalGaussian.x;
    avgValue += texture2D(text, TexCoords.st + vec2(offset, 0.0)) * incrementalGaussian.x;
    // ...
}
And for the vertical blur the v-component of the texture coordinate is displaced:
for (float i = 1.0f; i <= numBlurPixelsPerSide; i++) {
    float offset = i * 0.01f * blurMultiplyVec;
    avgValue += texture2D(text, TexCoords.st - vec2(0.0, offset)) * incrementalGaussian.x;
    avgValue += texture2D(text, TexCoords.st + vec2(0.0, offset)) * incrementalGaussian.x;
    // ...
}
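Both loops differ only in which component of the texture coordinate is displaced, so in practice you compile the shader once and drive it with a direction uniform: render the horizontal pass into an intermediate framebuffer texture, then run the same shader with the vertical direction over that result. A minimal sketch of such a shader (the uniform name u_direction is illustrative, not taken from the code above; the texture2D/out-variable mix mirrors the question's style):

uniform sampler2D text;
uniform float BlurValue;
uniform float numBlurPixelsPerSide;
uniform vec2 u_direction; // vec2(1.0, 0.0) for pass 1, vec2(0.0, 1.0) for pass 2
in vec2 TexCoords;
out vec4 color;

const float pi = 3.14159265;

void main() {
    vec3 g; // incremental Gaussian coefficients, as in the question
    g.x = 1.0 / (sqrt(2.0 * pi) * BlurValue);
    g.y = exp(-0.5 / (BlurValue * BlurValue));
    g.z = g.y * g.y;

    vec4 avgValue = texture2D(text, TexCoords) * g.x;
    float coefficientSum = g.x;
    g.xy *= g.yz;

    for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
        vec2 offset = i * 0.01 * u_direction;
        avgValue += texture2D(text, TexCoords - offset) * g.x;
        avgValue += texture2D(text, TexCoords + offset) * g.x;
        coefficientSum += 2.0 * g.x;
        g.xy *= g.yz;
    }

    color = avgValue / coefficientSum;
}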
A related question is "What kind of blurs can be implemented in pixel shaders?"
There are a lot of good tutorials all over the web, e.g. LearnOpenGL.com - Gaussian blur
I'm trying to write a custom shader where I first blur the texture and then run Sobel edge detection.
I've got Sobel running OK via the following script:
vec4 colorSobel = texture2D(texture, uv);
float bottomLeftIntensity = texture2D(texture, uv + vec2(-0.0015625, 0.0020833)).r;
float topRightIntensity = texture2D(texture, uv + vec2(0.0015625, -0.0020833)).r;
float topLeftIntensity = texture2D(texture, uv + vec2(-0.0015625, -0.0020833)).r;
float bottomRightIntensity = texture2D(texture, uv + vec2(0.0015625, 0.0020833)).r;
float leftIntensity = texture2D(texture, uv + vec2(-0.0015625, 0)).r;
float rightIntensity = texture2D(texture, uv + vec2(0.0015625, 0)).r;
float bottomIntensity = texture2D(texture, uv + vec2(0, 0.0020833)).r;
float topIntensity = texture2D(texture, uv + vec2(0, -0.0020833)).r;
float h = -secondary * topLeftIntensity - coef * topIntensity - secondary * topRightIntensity + secondary * bottomLeftIntensity + coef * bottomIntensity + secondary * bottomRightIntensity;
float v = -secondary * bottomLeftIntensity - coef * leftIntensity - secondary * topLeftIntensity + secondary * bottomRightIntensity + coef * rightIntensity + secondary * topRightIntensity;
float mag = length(vec2(h, v));
// alpha values removed atm
if (mag < 0.5) {
colorSobel.rgb *= (1.0 - 1.0);
colorSobel.r += 0.0 * 1.0;
colorSobel.g += 0.0 * 1.0;
colorSobel.b += 0.0 * 1.0;
colorSobel.rgb += 1.0 * mag;
} else {
colorSobel.rgb *= (1.0 - 1.0);
colorSobel.r += 0.0 * 1.0;
colorSobel.g += 0.0 * 1.0;
colorSobel.b += 0.0 * 1.0;
colorSobel.rgb += 1.0 * mag;
}
gl_FragColor = colorSobel;
However I know it works by sampling the texture via texture2D.
If I were to first manipulate the output via a simple script such as this, which reduces the colours:
vec4 bg = texture2D(texture, uv);
gl_FragColor = vec4(bg.rgb, 1.0);
gl_FragColor.r = floor(gl_FragColor.r * 0.5) / 0.5;
gl_FragColor.g = floor(gl_FragColor.g * 0.5) / 0.5;
gl_FragColor.b = floor(gl_FragColor.b * 0.5) / 0.5;
The output still samples from the texture and ignores the first paint.
Is there a way to sample from the color output rather than using texture2D?
The reason I'm asking is that I'm chaining my shaders at runtime depending on user interaction.
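A fragment shader cannot read back its own output from earlier in the same pass; the usual options are to render the first effect into an intermediate texture (a framebuffer object) and sample that in the second pass, or to inline the first effect into a sampling helper so every Sobel tap sees the already-processed colour. A sketch of the second idea (sampleProcessed is a hypothetical helper, not part of the code above, and the constant is illustrative):

vec4 sampleProcessed(vec2 coord) {
    vec4 c = texture2D(texture, coord);
    c.rgb = floor(c.rgb * 2.0) / 2.0; // the colour-reduction effect, inlined
    return c;
}

Every texture2D(texture, uv + ...) call in the Sobel code would then become sampleProcessed(uv + ...). For longer chains of effects, though, render-to-texture is the standard approach.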
I've been following this tutorial to implement my variance shadow mapping feature for point lights in deferred rendering.
I'm using GLSL 3.3, left-handed coordinate system. Here is what I've been doing:
I render the scene to dual-paraboloid maps, storing depth and depth * depth.
Result:
The image above contains the front and back maps. The point light is at the center of the scene; you can see where it glows yellow the most.
Then I set up a full-screen shader pass.
I do this by porting the tutorial code from FX to GLSL.
Author's .fx code:
float4 TexturePS(float3 normalW : TEXCOORD0, float2 tex0 : TEXCOORD1, float3 pos : TEXCOORD2) : COLOR
{
    float4 texColor = tex2D(TexS, tex0 * TexScale);

    pos = mul(float4(pos, 1.0f), LightView);
    float L = length(pos);
    float3 P0 = pos / L;
    float alpha = .5f + pos.z / LightAttenuation;

    P0.z = P0.z + 1;
    P0.x = P0.x / P0.z;
    P0.y = P0.y / P0.z;
    P0.z = L / LightAttenuation;
    P0.x = .5f * P0.x + .5f;
    P0.y = -.5f * P0.y + .5f;

    float3 P1 = pos / L;
    P1.z = 1 - P1.z;
    P1.x = P1.x / P1.z;
    P1.y = P1.y / P1.z;
    P1.z = L / LightAttenuation;
    P1.x = .5f * P1.x + .5f;
    P1.y = -.5f * P1.y + .5f;

    float depth;
    float mydepth;
    float2 moments;
    if (alpha >= 0.5f)
    {
        moments = tex2D(ShadowFrontS, P0.xy).xy;
        depth = moments.x;
        mydepth = P0.z;
    }
    else
    {
        moments = tex2D(ShadowBackS, P1.xy).xy;
        depth = moments.x;
        mydepth = P1.z;
    }

    float lit_factor = (mydepth <= moments[0]);

    float E_x2 = moments.y;
    float Ex_2 = moments.x * moments.x;
    float variance = min(max(E_x2 - Ex_2, 0.0) + SHADOW_EPSILON, 1.0);
    float m_d = (moments.x - mydepth);
    float p = variance / (variance + m_d * m_d); //Chebychev's inequality

    texColor.xyz *= max(lit_factor, p + .2f);
    return texColor;
}
My translated GLSL code:
void main() {
    vec3 in_vertex = texture(scenePosTexture, texCoord).xyz; // get 3D vertex from 2D screen coordinate
    vec4 vert = lightViewMat * vec4(in_vertex, 1); // project vertex to point light space (view from light position, look target is -Z)
    float L = length(vert.xyz);

    float distance = length(lightPos - in_vertex);
    float denom = distance / lightRad + 1;
    float attenuation = 1.0 / (denom * denom);

    // to determine which paraboloid map to use
    float alpha = vert.z / attenuation + 0.5f;

    vec3 P0 = vert.xyz / L;
    P0.z = P0.z + 1;
    P0.x = P0.x / P0.z;
    P0.y = P0.y / P0.z;
    P0.z = L / attenuation;
    P0.x = .5f * P0.x + .5f;
    P0.y = -.5f * P0.y + .5f;

    vec3 P1 = vert.xyz / L;
    P1.z = 1 - P1.z;
    P1.x = P1.x / P1.z;
    P1.y = P1.y / P1.z;
    P1.z = L / attenuation;
    P1.x = .5f * P1.x + .5f;
    P1.y = -.5f * P1.y + .5f;

    // Variance shadow mapping
    float depth;
    float mydepth;
    vec2 moments;
    if (alpha >= 0.5f)
    {
        moments = texture(shadowMapFrontTexture, P0.xy).xy;
        depth = moments.x;
        mydepth = P0.z;
    }
    else
    {
        moments = texture(shadowMapBackTexture, P1.xy).xy;
        depth = moments.x;
        mydepth = P1.z;
    }

    // Original .fx code is: float lit_factor = (mydepth <= moments[0]);
    // I'm not sure my translated code below is correct
    float lit_factor = 0;
    if (mydepth <= moments.x)
        lit_factor = mydepth;
    else
        lit_factor = moments.x;

    float E_x2 = moments.y;
    float Ex_2 = moments.x * moments.x;
    float variance = min(max(E_x2 - Ex_2, 0.0) + SHADOW_EPSILON, 1.0);
    float m_d = (moments.x - mydepth);
    float p = variance / (variance + m_d * m_d); //Chebychev's inequality

    fragColor = texture(sceneTexture, texCoord).rgb; // sample original color
    fragColor.rgb *= max(lit_factor, p + .2f);
}
Render result:
Right now I'm clueless about what I need to change to render the shadow correctly. Could someone point it out for me?
A friend of mine pointed out that the Y component is flipped, which is why the shadow looked upside down. After negating P0's and P1's Y, it starts to show a quite reasonable shadow:
But another problem remains: the location of the shadow is wrong.
Why do you duplicate the paraboloid projection computation?
You compute it on 'vert', then on 'P0' and 'P1'; shouldn't you do it only on 'P0' and 'P1'? (The original code doesn't do this on 'pos'.)
EDIT:
Your lit_factor is wrong: it should be either 0.0 or 1.0.
You could use the step() GLSL intrinsic, like this:
float lit_factor = step(mydepth, moments[0]);
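Plugged into your translated shader, the end of main() would then read like this (only the lit_factor line changes; the rest is your code as posted):

float lit_factor = step(mydepth, moments.x); // 1.0 when mydepth <= moments.x, else 0.0

float E_x2 = moments.y;
float Ex_2 = moments.x * moments.x;
float variance = min(max(E_x2 - Ex_2, 0.0) + SHADOW_EPSILON, 1.0);
float m_d = moments.x - mydepth;
float p = variance / (variance + m_d * m_d); // Chebyshev's inequality

fragColor = texture(sceneTexture, texCoord).rgb;
fragColor.rgb *= max(lit_factor, p + 0.2);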
I have created a simple 2D area using OpenGL, composed of tiles. By default these tiles are stretched to the screen's aspect ratio. To fix this I have attempted to use an orthographic projection matrix. Here is how I created it:
public void createProjectionMatrix() {
    float left = 0;
    float right = DisplayManager.getScreenWidth();
    float top = 0;
    float bottom = DisplayManager.getScreenHeight();
    float near = 1;
    float far = -1;

    projectionMatrix.m00 = 2 / (right - left);
    projectionMatrix.m11 = 2 / (top - bottom);
    projectionMatrix.m22 = -2 / (far - near);
    projectionMatrix.m30 = -(right + left) / (right - left);
    projectionMatrix.m31 = -(top + bottom) / (top - bottom);
    projectionMatrix.m32 = -(far + near) / (far - near);
    projectionMatrix.m33 = 1;
}
The problem probably lies here, but I just can't find it. I call this method when my renderer is created, store the matrix in a uniform variable, and use it in the vertex shader like so:
vec4 worldPosition = transformationMatrix * vec4(position, 0, 1);
gl_Position = projectionMatrix * viewMatrix * worldPosition;
Where projectionMatrix is a mat4 which corresponds to the previously created orthographic projection matrix.
Right now absolutely nothing except for the clear color renders.
EDIT:
The orthographic projection matrix is created and loaded into the shaders right after the renderer's creation and after the shader's creation.
public Renderer() {
    createProjectionMatrix();
    terrainShader.start();
    terrainShader.loadProjectionMatrix(projectionMatrix);
    terrainShader.stop();

    GL11.glEnable(GL13.GL_MULTISAMPLE);
    GL11.glClearColor(0, 0, 0.5f, 1);
}
The rest of the matrices are passed in at each render with the loadUniforms() method.
for (Terrain t : batch) {
    loadUniforms(t, terrainManager, camera, lights);
    GL11.glDrawElements(GL11.GL_TRIANGLES, model.getModel().getVertexCount(), GL11.GL_UNSIGNED_INT, 0);
}

private void loadUniforms(Terrain t, TerrainManager tm, Camera camera, List<Light> lights) {
    Matrix4f matrix = Maths.createTransformationMatrix(t.getPosition(), 0, 0, 0, 1);
    terrainShader.loadTransformationMatrix(matrix);
    terrainShader.loadViewMatrix(camera);
    terrainShader.loadNumberOfRows(tm.getNumberOfRows());
    terrainShader.loadOffset(t.getOffset());
    terrainShader.loadLights(lights);
}
Finally this is what the vertex shader looks like:
#version 400 core

in vec2 position;

uniform mat4 transformationMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;

void main(void) {
    vec4 worldPosition = transformationMatrix * vec4(position, 0, 1);
    gl_Position = projectionMatrix * viewMatrix * worldPosition;
}
It has been a long and arduous task (if I may sound incompetent). I have found a solution to my problem; there is probably a better way to solve it, but this is how I did it.
I changed createProjectionMatrix() to look like this:
public void createProjectionMatrix() {
    float width = Display.getWidth();
    float height = Display.getHeight();
    float left = -width;
    float right = width * 1f;
    float top = height * 1f;
    float bottom = -height;
    float near = 0;
    float far = 10;

    projectionMatrix.m00 = (2f / (right - left)) * 1000;
    projectionMatrix.m11 = (2f / (top - bottom)) * 1000;
    projectionMatrix.m22 = 2f / (far - near);
    projectionMatrix.m30 = -(right + left) / (right - left);
    projectionMatrix.m31 = -(top + bottom) / (top - bottom);
    projectionMatrix.m32 = -(far + near) / (far - near);
    projectionMatrix.m33 = 1;
}
Multiplying m00 and m11 by a large number is the only way I am able to see anything besides the clear color. If I remember correctly, it is because the renderer was rendering at less than a pixel. This idea was presented to me by @NicoSchertler, so thank you very much! The shaders look the same, and it now runs well. If anyone has a less bootleg solution, please share it, as I would be glad to see how it was solved. Here is a link that was very helpful to me: OpenGL 3+ with orthographic projection of directional light.
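To put numbers on the "less than a pixel" explanation: assuming for illustration a display 1280 pixels wide, right - left is 2 * 1280, so m00 = 2 / 2560 ≈ 0.00078. One world unit then maps to about 0.00078 clip-space units, and since the 2-unit-wide clip space covers 1280 pixels, that is roughly half a pixel on screen. Multiplying m00 by 1000 turns that same unit into roughly 500 pixels, which is why the scene suddenly becomes visible. A cleaner fix is usually to choose left/right/top/bottom in the world units the tiles actually use rather than in pixels.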
This will solve your problem with the aspect. Try it out:
public void createProjectionMatrix() {
    float srcaspect = 4f / 3f; /* Default aspect ratio the ortho is scaled against; can be something other than 4:3. */
    float dstaspect = (float) DisplayManager.getScreenWidth() / DisplayManager.getScreenHeight();
    float yscale = (dstaspect < (1f / 1f) ? dstaspect : 1f / 1f) / (1f / 1f);
    float scale = 0.5f * DisplayManager.getScreenHeight();

    float top = scale - (scale / yscale);
    float bottom = scale + (scale / yscale);
    float left = scale - (scale * dstaspect / yscale) - (scale - (scale * srcaspect));
    float right = scale + (scale * dstaspect / yscale) - (scale - (scale * srcaspect));
    float near = 10;
    float far = 1000;

    projectionMatrix.m00 = 2 / (right - left);
    projectionMatrix.m11 = 2 / (top - bottom);
    projectionMatrix.m22 = -2 / (far - near);
    projectionMatrix.m30 = -(right + left) / (right - left);
    projectionMatrix.m31 = -(top + bottom) / (top - bottom);
    projectionMatrix.m32 = -(far + near) / (far - near);
    projectionMatrix.m33 = 1;
}
I'm drawing a quad using a geometry shader, but I can't figure out how to rotate it by an angle.
void main(void)
{
    float scaleX = 2.0f / u_resolution.x;
    float scaleY = 2.0f / u_resolution.y;

    float nx = (u_position.x * scaleX) - 1.0f;
    float ny = -(u_position.y * scaleY) + 1.0f;
    float nw = u_size.x * scaleX;
    float nh = u_size.y * scaleY;

    gl_Position = vec4(nx + nw, ny, 0.0, 1.0);
    texcoord = vec2(1.0, 0.0);
    EmitVertex();

    gl_Position = vec4(nx, ny, 0.0, 1.0);
    texcoord = vec2(0.0, 0.0);
    EmitVertex();

    gl_Position = vec4(nx + nw, ny - nh, 0.0, 1.0);
    texcoord = vec2(1.0, 1.0);
    EmitVertex();

    gl_Position = vec4(nx, ny - nh, 0.0, 1.0);
    texcoord = vec2(0.0, 1.0);
    EmitVertex();

    EndPrimitive();
}
Should I use a rotation matrix or sin and cos? I'm not too great at math.
You don't have to use matrices, but you do need the sine and cosine functions. Here is a way to rotate a 3D position about an arbitrary axis by some angle (specified in degrees and converted to radians in the code):
// This is the 3D position that we want to rotate:
vec3 p = position.xyz;

// Specify the axis to rotate about:
float x = 0.0;
float y = 0.0;
float z = 1.0;

// Specify the angle in radians:
float angle = 90.0 * 3.14 / 180.0; // 90 degrees, CCW

vec3 q;
q.x = p.x * (x*x * (1.0 - cos(angle)) + cos(angle))
    + p.y * (x*y * (1.0 - cos(angle)) + z * sin(angle))
    + p.z * (x*z * (1.0 - cos(angle)) - y * sin(angle));
q.y = p.x * (y*x * (1.0 - cos(angle)) - z * sin(angle))
    + p.y * (y*y * (1.0 - cos(angle)) + cos(angle))
    + p.z * (y*z * (1.0 - cos(angle)) + x * sin(angle));
q.z = p.x * (z*x * (1.0 - cos(angle)) + y * sin(angle))
    + p.y * (z*y * (1.0 - cos(angle)) - x * sin(angle))
    + p.z * (z*z * (1.0 - cos(angle)) + cos(angle));

gl_Position = vec4(q, 1.0);
If you know that you are rotating about a standard x-, y-, or z-axis, you can simplify the formula a lot by writing it out explicitly for that axis.
Notice how we rotate about the z-axis in the above code; rotation about the x-axis, for example, would be (x, y, z) = (1, 0, 0). You can set the axis to anything, but the values should form a unit vector, otherwise the result is scaled as well as rotated.
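For instance, plugging the z-axis (x, y, z) = (0, 0, 1) into the formula above makes most terms vanish and leaves the familiar 2D rotation (a sketch, following the same sign convention as the code above):

vec3 q;
q.x =  p.x * cos(angle) + p.y * sin(angle);
q.y = -p.x * sin(angle) + p.y * cos(angle);
q.z =  p.z; // unchanged when rotating about z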
Then again, you might as well use matrices:
vec3 n = vec3(0.0, 0.0, 1.0); // the axis to rotate about

// Specify the rotation transformation matrix:
mat3 m = mat3(
    n.x*n.x * (1.0f - cos(angle)) + cos(angle),       // column 1 of row 1
    n.x*n.y * (1.0f - cos(angle)) + n.z * sin(angle), // column 2 of row 1
    n.x*n.z * (1.0f - cos(angle)) - n.y * sin(angle), // column 3 of row 1

    n.y*n.x * (1.0f - cos(angle)) - n.z * sin(angle), // column 1 of row 2
    n.y*n.y * (1.0f - cos(angle)) + cos(angle),       // ...
    n.y*n.z * (1.0f - cos(angle)) + n.x * sin(angle), // ...

    n.z*n.x * (1.0f - cos(angle)) + n.y * sin(angle), // column 1 of row 3
    n.z*n.y * (1.0f - cos(angle)) - n.x * sin(angle), // ...
    n.z*n.z * (1.0f - cos(angle)) + cos(angle)        // ...
);

// Apply the rotation to our 3D position:
vec3 q = m * p;
gl_Position = vec4(q, 1.0);
Notice how the elements of the matrix are laid out such that we first complete the first column, then the second, and so on: the matrix is in column-major order. This matters when you try to transfer a matrix written in mathematical notation into a data type in your code; you basically need to transpose it (flip the elements diagonally) before using it. Also, we are multiplying a matrix on the left with a column vector on the right.
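In GLSL itself the constructor arguments and the indexing follow that same column-major convention, which a tiny example makes concrete:

mat2 m = mat2(0.0, 1.0,  // first COLUMN: (0, 1)
              2.0, 3.0); // second COLUMN: (2, 3)
vec2 c1 = m[1];    // the second column: vec2(2.0, 3.0)
float e = m[1][0]; // row 0 of column 1, i.e. 2.0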
If you need a 4-by-4 homogeneous matrix, then you would simply add an extra column and a row into the above matrix, such that both the new rightmost column and bottommost row would consist of [0 0 0 1]:
vec4 p = position.xyzw; // new dimension
vec3 n = ...; // same
mat4 m = mat4( // new dimension
...
0.0,
...
0.0,
...
0.0,
0.0,
0.0,
0.0,
1.0
);
vec4 q = m * p;
gl_Position = q;
Again, notice the order of the multiplication when applying the rotation; it is important because it affects the end result. What happens in the multiplication is basically that a new vector is formed, each coordinate of which is the dot product of the original vector and a single column in the matrix (see the first code example).
The expression

q.x = p.x * (x*x * (1.0 - cos(angle)) + cos(angle))
    + p.y * (x*y * (1.0 - cos(angle)) + z * sin(angle))
    + p.z * (x*z * (1.0 - cos(angle)) - y * sin(angle));

is the same as:

q.x = dot(p, m[0]);
One could even compose the matrix with itself, m = m*m, which doubles the rotation: with the 90-degree angle used above, the result is a 180-degree rotation matrix.
I've set up a window (in MFC) that contains an OpenGL 3.2 rendering context. Since it's OpenGL 3.2 I want to use shaders etc., so I'm building my projection and view matrices by hand. I've used this tutorial as a reference to build them and pass them to my shader.
The problem now is that (even in the example of the tutorial) when I resize my window, the model is stretched.
This is the code I use to build my matrices (I rebuild them and send them to my shader every time the window is refreshed).
View Matrix:
float zAxis[3], xAxis[3], yAxis[3];
float length, result1, result2, result3;
// zAxis = normal(lookAt - position)
zAxis[0] = lookAt[0] - m_position[0];
zAxis[1] = lookAt[1] - m_position[1];
zAxis[2] = lookAt[2] - m_position[2];
length = sqrt((zAxis[0] * zAxis[0]) + (zAxis[1] * zAxis[1]) + (zAxis[2] * zAxis[2]));
zAxis[0] = zAxis[0] / length;
zAxis[1] = zAxis[1] / length;
zAxis[2] = zAxis[2] / length;
// xAxis = normal(cross(up, zAxis))
xAxis[0] = (up[1] * zAxis[2]) - (up[2] * zAxis[1]);
xAxis[1] = (up[2] * zAxis[0]) - (up[0] * zAxis[2]);
xAxis[2] = (up[0] * zAxis[1]) - (up[1] * zAxis[0]);
length = sqrt((xAxis[0] * xAxis[0]) + (xAxis[1] * xAxis[1]) + (xAxis[2] * xAxis[2]));
xAxis[0] = xAxis[0] / length;
xAxis[1] = xAxis[1] / length;
xAxis[2] = xAxis[2] / length;
// yAxis = cross(zAxis, xAxis)
yAxis[0] = (zAxis[1] * xAxis[2]) - (zAxis[2] * xAxis[1]);
yAxis[1] = (zAxis[2] * xAxis[0]) - (zAxis[0] * xAxis[2]);
yAxis[2] = (zAxis[0] * xAxis[1]) - (zAxis[1] * xAxis[0]);
// -dot(xAxis, position)
result1 = ((xAxis[0] * m_position[0]) + (xAxis[1] * m_position[1]) + (xAxis[2] * m_position[2])) * -1.0f;
// -dot(yaxis, eye)
result2 = ((yAxis[0] * m_position[0]) + (yAxis[1] * m_position[1]) + (yAxis[2] * m_position[2])) * -1.0f;
// -dot(zaxis, eye)
result3 = ((zAxis[0] * m_position[0]) + (zAxis[1] * m_position[1]) + (zAxis[2] * m_position[2])) * -1.0f;
viewMatrix[0] = xAxis[0];
viewMatrix[1] = yAxis[0];
viewMatrix[2] = zAxis[0];
viewMatrix[3] = 0.0f;
viewMatrix[4] = xAxis[1];
viewMatrix[5] = yAxis[1];
viewMatrix[6] = zAxis[1];
viewMatrix[7] = 0.0f;
viewMatrix[8] = xAxis[2];
viewMatrix[9] = yAxis[2];
viewMatrix[10] = zAxis[2];
viewMatrix[11] = 0.0f;
viewMatrix[12] = result1;
viewMatrix[13] = result2;
viewMatrix[14] = result3;
viewMatrix[15] = 1.0f;
Projection Matrix:
float screenAspect = (float)rcClient.Width() / (float)rcClient.Height();
float fov = 3.14159265358979323846f / 4.0f;
float zfar = 1000.0;
float znear = 0.1f;
projectionMatrix[0] = 1.0f / (screenAspect * tan( fov * 0.5f));
projectionMatrix[1] = 0.0f;
projectionMatrix[2] = 0.0f;
projectionMatrix[3] = 0.0f;
projectionMatrix[4] = 0.0f;
projectionMatrix[5] = 1.0f / tan( fov * 0.5f);
projectionMatrix[6] = 0.0f;
projectionMatrix[7] = 0.0f;
projectionMatrix[8] = 0.0f;
projectionMatrix[9] = 0.0f;
projectionMatrix[10] = zfar / (zfar - znear);
projectionMatrix[11] = 1.0f;
projectionMatrix[12] = 0.0f;
projectionMatrix[13] = 0.0f;
projectionMatrix[14] = (-znear * zfar) / (zfar - znear);
projectionMatrix[15] = 0.0f;
And in my shader I use them this way:
#version 150
in vec3 inputPosition;
in vec3 inputColor;
out vec3 color;
uniform mat4 worldMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
void main(void)
{
gl_Position = projectionMatrix * viewMatrix * worldMatrix * vec4(inputPosition, 1.0f);
color = inputColor;
}
Does anyone have an idea of what is happening here?
You don't have to use glViewport for this, since you can handle it in the shaders, which is the preferred way.
Can you change your projection matrix to:
float projectionMatrix[4][4];
projectionMatrix[0][0] = 1.0f / (screenAspect * tan( fov * 0.5f));
projectionMatrix[0][1] = 0.0f;
projectionMatrix[0][2] = 0.0f;
projectionMatrix[0][3] = 0.0f;
projectionMatrix[1][0] = 0.0f;
projectionMatrix[1][1] = 1.0f / tan( fov * 0.5f);
projectionMatrix[1][2] = 0.0f;
projectionMatrix[1][3] = 0.0f;
projectionMatrix[2][0] = 0.0f;
projectionMatrix[2][1] = 0.0f;
projectionMatrix[2][2] = (-znear -zfar) / (znear-zfar);
projectionMatrix[2][3] = 2.0f * zfar * znear / (znear-zfar);
projectionMatrix[3][0] = 0.0f;
projectionMatrix[3][1] = 0.0f;
projectionMatrix[3][2] = 1.0;
projectionMatrix[3][3] = 0.0f;
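One caveat with a hand-filled float[4][4] like this: the matrix above appears to be laid out row by row ([row][col]), while glUniformMatrix4fv expects column-major data unless you pass GL_TRUE for its transpose parameter, so make sure the upload matches the layout (either transpose the array first or set that flag).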
I am not sure about your UVN matrix, but it may not be the problem, since you got it working after using glViewport.
Anyway, instead of generating your matrices by hand you can use GLM. A code example on building the projection, UVN, and translation matrices is given here: glm code example
I think these tutorials explain transformations well enough: http://ogldev.atspace.co.uk/