Rust, OpenGL - Distance is equal in code, but not after drawing

Using rust with glfw and gl bindings.
I have created an array of vertices for use in a VBO:
let x = 2.0 / ((HORIZONTAL_BOOK_AMOUNT * 2) + HORIZONTAL_BOOK_AMOUNT + 1) as f32;
// x is the desired distance, based on the NDC width divided by the number of horizontal objects to draw
// in the case of the attached picture, HORIZONTAL_BOOK_AMOUNT would be 4
let mut vbo = 0;
let side = x * IMAGE_HEIGHT_MULTIPLIER;
// here x is multiplied by a constant to get the desired height of the object
let vertices: Vec<f32> = vec![
-1.0 + x, 1.0 - x, 0.0, 1.0, //top left
-1.0 + 3.0 * x, 1.0 - x, 1.0, 1.0, // top right
-1.0 + 3.0 * x, 1.0 - x - (2.0 * side), 1.0, 0.0, //bottom right
-1.0 + 3.0 * x, 1.0 - x - (2.0 * side), 1.0, 0.0, //bottom right
-1.0 + x, 1.0 - x - (2.0 * side), 0.0, 0.0, //bottom left
-1.0 + x, 1.0 - x, 0.0, 1.0]; //top left
gl::GenBuffers(1, &mut vbo);
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::BufferData(
    gl::ARRAY_BUFFER,
    (vertices.len() * std::mem::size_of::<GLfloat>()) as GLsizeiptr,
    vertices.as_ptr() as *const std::ffi::c_void,
    gl::STATIC_DRAW,
);
The problem: after drawing, the distance between the top-left vertex and the top border should be equal to the distance between the top-left vertex and the left border.
The achieved effect: the distances are not equal and form a rectangle:
I tried recreating the issue with a shorter example, but it yielded similar results.
Possibly it could be a window size issue, but that is just my guess.
The desired effect, which forms a square:
VAO code:
let mut VAO = 0;
gl::GenVertexArrays(1, &mut VAO);
gl::BindVertexArray(VAO);
let stride = 4 * std::mem::size_of::<GLfloat>() as GLsizei;
gl::VertexAttribPointer(0, 2, gl::FLOAT, gl::FALSE, stride, std::ptr::null());
gl::EnableVertexAttribArray(0);
gl::VertexAttribPointer(1, 2, gl::FLOAT, gl::FALSE, stride, (2 * std::mem::size_of::<GLfloat>()) as *const std::ffi::c_void);
gl::EnableVertexAttribArray(1);
Vertex shader:
#version 430 core
layout (location = 0) in vec2 aPos;
layout (location = 1) in vec2 aTexCoord;
out vec2 TexCoord;
uniform mat4 translatematrix;
// this matrix is used for drawing objects by iterating through an array
// of (in this case) books, based on their amount and the horizontal amount
void main()
{
gl_Position = translatematrix * vec4(aPos, 0.0, 1.0);
TexCoord = aTexCoord;
}
Frag shader:
#version 430 core
out vec4 FragColor;
in vec2 TexCoord;
uniform sampler2D tex1;
void main()
{
FragColor = texture(tex1, TexCoord);
}
glViewport is set from a GLFW window event:
for (_, event) in glfw::flush_messages(&events) {
...
match event {
glfw::WindowEvent::FramebufferSize(width, height) => {
gl::Viewport(0, 0, width, height); },

You calculate the vertices in NDC (normalized device coordinates), which range through [-1, 1] within the viewport. The top-left vertex is x NDC units away from the corner both horizontally and vertically. However, for a non-square viewport, that would be a different amount of pixels.
Instead, you should calculate all vertex coordinates in pixels, and incorporate a projection matrix into the translatematrix to convert pixels to NDC in the vertex shader.
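A minimal Rust sketch of that idea (the function name and width/height parameters are assumptions, not the asker's code): a column-major pixel-to-NDC ortho matrix, as glUniformMatrix4fv expects, that maps pixel (0, 0) to the top-left NDC corner (-1, 1):
fn pixel_to_ndc_ortho(width: f32, height: f32) -> [f32; 16] {
    [
        2.0 / width, 0.0, 0.0, 0.0,   // column 0: scale x from pixels to NDC
        0.0, -2.0 / height, 0.0, 0.0, // column 1: scale y and flip it (pixel y grows downward)
        0.0, 0.0, 1.0, 0.0,           // column 2: z passes through unchanged
        -1.0, 1.0, 0.0, 1.0,          // column 3: move the origin to the top-left corner
    ]
}
Multiplying this matrix into translatematrix (projection × translation) lets the vertex data stay in pixels, so equal pixel distances stay equal regardless of the window's aspect ratio.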

You need to scale the position along the Y axis with the inverse aspect ratio:
y = x * width / height
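The same correction can be done on the CPU if you keep the vertices in NDC; a minimal Rust sketch (names are assumptions, with width and height being the framebuffer size queried from GLFW):
fn vertical_step(x: f32, width: i32, height: i32) -> f32 {
    // a vertical NDC distance covering the same number of pixels
    // as the horizontal NDC distance x
    x * width as f32 / height as f32
}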

Related

Bad lighting using Phong Method

I'm trying to make a cube, which is irregularly triangulated, but virtually coplanar, shade correctly.
Here is the current result I have:
With wireframe:
Normals calculated in my program:
Normals calculated by meshlabjs.net:
The lighting works properly when using regular size triangles for the cube. As you can see, I'm duplicating vertices and using angle weighting.
lighting.frag
vec4 scene_ambient = vec4(1, 1, 1, 1.0);
struct material
{
vec4 ambient;
vec4 diffuse;
vec4 specular;
float shininess;
};
material frontMaterial = material(
vec4(0.25, 0.25, 0.25, 1.0),
vec4(0.4, 0.4, 0.4, 1.0),
vec4(0.774597, 0.774597, 0.774597, 1.0),
76
);
struct lightSource
{
vec4 position;
vec4 diffuse;
vec4 specular;
float constantAttenuation, linearAttenuation, quadraticAttenuation;
float spotCutoff, spotExponent;
vec3 spotDirection;
};
lightSource light0 = lightSource(
vec4(0.0, 0.0, 0.0, 1.0),
vec4(100.0, 100.0, 100.0, 100.0),
vec4(100.0, 100.0, 100.0, 100.0),
0.1, 1, 0.01,
180.0, 0.0,
vec3(0.0, 0.0, 0.0)
);
vec4 light(lightSource ls, vec3 norm, vec3 deviation, vec3 position)
{
vec3 viewDirection = normalize(vec3(1.0 * vec4(0, 0, 0, 1.0) - vec4(position, 1)));
vec3 lightDirection;
float attenuation;
//ls.position.xyz = cameraPos;
ls.position.z += 50;
if (0.0 == ls.position.w) // directional light?
{
attenuation = 1.0; // no attenuation
lightDirection = normalize(vec3(ls.position));
}
else // point light or spotlight (or other kind of light)
{
vec3 positionToLightSource = vec3(ls.position - vec4(position, 1.0));
float distance = length(positionToLightSource);
lightDirection = normalize(positionToLightSource);
attenuation = 1.0 / (ls.constantAttenuation
+ ls.linearAttenuation * distance
+ ls.quadraticAttenuation * distance * distance);
if (ls.spotCutoff <= 90.0) // spotlight?
{
float clampedCosine = max(0.0, dot(-lightDirection, ls.spotDirection));
if (clampedCosine < cos(radians(ls.spotCutoff))) // outside of spotlight cone?
{
attenuation = 0.0;
}
else
{
attenuation = attenuation * pow(clampedCosine, ls.spotExponent);
}
}
}
vec3 ambientLighting = vec3(scene_ambient) * vec3(frontMaterial.ambient);
vec3 diffuseReflection = attenuation
* vec3(ls.diffuse) * vec3(frontMaterial.diffuse)
* max(0.0, dot(norm, lightDirection));
vec3 specularReflection;
if (dot(norm, lightDirection) < 0.0) // light source on the wrong side?
{
specularReflection = vec3(0.0, 0.0, 0.0); // no specular reflection
}
else // light source on the right side
{
specularReflection = attenuation * vec3(ls.specular) * vec3(frontMaterial.specular)
* pow(max(0.0, dot(reflect(lightDirection, norm), viewDirection)), frontMaterial.shininess);
}
return vec4(ambientLighting + diffuseReflection + specularReflection, 1.0);
}
vec4 generateGlobalLighting(vec3 norm, vec3 position)
{
return light(light0, norm, vec3(2,0,0), position);
}
mainmesh.frag
#version 430
in vec3 f_color;
in vec3 f_normal;
in vec3 f_position;
in float f_opacity;
out vec4 fragColor;
vec4 generateGlobalLighting(vec3 norm, vec3 position);
void main()
{
vec3 norm = normalize(f_normal);
vec4 l0 = generateGlobalLighting(norm, f_position);
fragColor = vec4(f_color, f_opacity) * l0;
}
The code to generate the verts, normals and faces for the painter follows.
m_vertices_buf.resize(m_mesh.num_faces() * 3, 3);
m_normals_buf.resize(m_mesh.num_faces() * 3, 3);
m_faces_buf.resize(m_mesh.num_faces(), 3);
std::map<vertex_descriptor, std::list<Vector3d>> map;
GLDebugging* deb = GLDebugging::getInstance();
auto getAngle = [](Vector3d a, Vector3d b) {
double angle = 0.0;
angle = std::atan2(a.cross(b).norm(), a.dot(b));
return angle;
};
for (const auto& f : m_mesh.faces()) {
auto f_hh = m_mesh.halfedge(f);
//auto n = PMP::compute_face_normal(f, m_mesh);
vertex_descriptor vs[3];
Vector3d ps[3];
int i = 0;
for (const auto& v : m_mesh.vertices_around_face(f_hh)) {
auto p = m_mesh.point(v);
ps[i] = Vector3d(p.x(), p.y(), p.z());
vs[i++] = v;
}
auto n = (ps[1] - ps[0]).cross(ps[2] - ps[0]).normalized();
auto a1 = getAngle((ps[1] - ps[0]).normalized(), (ps[2] - ps[0]).normalized());
auto a2 = getAngle((ps[2] - ps[1]).normalized(), (ps[0] - ps[1]).normalized());
auto a3 = getAngle((ps[0] - ps[2]).normalized(), (ps[1] - ps[2]).normalized());
auto area = PMP::face_area(f, m_mesh);
map[vs[0]].push_back(n * a1);
map[vs[1]].push_back(n * a2);
map[vs[2]].push_back(n * a3);
auto p = m_mesh.point(vs[0]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
p = m_mesh.point(vs[1]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
p = m_mesh.point(vs[2]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
}
int j = 0;
int i = 0;
for (const auto& f : m_mesh.faces()) {
auto f_hh = m_mesh.halfedge(f);
for (const auto& v : m_mesh.vertices_around_face(f_hh)) {
const auto& p = m_mesh.point(v);
m_vertices_buf.row(i) = RowVector3d(p.x(), p.y(), p.z());
Vector3d n(0, 0, 0);
//auto n = PMP::compute_face_normal(f, m_mesh);
Vector3d norm = Vector3d(n.x(), n.y(), n.z());
for (auto val : map[v]) {
norm += val;
}
norm.normalize();
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(norm.x(), norm.y(), norm.z()) * 3,
Vector3d(1.0, 0, 0));
m_normals_buf.row(i++) = RowVector3d(norm.x(), norm.y(), norm.z());
}
m_faces_buf.row(j++) = RowVector3i(i - 3, i - 2, i - 1);
}
The painter code follows:
m_vertexAttrLoc = program.attributeLocation("v_vertex");
m_colorAttrLoc = program.attributeLocation("v_color");
m_normalAttrLoc = program.attributeLocation("v_normal");
m_mvMatrixLoc = program.uniformLocation("v_modelViewMatrix");
m_projMatrixLoc = program.uniformLocation("v_projectionMatrix");
m_normalMatrixLoc = program.uniformLocation("v_normalMatrix");
//m_relativePosLoc = program.uniformLocation("v_relativePos");
m_opacityLoc = program.uniformLocation("v_opacity");
m_colorMaskLoc = program.uniformLocation("v_colorMask");
//bool for unmapping depth color
m_useDepthMap = program.uniformLocation("v_useDepthMap");
program.setUniformValue(m_mvMatrixLoc, modelView);
//uniform used for Color map to regular model switch
program.setUniformValue(m_useDepthMap, (m_showColorMap &&
(m_showProblemAreas || m_showPrepMap || m_showDepthMap || m_showMockupMap)));
QMatrix3x3 normalMatrix = modelView.normalMatrix();
program.setUniformValue(m_normalMatrixLoc, normalMatrix);
program.setUniformValue(m_projMatrixLoc, projection);
//program.setUniformValue(m_relativePosLoc, m_relativePos);
program.setUniformValue(m_opacityLoc, m_opacity);
program.setUniformValue(m_colorMaskLoc, m_colorMask);
glEnableVertexAttribArray(m_vertexAttrLoc);
m_vertices.bind();
glVertexAttribPointer(m_vertexAttrLoc, 3, GL_DOUBLE, false, 3 * sizeof(GLdouble), NULL);
m_vertices.release();
glEnableVertexAttribArray(m_normalAttrLoc);
m_normals.bind();
glVertexAttribPointer(m_normalAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_normals.release();
glEnableVertexAttribArray(m_colorAttrLoc);
if (m_showProblemAreas) {
m_problemColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_problemColorMap.release();
}
else if (m_showPrepMap) {
m_prepColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_prepColorMap.release();
}
else if (m_showMockupMap) {
m_mokupColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_mokupColorMap.release();
}
else {
//m_colors.bind();
//glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
//m_colors.release();
}
m_indices.bind();
glDrawElements(GL_TRIANGLES, m_indices.size() / sizeof(int), GL_UNSIGNED_INT, NULL);
m_indices.release();
glDisableVertexAttribArray(m_vertexAttrLoc);
glDisableVertexAttribArray(m_normalAttrLoc);
glDisableVertexAttribArray(m_colorAttrLoc);
EDIT: Sorry for not being clear enough. The cube is merely an example. My requirement is that the shading works for any kind of mesh: those with very sharp edges, and those that are very organic (like humans or animals).
The issue is clearly explained by the image "Normals calculated in my program" from your question. The normal vectors at the corners and edges of the cube are not perpendicular to the faces:
For a proper specular reflection on plane faces, the normal vectors have to be perpendicular to the sides of the cube.
The vertex coordinate and its normal vector form a tuple with 6 components (x, y, z, nx, ny, nz).
A vertex coordinate on an edge of the cube is adjacent to 2 sides of the cube and 2 (face) normal vectors. The 8 vertex coordinates on the 8 corners of the cube are adjacent to 3 sides (3 normal vectors) each.
To define the vertex attributes with face normal vectors (perpendicular to a side) you have to define multiple tuples with the same vertex coordinate but different normal vectors. You have to use the different attribute tuples to form the triangle primitives on the different sides of the cube.
e.g. If you have defined a cube with the left, front, bottom coordinate of (-1, -1, -1) and the right, back, top coordinate of (1, 1, 1), then the vertex coordinate (-1, -1, -1) is adjacent to the left, front and bottom side of the cube:
         x   y   z   nx  ny  nz
left:   -1  -1  -1   -1   0   0
front:  -1  -1  -1    0  -1   0
bottom: -1  -1  -1    0   0  -1
Use the left attribute tuple to form the triangle primitives on the left side, the front to form the front and bottom for the triangles on the bottom.
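A minimal Rust sketch of that duplication (the interleaved [x, y, z, nx, ny, nz] layout is an assumption): the corner (-1, -1, -1) appears three times, once per adjacent face, each copy carrying that face's normal:
let corner_tuples: [f32; 18] = [
    -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, // used by the triangles on the left face
    -1.0, -1.0, -1.0, 0.0, -1.0, 0.0, // used by the triangles on the front face
    -1.0, -1.0, -1.0, 0.0, 0.0, -1.0, // used by the triangles on the bottom face
];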
In general you have to decide what you want. There is no general approach for all meshes.
Either you have a finely granulated mesh and you want a smooth appearance (e.g. a sphere). In that case your approach is fine: it will generate a smooth light transition on the edges between the primitives.
Or you have a mesh with hard edges like a cube. In that case you have to "duplicate" vertices. If 2 (or even more) triangles share a vertex coordinate, but the face normal vectors are different, then you have to create a separate tuple, for all the combinations of the vertex coordinate and the face normal vector.
For a general "smooth" solution you would have to interpolate the normal vectors of the vertex coordinates which are in the middle of plane surfaces, according to the surrounding geometry. That means if a bunch of triangle primitives form a plane, then all the normal vectors of the vertices have to be computed dependent on their position on the plane. At the centroid the normal vector is equal to the face normal vector. For all other points the normal vector has to be interpolated with the normal vectors of the surrounding faces.
Anyway, that seems to be an XY problem. Why is there a "vertex" somewhere in the middle of a plane? Probably the plane is tessellated. But if the plane is tessellated, why are the normal vectors not interpolated too, during the tessellation process?
As mentioned in the other answers, the problem is your mesh normals.
Computing an average normal, like you are doing currently, is what you would want to do for a smooth object like a sphere; CGAL has a function for that: CGAL::Polygon_mesh_processing::compute_vertex_normal. For a cube, what you want is normals perpendicular to the faces; CGAL has a function for that too: CGAL::Polygon_mesh_processing::compute_face_normal.
To debug the normals you can just set fragColor = vec4(norm, 1); in mainmesh.frag. Here the cubes on the left have averaged (smooth) normals and the ones on the right have face (flat) normals:
And shaded they look like this:
shading has to work for any kind of mesh (a cube or any organic mesh)
For that you can use something like per_corner_normals, which:
Implements a simple scheme which computes corner normals as averages of normals of faces incident on the corresponding vertex which do not deviate by more than a specified dihedral angle (e.g. 20°)
And this is what it looks like with an angle of 1°, 20°, 100°:
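A minimal Rust sketch of the scheme described in that quote (all names and types here are assumptions, not the library's actual implementation): for each corner, average only those incident face normals that stay within the dihedral threshold of the corner's own face normal:
fn corner_normal(
    face_normal: [f32; 3],         // normal of the face that owns this corner
    incident_normals: &[[f32; 3]], // unit normals of all faces sharing the vertex
    max_angle_rad: f32,
) -> [f32; 3] {
    let dot = |a: [f32; 3], b: [f32; 3]| a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
    let threshold = max_angle_rad.cos();
    let mut sum = [0.0f32; 3];
    for n in incident_normals {
        // keep a face only if its normal deviates by less than max_angle_rad;
        // the owning face always passes, so sum is never zero
        if dot(face_normal, *n) >= threshold {
            for i in 0..3 {
                sum[i] += n[i];
            }
        }
    }
    let len = dot(sum, sum).sqrt();
    [sum[0] / len, sum[1] / len, sum[2] / len]
}
A small angle reproduces flat (face) normals and a large angle reproduces fully averaged (smooth) normals, which matches the 1°/20°/100° comparison above.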
In your image, we can see that the inner triangle (the one that doesn't have a point on the cube's edges, in the top-left quarter) has a homogeneous color.
My interpretation is that the triangles that have points on the edges/corners of the cube share the same vertex, and therefore share the same normal, which is somehow averaged. So it's not perpendicular to the faces.
To debug this, you should create a simple cube geometry with 6 faces and 2 triangles per face, which makes 12 triangles.
Two options:
If you have 8 vertices in the geometry, the corners are shared between triangles of different faces, and the issue comes from the geometry generator.
If you have 6 × 4 = 24 vertices in the geometry, the truth lies elsewhere.
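A minimal Rust sketch of that test geometry (the names and the interleaved [x, y, z, nx, ny, nz] layout are assumptions): 24 vertices, each carrying the normal of its face, and 12 triangles:
fn flat_cube() -> (Vec<[f32; 6]>, Vec<u32>) {
    // one (axis, sign) pair per face normal: +x, -x, +y, -y, +z, -z
    let faces = [(0usize, 1.0f32), (0, -1.0), (1, 1.0), (1, -1.0), (2, 1.0), (2, -1.0)];
    let (mut verts, mut idx) = (Vec::new(), Vec::new());
    for (f, &(axis, sign)) in faces.iter().enumerate() {
        let (u, v) = ((axis + 1) % 3, (axis + 2) % 3); // the two axes spanning the face
        for (du, dv) in [(-1.0f32, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)] {
            let mut t = [0.0f32; 6];
            t[axis] = sign;     // position component along the face normal
            t[u] = du * sign;   // multiplying by sign keeps the winding outward
            t[v] = dv;
            t[3 + axis] = sign; // the face normal, duplicated for every vertex
            verts.push(t);
        }
        let b = (f * 4) as u32;
        idx.extend_from_slice(&[b, b + 1, b + 2, b, b + 2, b + 3]);
    }
    (verts, idx)
}
If the renderer shades this cube with uniform faces but your generator's output does not, the averaging in the generator is the culprit.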

X-Y-Distance from camera to object in vertex shader

I want to show some fog / aerial view in my application. But I only want to use the x,y world distance from camera to the model to determine the appearance.
I already managed to get the signed z-distance from camera to the models with this calculation.
The red objects have a positive z-distance to the camera; the blue ones are negative, in contrast to this implementation, where all values seem positive.
Vertex shader:
uniform mat4 u_mvp; // Model-View-Projection-Matrix
uniform mat4 u_mv; // Model-View-Matrix
uniform vec4 u_color; // Object color
attribute vec4 a_pos; // Vertex position
varying vec4 color; // Out color
// Fog
const float density = 0.007;
const float gradient = 1.5;
void main() {
gl_Position = u_mvp * a_pos;
// Fog
float distance = -(u_mv * a_pos).z; // Direct distance from camera
// 4000 is some invented constant to bring distance to ~[-1,1].
float visibility = clamp((distance / 4000.0), 0.0, 1.0);
color = mix(vec4(1.0, 0.0, 0.0, 1.0), u_color, visibility);
if(distance < 0){
color = vec4(0.0, 0.0, 1.0, 1.0);
}
}
Fragment shader:
varying vec4 color;
void main() {
gl_FragColor = color;
}
Why can there be a negative z-value? Or is it common?
How can I calculate the x,y world distance to camera?
If you want to get the distance to the camera in the range [-1, 1], then you can use the clip space coordinate. The clip space coordinate can be transformed to a normalized device coordinate by the perspective divide. The normalized device coordinates (x, y and z) are in the range [-1, 1] and can be transformed to the range [0, 1] with ease:
gl_Position = u_mvp * a_pos; // clip space
vec3 ndc = gl_Position.xyz / gl_Position.w; // NDC in [-1, 1] (by perspective divide)
float depth = ndc.z * 0.5 + 0.5; // depth in [0, 1]

How to normalize the vertices array in OpenGL?

I am trying to draw a triangle on my OpenGL window.
What I want is to specify three points and have OpenGL draw the triangle based on them. The problem is that, for this, I have to convert my coordinates to normalized device coordinates.
After normalization, it would look something like this:
Here the three (X, Y)s (the points of the triangle) will be specified by the user using three vectors.
But the problem is that the vertices (the red points shown in the example figure) are in the default OpenGL coordinates, and I want to make a 2D triangle, so I would not require 3D coordinates for drawing it.
Can you show me a way to convert the vertices from screen coordinates (0.0 to the width or height of the screen on the respective axis) into normalized coordinates (-1.0 to 1.0 on both axes)?
The code of the shaders(for the triangle):-
::Vertex Shader::
#version 330 core
layout (location = 0) in vec3 position;
void main()
{
gl_Position = vec4(position.x, position.y, position.z, 1.0);
}
::Fragment Shader::
#version 330 core
out vec4 color;
void main()
{
color = vec4(1.0f, 0.5f, 0.2f, 1.0f);
}
What the vertices array looks like (sample code):
GLfloat x = 0; // First X Point
GLfloat y = 0; // First Y Point
GLfloat x1 = 10; // Second X Point
GLfloat y1 = 10; // Second Y Point
GLfloat x2 = 0; // Third X Point
GLfloat y2 = 10; // Third Y Point
GLfloat vertices[] =
{
2 * x / WIDTH - 1, 2 * y / HEIGHT - 1, 0.0f, // Left
2 * x1 / WIDTH - 1, 2 * y1 / HEIGHT - 1, 0.0f, // Right
2 * x2 / WIDTH - 1, 2 * y2 / HEIGHT - 1, 0.0f // Top
};
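A minimal Rust sketch of the same mapping as a reusable helper (the name to_ndc is an assumption): it maps [0, width] × [0, height] to [-1, 1] on both axes:
fn to_ndc(x: f32, y: f32, width: f32, height: f32) -> (f32, f32) {
    // 0 maps to -1.0 and width (or height) maps to +1.0
    (2.0 * x / width - 1.0, 2.0 * y / height - 1.0)
}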

Random coloured blocks

I have recently used the FreeType library to read font files and followed a guide on how to display text in 2D.
I tried to extend the code to support 3D text rendering, but I started having OpenGL-related problems with it.
At certain angles the text picture starts to fade, and the whole axis on which the text is located starts to inherit its colour.
Fading:
Black slice:
All the related code is:
Drawing function (inherited from learnopengl.com)
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Activate corresponding render state
glActiveTexture(GL_TEXTURE0);
glBindVertexArray(VAO);
glEnableVertexAttribArray(0);
scale /= RESOLUTION;
vec2 start(x, y);
// Iterate through all characters
std::string::const_iterator c;
for (c = text.begin(); c != text.end(); c++)
{
Character ch = Characters[*c];
if (*c == '\r' || (x-start.x > xMax && xMax != 0.0f))
{
y += ((ch.Advance >> 6) + 16) * scale ;
x = start.x;
continue;
}
GLfloat xpos = x + ch.Bearing.x * scale;
GLfloat ypos = y + (ch.Size.y - ch.Bearing.y) * scale;
GLfloat w = ch.Size.x * scale;
GLfloat h = ch.Size.y * scale;
// Update VBO for each character
GLfloat vertices[6][4] = {
{ xpos, ypos - h, 0.0, 0.0 },
{ xpos, ypos, 0.0, 1.0 },
{ xpos + w, ypos, 1.0, 1.0 },
{ xpos, ypos - h, 0.0, 0.0 },
{ xpos + w, ypos, 1.0, 1.0 },
{ xpos + w, ypos - h, 1.0, 0.0 }
};
// Render glyph texture over quad
glBindTexture(GL_TEXTURE_2D, ch.TextureID);
// Update content of VBO memory
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(vertices), vertices);
// Render quad
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Now advance cursors for next glyph (note that advance is number of 1/64 pixels)
x += (ch.Advance >> 6) * scale; // Bitshift by 6 to get value in pixels (2^6 = 64)
}
glBindVertexArray(0);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_BLEND);
Shader uniform initialization
ShaderBuilder::LoadShader(shader)->Add_mat4("projection", projection).Add_mat4("view", view).
Add_mat4("model", model).Add_vec3("textColor", color).Add_texture("text", 0);
Vertex Shader
#version 400 core
layout (location = 0) in vec4 vertex; // xy = screen position, zw = texture coords
out vec2 TexCoords;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
void main()
{
vec2 vertexGL = (vertex.xy - vec2(400,300)) / vec2(400,300);
vertexGL = vertex.xy;
vec4 position = projection * view * model * vec4(vertexGL.xy, 0.0, 1.0);
gl_Position = position / position.w;
TexCoords = vertex.zw;
}
Fragment Shader
#version 400 core
in vec2 TexCoords;
out vec4 color;
uniform sampler2D text;
uniform vec3 textColor;
void main()
{
vec4 sampled = vec4(1.0, 1.0, 1.0, texture(text, TexCoords).r);
color = vec4(textColor, 1.0) * sampled;
//color = vec4(1);
}
I finally found the mistake: for some unknown reason I thought normalizing my vertex coords after applying the matrix multiplication would be good practice.
Apparently it isn't: the hardware performs the perspective divide itself after clipping, so dividing by w manually breaks clipping and perspective-correct interpolation.
vec4 position = projection * view * model * vec4(vertexGL.xy, 0.0, 1.0);
gl_Position = position;// / position.w;
So, as the comment shows, removing the manual divide fixed the mistake.

2D lighting with shaders - light radius affected by window size

I have a shader which adds lighting to an otherwise 2D scene (lights are slightly above the 2D plane). In my fragment shader, I loop through each light to calculate the direction and distance by applying my ortho matrix to the light's pos.
The problem is, the light's "radius" is affected by the size and aspect ratio of my window. I thought that translating the coordinates using the ortho matrix would ensure that the screen size wouldn't matter, but a wide window produces an oval light, and smaller windows produce smaller ovals than larger windows. Should I be using another matrix of some sort?
Full shader here (change window size to see the unwanted effect): http://glsl.heroku.com/e#14464.0
uniform vec2 resolution;
void main(void)
{
//orthographic matrix
mat4 ortho_matrix = mat4(
2.0/resolution.x, 0, 0, 0,
0, 2.0/-resolution.y, 0, 0,
0, 0, -1, 0,
-1, 1, 0, 1
);
//surface normal of the 2D plane (looking straight down)
vec3 surface_normal = vec3(0.0, 0.0, 1.0);
//screen position of the light
vec2 light_screen_pos = vec2(650, 150);
//translate light's position to normalized coordinates
//the z value makes sure it is slightly above the 2D plane
vec4 light_ortho_pos = ortho_matrix * vec4(light_screen_pos, -0.03, 1.0);
//calculate the light for this fragment
vec3 light_direction = light_ortho_pos.xyz - vec3(gl_FragCoord.x / resolution.x, gl_FragCoord.y / resolution.y, 0);
float dist = length(light_direction);
light_direction = normalize(light_direction);
vec3 light = clamp(dot(surface_normal, light_direction), 0.0, 1.0) * vec3(0.5, 0.5, 0.5);
vec3 cel_light = step(0.15, (light.r + light.g + light.b) / 3.0) * light;
gl_FragColor = vec4(pow(light + cel_light, vec3(0.4545)), 1.0);
}
Note: I know it's not optimal to make this calculation for each light, each pixel - I should be passing the light's position in another uniform probably.
The light direction needs to be scaled according to the screen resolution. I ended up adding the following code to make it work, with an arbitrary brightness of 500 or so:
light_direction *= vec3(resolution.x / brightness, resolution.y / brightness, 1.0);