Using Rust with the glfw and gl bindings.
I have created an array of vertices for use in a VBO:
let x = 2.0 / ((HORIZONTAL_BOOK_AMOUNT * 2) + HORIZONTAL_BOOK_AMOUNT + 1) as f32;
// x is the desired distance: the width of NDC divided by the number of horizontal objects to draw
// in the attached picture, HORIZONTAL_BOOK_AMOUNT would be 4
let mut vbo = 0;
let side = x * IMAGE_HEIGHT_MULTIPLIER;
// here x is multiplied by a constant to give the desired object height
let vertices: Vec<f32> = vec![
-1.0 + x, 1.0 - x, 0.0, 1.0, //top left
-1.0 + 3.0 * x, 1.0 - x, 1.0, 1.0, // top right
-1.0 + 3.0 * x, 1.0 - x - (2.0 * side), 1.0, 0.0, //bottom right
-1.0 + 3.0 * x, 1.0 - x - (2.0 * side), 1.0, 0.0, //bottom right
-1.0 + x, 1.0 - x - (2.0 * side), 0.0, 0.0, //bottom left
-1.0 + x, 1.0 - x, 0.0, 1.0]; //top left
gl::GenBuffers(1, &mut vbo);
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::BufferData(
    gl::ARRAY_BUFFER,
    (vertices.len() * std::mem::size_of::<GLfloat>()) as GLsizeiptr,
    vertices.as_ptr() as *const std::ffi::c_void,
    gl::STATIC_DRAW,
);
The problem is that, after drawing, the distance between the top-left vertex and the top border should be equal to the distance between the top-left vertex and the left border.
The achieved effect: the distances are not equal and form a rectangle:
I tried recreating the issue with a shorter example, but it yielded similar results.
Possibly it could be a window size issue, but that is just my guess.
The desired effect, which forms a square:
VAO code:
let mut VAO = 0;
gl::GenVertexArrays(1, &mut VAO);
gl::BindVertexArray(VAO);
let stride = 4 * std::mem::size_of::<GLfloat>() as GLsizei;
gl::VertexAttribPointer(0, 2, gl::FLOAT, gl::FALSE, stride, std::ptr::null());
gl::EnableVertexAttribArray(0);
gl::VertexAttribPointer(1, 2, gl::FLOAT, gl::FALSE, stride, (2 * std::mem::size_of::<GLfloat>()) as *const std::ffi::c_void);
gl::EnableVertexAttribArray(1);
Vertex shader:
#version 430 core
layout (location = 0) in vec2 aPos;
layout (location = 1) in vec2 aTexCoord;
out vec2 TexCoord;
uniform mat4 translatematrix;
// this matrix is used for drawing objects by iterating through an array
// of (in this case) books, based on their amount and the horizontal amount
void main()
{
gl_Position = translatematrix * vec4(aPos, 0.0, 1.0);
TexCoord = aTexCoord;
}
Frag shader:
#version 430 core
out vec4 FragColor;
in vec2 TexCoord;
uniform sampler2D tex1;
void main()
{
FragColor = texture(tex1, TexCoord);
}
glViewport is set from the glfw framebuffer-size window event:
for (_, event) in glfw::flush_messages(&events) {
...
match event {
glfw::WindowEvent::FramebufferSize(width, height) => {
gl::Viewport(0, 0, width, height); },
You calculate the vertices in NDC (normalized device coordinates), which range through [-1, 1] within the viewport. The top-left vertex is x NDC units away from the corner both horizontally and vertically. However, for a non-square viewport, that would be a different amount of pixels.
Instead you should calculate all vertex coordinates in pixels, and incorporate a projection matrix into the translatematrix to convert pixels to NDC in the vertex shader.
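As a sketch of that idea (a hypothetical helper, assuming pixel coordinates with the origin at the top-left), you can build a pixel-to-NDC matrix on the CPU and pre-multiply it into translatematrix before uploading:

// Column-major (OpenGL convention) matrix mapping pixel coordinates
// (origin top-left) to NDC: ndc_x = 2*px/width - 1, ndc_y = 1 - 2*py/height.
fn pixel_to_ndc(width: f32, height: f32) -> [f32; 16] {
    [
        2.0 / width, 0.0,          0.0, 0.0,
        0.0,        -2.0 / height, 0.0, 0.0,
        0.0,         0.0,          1.0, 0.0,
       -1.0,         1.0,          0.0, 1.0,
    ]
}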
Alternatively, you can scale distances along the Y axis by the aspect ratio (width / height):
y = x * width / height
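A minimal sketch of that in the question's setup, assuming the glfw window handle is in scope, computes a separate vertical step:

// Query the framebuffer size (the same values the FramebufferSize event delivers).
let (fb_width, fb_height) = window.get_framebuffer_size();
let aspect = fb_width as f32 / fb_height as f32;

let x = 2.0 / ((HORIZONTAL_BOOK_AMOUNT * 2) + HORIZONTAL_BOOK_AMOUNT + 1) as f32;
// A vertical NDC step of x * aspect spans the same number of pixels as a
// horizontal step of x, so the top and left margins come out equal.
let y = x * aspect;
let vertices: Vec<f32> = vec![
    -1.0 + x,       1.0 - y, 0.0, 1.0, // top left
    -1.0 + 3.0 * x, 1.0 - y, 1.0, 1.0, // top right
    // ... remaining vertices scale their vertical offsets by `aspect` the same way
];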
I'm drawing a 2D tilemap using OpenGL and I would like to be able to know where the position of the mouse corresponds to in my scene. This is what I currently have:
To draw this screen, this projection is used:
glm::mat4 projection = glm::perspective(
glm::radians(45.0f),
(float)screenWidth / (float)screenHeight,
1.0f,
100.0f
);
Then this camera is used to move and zoom the tilemap:
glm::vec3 camera(0.0f, 0.0f, -1.00f);
Which then translates into a camera view:
glm::mat4 cameraView = glm::translate(state.projection, camera);
That finally gets passed through a uniform to the vertex shader:
#version 330 core
layout(location = 0) in vec2 aPosition;
uniform mat4 uCameraView;
void main() {
gl_Position = uCameraView * vec4(aPosition.x, aPosition.y, 0.0f, 1.0f);
}
This shader receives normalized vertices, which means I never know how many pixels a tile occupies on my screen.
Now I'm trying to calculate where the mouse would be inside my scene if it were projected like a ray into the tilemap. If I manage to get the position of that collision, I can tell which tile the mouse is hovering over.
What would be the best approach to find this coordinate?
In the end I found this solution to map the mouse pixel coordinates to the perspective:
// Project one tile (a size.tile.regular square) through the zoomed perspective
glm::vec4 tile = glm::translate(projection, glm::vec3(0.0f, 0.0f, camera.z)) *
    glm::vec4(size.tile.regular, size.tile.regular, camera.z, 1.0f);
// Perspective divide to get the tile's extent in NDC
glm::vec3 ndcTile =
    glm::vec3(tile.x / tile.w, tile.y / tile.w, tile.z / tile.w);
// Approximate size of one tile in pixels at the current zoom
float pixelUnit = windowWidth * ndcTile.x;
// Camera offset converted from tile units to pixels
float pixelCameraX = (camera.x / size.tile.regular) * pixelUnit;
float pixelCameraY = (camera.y / size.tile.regular) * pixelUnit;
// Screen-space position of the tilemap origin
float originX = (windowWidth / 2.0f) + pixelCameraX;
float originY = (windowHeight / 2.0f) - pixelCameraY;
// Cursor position in tile units relative to that origin
float tileX = (state.input.pixelCursorX - originX) / pixelUnit;
float tileY = (state.input.pixelCursorY - originY) / pixelUnit;
// Shift negative values so truncation rounds toward negative infinity
selectedTileX = tileX > 0 ? tileX : tileX - 1;
selectedTileY = tileY > 0 ? tileY : tileY - 1;
I'm trying to make a cube, which is irregularly triangulated but virtually coplanar, shade correctly.
Here is the current result I have:
With wireframe:
Normals calculated in my program:
Normals calculated by meshlabjs.net:
The lighting works properly when using regular size triangles for the cube. As you can see, I'm duplicating vertices and using angle weighting.
lighting.frag
vec4 scene_ambient = vec4(1, 1, 1, 1.0);
struct material
{
vec4 ambient;
vec4 diffuse;
vec4 specular;
float shininess;
};
material frontMaterial = material(
vec4(0.25, 0.25, 0.25, 1.0),
vec4(0.4, 0.4, 0.4, 1.0),
vec4(0.774597, 0.774597, 0.774597, 1.0),
76
);
struct lightSource
{
vec4 position;
vec4 diffuse;
vec4 specular;
float constantAttenuation, linearAttenuation, quadraticAttenuation;
float spotCutoff, spotExponent;
vec3 spotDirection;
};
lightSource light0 = lightSource(
vec4(0.0, 0.0, 0.0, 1.0),
vec4(100.0, 100.0, 100.0, 100.0),
vec4(100.0, 100.0, 100.0, 100.0),
0.1, 1, 0.01,
180.0, 0.0,
vec3(0.0, 0.0, 0.0)
);
vec4 light(lightSource ls, vec3 norm, vec3 deviation, vec3 position)
{
vec3 viewDirection = normalize(vec3(1.0 * vec4(0, 0, 0, 1.0) - vec4(position, 1)));
vec3 lightDirection;
float attenuation;
//ls.position.xyz = cameraPos;
ls.position.z += 50;
if (0.0 == ls.position.w) // directional light?
{
attenuation = 1.0; // no attenuation
lightDirection = normalize(vec3(ls.position));
}
else // point light or spotlight (or other kind of light)
{
vec3 positionToLightSource = vec3(ls.position - vec4(position, 1.0));
float distance = length(positionToLightSource);
lightDirection = normalize(positionToLightSource);
attenuation = 1.0 / (ls.constantAttenuation
+ ls.linearAttenuation * distance
+ ls.quadraticAttenuation * distance * distance);
if (ls.spotCutoff <= 90.0) // spotlight?
{
float clampedCosine = max(0.0, dot(-lightDirection, ls.spotDirection));
if (clampedCosine < cos(radians(ls.spotCutoff))) // outside of spotlight cone?
{
attenuation = 0.0;
}
else
{
attenuation = attenuation * pow(clampedCosine, ls.spotExponent);
}
}
}
vec3 ambientLighting = vec3(scene_ambient) * vec3(frontMaterial.ambient);
vec3 diffuseReflection = attenuation
* vec3(ls.diffuse) * vec3(frontMaterial.diffuse)
* max(0.0, dot(norm, lightDirection));
vec3 specularReflection;
if (dot(norm, lightDirection) < 0.0) // light source on the wrong side?
{
specularReflection = vec3(0.0, 0.0, 0.0); // no specular reflection
}
else // light source on the right side
{
specularReflection = attenuation * vec3(ls.specular) * vec3(frontMaterial.specular)
* pow(max(0.0, dot(reflect(lightDirection, norm), viewDirection)), frontMaterial.shininess);
}
return vec4(ambientLighting + diffuseReflection + specularReflection, 1.0);
}
vec4 generateGlobalLighting(vec3 norm, vec3 position)
{
return light(light0, norm, vec3(2,0,0), position);
}
mainmesh.frag
#version 430
in vec3 f_color;
in vec3 f_normal;
in vec3 f_position;
in float f_opacity;
out vec4 fragColor;
vec4 generateGlobalLighting(vec3 norm, vec3 position);
void main()
{
vec3 norm = normalize(f_normal);
vec4 l0 = generateGlobalLighting(norm, f_position);
fragColor = vec4(f_color, f_opacity) * l0;
}
Below is the code that generates the vertices, normals and faces for the painter.
m_vertices_buf.resize(m_mesh.num_faces() * 3, 3);
m_normals_buf.resize(m_mesh.num_faces() * 3, 3);
m_faces_buf.resize(m_mesh.num_faces(), 3);
std::map<vertex_descriptor, std::list<Vector3d>> map;
GLDebugging* deb = GLDebugging::getInstance();
auto getAngle = [](Vector3d a, Vector3d b) {
double angle = 0.0;
angle = std::atan2(a.cross(b).norm(), a.dot(b));
return angle;
};
for (const auto& f : m_mesh.faces()) {
auto f_hh = m_mesh.halfedge(f);
//auto n = PMP::compute_face_normal(f, m_mesh);
vertex_descriptor vs[3];
Vector3d ps[3];
int i = 0;
for (const auto& v : m_mesh.vertices_around_face(f_hh)) {
auto p = m_mesh.point(v);
ps[i] = Vector3d(p.x(), p.y(), p.z());
vs[i++] = v;
}
auto n = (ps[1] - ps[0]).cross(ps[2] - ps[0]).normalized();
auto a1 = getAngle((ps[1] - ps[0]).normalized(), (ps[2] - ps[0]).normalized());
auto a2 = getAngle((ps[2] - ps[1]).normalized(), (ps[0] - ps[1]).normalized());
auto a3 = getAngle((ps[0] - ps[2]).normalized(), (ps[1] - ps[2]).normalized());
auto area = PMP::face_area(f, m_mesh);
map[vs[0]].push_back(n * a1);
map[vs[1]].push_back(n * a2);
map[vs[2]].push_back(n * a3);
auto p = m_mesh.point(vs[0]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
p = m_mesh.point(vs[1]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
p = m_mesh.point(vs[2]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
}
int j = 0;
int i = 0;
for (const auto& f : m_mesh.faces()) {
auto f_hh = m_mesh.halfedge(f);
for (const auto& v : m_mesh.vertices_around_face(f_hh)) {
const auto& p = m_mesh.point(v);
m_vertices_buf.row(i) = RowVector3d(p.x(), p.y(), p.z());
Vector3d n(0, 0, 0);
//auto n = PMP::compute_face_normal(f, m_mesh);
Vector3d norm = Vector3d(n.x(), n.y(), n.z());
for (auto val : map[v]) {
norm += val;
}
norm.normalize();
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(norm.x(), norm.y(), norm.z()) * 3,
Vector3d(1.0, 0, 0));
m_normals_buf.row(i++) = RowVector3d(norm.x(), norm.y(), norm.z());
}
m_faces_buf.row(j++) = RowVector3i(i - 3, i - 2, i - 1);
}
And here is the painter code:
m_vertexAttrLoc = program.attributeLocation("v_vertex");
m_colorAttrLoc = program.attributeLocation("v_color");
m_normalAttrLoc = program.attributeLocation("v_normal");
m_mvMatrixLoc = program.uniformLocation("v_modelViewMatrix");
m_projMatrixLoc = program.uniformLocation("v_projectionMatrix");
m_normalMatrixLoc = program.uniformLocation("v_normalMatrix");
//m_relativePosLoc = program.uniformLocation("v_relativePos");
m_opacityLoc = program.uniformLocation("v_opacity");
m_colorMaskLoc = program.uniformLocation("v_colorMask");
//bool for unmapping depth color
m_useDepthMap = program.uniformLocation("v_useDepthMap");
program.setUniformValue(m_mvMatrixLoc, modelView);
//uniform used for Color map to regular model switch
program.setUniformValue(m_useDepthMap, (m_showColorMap &&
(m_showProblemAreas || m_showPrepMap || m_showDepthMap || m_showMockupMap)));
QMatrix3x3 normalMatrix = modelView.normalMatrix();
program.setUniformValue(m_normalMatrixLoc, normalMatrix);
program.setUniformValue(m_projMatrixLoc, projection);
//program.setUniformValue(m_relativePosLoc, m_relativePos);
program.setUniformValue(m_opacityLoc, m_opacity);
program.setUniformValue(m_colorMaskLoc, m_colorMask);
glEnableVertexAttribArray(m_vertexAttrLoc);
m_vertices.bind();
glVertexAttribPointer(m_vertexAttrLoc, 3, GL_DOUBLE, false, 3 * sizeof(GLdouble), NULL);
m_vertices.release();
glEnableVertexAttribArray(m_normalAttrLoc);
m_normals.bind();
glVertexAttribPointer(m_normalAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_normals.release();
glEnableVertexAttribArray(m_colorAttrLoc);
if (m_showProblemAreas) {
m_problemColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_problemColorMap.release();
}
else if (m_showPrepMap) {
m_prepColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_prepColorMap.release();
}
else if (m_showMockupMap) {
m_mokupColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_mokupColorMap.release();
}
else {
//m_colors.bind();
//glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
//m_colors.release();
}
m_indices.bind();
glDrawElements(GL_TRIANGLES, m_indices.size() / sizeof(int), GL_UNSIGNED_INT, NULL);
m_indices.release();
glDisableVertexAttribArray(m_vertexAttrLoc);
glDisableVertexAttribArray(m_normalAttrLoc);
glDisableVertexAttribArray(m_colorAttrLoc);
EDIT: Sorry for not being clear enough. The cube is merely an example. My requirements are that the shading works for any kind of mesh. Those with very sharp edges, and those that are very organic (like humans or animals).
The issue is clearly illustrated by the image "Normals calculated in my program" from your question. The normal vectors at the corners and edges of the cube are not perpendicular to the faces:
For a proper specular reflection on planar faces, the normal vectors have to be perpendicular to the sides of the cube.
The vertex coordinate and its normal vector form a tuple with 6 components (x, y, z, nx, ny, nz).
A vertex coordinate on an edge of the cube is adjacent to 2 sides of the cube and 2 (face) normal vectors. The 8 vertex coordinates on the 8 corners of the cube are each adjacent to 3 sides (3 normal vectors).
To define the vertex attributes with face normal vectors (perpendicular to a side), you have to define multiple tuples with the same vertex coordinate but different normal vectors. You have to use the different attribute tuples to form the triangle primitives on the different sides of the cube.
e.g. If you have defined a cube with the left, front, bottom coordinate of (-1, -1, -1) and the right, back, top coordinate of (1, 1, 1), then the vertex coordinate (-1, -1, -1) is adjacent to the left, front and bottom side of the cube:
         x   y   z   nx  ny  nz
left:   -1  -1  -1   -1   0   0
front:  -1  -1  -1    0  -1   0
bottom: -1  -1  -1    0   0  -1
Use the left attribute tuple to form the triangle primitives on the left side, the front tuple for the front side, and the bottom tuple for the triangles on the bottom.
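As an illustrative sketch (plain interleaved arrays, not the question's buffers), the corner (-1, -1, -1) then appears three times, once per adjacent face:

// Interleaved (x, y, z, nx, ny, nz) attribute tuples. The same corner is
// duplicated once per adjacent face, each copy carrying that face's normal.
float cornerTuples[] = {
    -1.0f, -1.0f, -1.0f,  -1.0f,  0.0f,  0.0f, // referenced by left-face triangles
    -1.0f, -1.0f, -1.0f,   0.0f, -1.0f,  0.0f, // referenced by front-face triangles
    -1.0f, -1.0f, -1.0f,   0.0f,  0.0f, -1.0f, // referenced by bottom-face triangles
};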
In general you have to decide what you want; there is no general approach for all meshes.
Either you have a finely granulated mesh and you want a smooth appearance (e.g. a sphere). In that case your approach is fine: it will generate a smooth light transition on the edges between the primitives.
Or you have a mesh with hard edges, like a cube. In that case you have to "duplicate" vertices. If 2 (or more) triangles share a vertex coordinate but their face normal vectors differ, then you have to create a separate tuple for each combination of the vertex coordinate and face normal vector.
For a general "smooth" solution you would have to interpolate the normal vectors of the vertex coordinates which are in the middle of planar surfaces, according to the surrounding geometry. That means that if a bunch of triangle primitives form a plane, then all the normal vectors of the vertices have to be computed dependent on their position on the plane. At the centroid the normal vector is equal to the face normal vector; for all other points the normal vector has to be interpolated with the normal vectors of the surrounding faces.
Anyway, that seems to be an XY problem. Why is there a "vertex" somewhere in the middle of a plane? Probably the plane is tessellated. But if the plane is tessellated, why are the normal vectors not interpolated too during the tessellation process?
As mentioned in the other answers, the problem is your mesh normals.
Computing an average normal, like you are doing currently, is what you would want for a smooth object like a sphere; CGAL has a function for that, CGAL::Polygon_mesh_processing::compute_vertex_normal. For a cube, what you want is normals perpendicular to the faces, and CGAL has a function for that too: CGAL::Polygon_mesh_processing::compute_face_normal.
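A minimal sketch of the flat-shaded variant, reusing the question's second loop (the compute_face_normal call is even present there, commented out):

namespace PMP = CGAL::Polygon_mesh_processing;
int i = 0;
int j = 0;
for (const auto& f : m_mesh.faces()) {
    // One normal for the whole face, assigned to all three duplicated vertices.
    auto n = PMP::compute_face_normal(f, m_mesh);
    for (const auto& v : m_mesh.vertices_around_face(m_mesh.halfedge(f))) {
        const auto& p = m_mesh.point(v);
        m_vertices_buf.row(i) = RowVector3d(p.x(), p.y(), p.z());
        m_normals_buf.row(i++) = RowVector3d(n.x(), n.y(), n.z());
    }
    m_faces_buf.row(j++) = RowVector3i(i - 3, i - 2, i - 1);
}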
To debug the normals you can just set fragColor = vec4(norm, 1); in mainmesh.frag. Here the cubes on the left have averaged (smooth) normals and the ones on the right have face (flat) normals. And shaded they look like this:
shading has to work for any kind of mesh (a cube or any organic mesh)
For that you can use something like per_corner_normals, which:
Implements a simple scheme which computes corner normals as averages
of normals of faces incident on the corresponding vertex which do not
deviate by more than a specified dihedral angle (e.g. 20°)
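With libigl, for example, a minimal sketch (assuming the mesh is already in Eigen matrices V and F):

#include <igl/per_corner_normals.h>
#include <Eigen/Core>

Eigen::MatrixXd V;  // #V x 3 vertex positions (assumed filled elsewhere)
Eigen::MatrixXi F;  // #F x 3 triangle indices
Eigen::MatrixXd CN; // output: one normal per face corner, (3 * #F) x 3

// Corners whose incident face normals deviate by more than 20 degrees
// stay sharp; the rest are averaged, giving a smooth appearance.
igl::per_corner_normals(V, F, 20.0, CN);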
And this is what it looks like with an angle of 1°, 20°, 100°:
In your image, we can see that the inner triangle (the one that doesn't have a point on the cube's edges, in the top-left quarter) has a homogeneous color.
My interpretation is that triangles that have points on the edge/corner of the cube share the same vertex and therefore the same normal, and somehow the normals are averaged. So they are not perpendicular to the faces.
To debug this, you should create a simple cube geometry with 6 faces and 2 triangles per face. That makes 12 triangles.
Two options:
If you have 8 vertices in the geometry, the corners are shared between triangles of different faces, and the issue comes from the geometry generator.
If you have 6 × 4 = 24 vertices in the geometry, the truth lies elsewhere.
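As a quick sketch of that check, assuming the question's CGAL surface mesh:

// 8 vertices  -> corners are shared, so per-vertex normals get averaged;
// 24 vertices -> each face owns its corners, so flat face normals are possible.
std::cout << "vertices: " << m_mesh.number_of_vertices() << std::endl;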
I'm programming a GUI library in OpenGL and decided to add rounded corners because I feel like they give a much more professional look to the units.
I've implemented the common
length(max(abs(p) - b, 0.0)) - radius
method, and it almost works perfectly except for the fact that the corners seem stretched:
My fragment shader:
in vec2 passTexCoords;
out vec4 fragment;
uniform vec4 color;
uniform int width;
uniform int height;
uniform int radius;
void main() {
fragment = color;
vec2 pos = (abs(passTexCoords - 0.5) + 0.5) * vec2(width, height);
float alpha = 1.0 - clamp(length(max(pos - (vec2(width, height) - radius), 0.0)) - radius, 0.0, 1.0);
fragment.a = alpha;
}
The stretching does make sense to me, but when I replace it with
vec2 pos = (abs(passTexCoords - 0.5) + 0.5) * vec2(width, height) * vec2(scaleX, scaleY);
and
float alpha = 1.0 - clamp(length(max(pos - (vec2(width, height) * vec2(scaleX, scaleY) - radius), 0.0)) - radius, 0.0, 1.0);
(where scaleX and scaleY are scalars between 0.0 and 1.0 that represent the width and height of the rectangle relative to the screen) the rectangle almost completely disappears:
The problem is that the distances are not scaled into screen space, and are therefore stretched along the larger window axis. You can fix this by multiplying the normalized position by the aspect ratio of the screen, along with the other parameters for the box. I wrote an example on Shadertoy that does this:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Input info
vec2 boxPos; // The position of the center of the box (in normalized coordinates)
vec2 boxBnd; // The half-bounds (radii) of the box (in normalized coordinates)
float radius;// Radius
boxPos = vec2(0.5, 0.5); // center of the screen
boxBnd = vec2(0.25, 0.25); // half of the area
radius = 0.1;
// Normalize the pixel coordinates (this is "passTexCoords" in your case)
vec2 uv = fragCoord/iResolution.xy;
// (Note: iResolution.xy holds the x and y dimensions of the window in pixels)
vec2 aspectRatio = vec2(iResolution.x/iResolution.y, 1.0);
// In order to make sure visual distances are preserved, we multiply everything by aspectRatio
uv *= aspectRatio;
boxPos *= aspectRatio;
boxBnd *= aspectRatio;
// Time varying pixel color
vec3 col = 0.5 + 0.5*cos(iTime+uv.xyx+vec3(0,2,4));
// Output to screen
float alpha = length(max(abs(uv - boxPos) - boxBnd, 0.0)) - radius;
// Shadertoy doesn't have an alpha in this case
if(alpha <= 0.0){
fragColor = vec4(col,1.0);
}else{
fragColor = vec4(0.0, 0.0, 0.0, 1.0);
}
}
There may be a less computationally expensive way to do this, but this was a simple solution I cooked up.
I assume passTexCoords is a texture coordinate in the range [0, 1], that width and height are the size of the screen, and that scaleX and scaleY are the ratio of the green area to the size of the screen.
Calculate the absolute position (pos) of the current fragment in relation to the center of the green area in pixel units:
vec2 pos = (abs(passTexCoords - 0.5) + 0.5) * vec2(width*scaleX, height*scaleY);
Calculate the vector from the center point of the corner arc to the current fragment:
vec2 arc_cpt_vec = max(pos - vec2(width*scaleX, height*scaleY) + radius, 0.0);
If the length of that vector is greater than the radius, then the fragment has to be skipped:
float alpha = length(arc_cpt_vec) > radius ? 0.0 : 1.0;
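Putting the three steps together, a minimal sketch of the complete fragment shader under those assumptions:

#version 330 core
in vec2 passTexCoords;
out vec4 fragment;
uniform vec4 color;
uniform int width;    // screen width in pixels
uniform int height;   // screen height in pixels
uniform int radius;   // corner radius in pixels
uniform float scaleX; // rectangle width relative to the screen, in (0, 1]
uniform float scaleY; // rectangle height relative to the screen, in (0, 1]

void main() {
    // absolute fragment position relative to the rectangle's center, in pixels
    vec2 pos = (abs(passTexCoords - 0.5) + 0.5) * vec2(width * scaleX, height * scaleY);
    // vector from the corner arc's center point to the current fragment
    vec2 arc_cpt_vec = max(pos - vec2(width * scaleX, height * scaleY) + float(radius), 0.0);
    // fragments farther from the arc center than the radius become transparent
    float alpha = length(arc_cpt_vec) > float(radius) ? 0.0 : 1.0;
    fragment = vec4(color.rgb, color.a * alpha);
}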