Why drawing a texture using Opengl ( using rust ) is showing border colour - opengl

I am drawing this texture using OpenGL and Rust. But for some reason the image has a border around it when I draw away from the top left. I think there is something wrong with the matrix / viewport. Not entirely sure.
let width = 1280; // Window is initialised to these values. Hard coded for context.
let height = 720;
unsafe {
gl::Viewport(0, 0, width, height);
}
let orth = cgmath::ortho::<f32>(0.0, width as f32, height as f32, 0.0, -1.00, 100.0)
unsafe {
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_BORDER as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_BORDER as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
let border_colour = [0.9, 0.2, 0.5, 1.0];
gl::TexParameterfv(gl::TEXTURE_2D, gl::TEXTURE_BORDER_COLOR, border_colour.as_ptr());
gl::TexImage2D(
gl::TEXTURE_2D,
0,
gl::RGBA as i32,
img.width as i32,
img.height as i32,
0,
gl::RGBA,
gl::UNSIGNED_BYTE,
data.as_ptr() as *const c_void,
);
gl::GenerateMipmap(gl::TEXTURE_2D);
}
Before Drawing the texture, I clear the display
/// Clears the default framebuffer's colour and depth attachments to opaque black.
///
/// The clear colour must be set *before* `gl::Clear` is issued: `glClear` uses
/// the clear state latched at the moment of the call. The original code set it
/// afterwards, so the very first frame was cleared with whatever colour was
/// previously configured (the GL default), not black.
pub fn clear_frame_buffer() {
    unsafe {
        // State first, then the command that consumes it.
        gl::ClearColor(0.0, 0.0, 0.0, 1.0);
        gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
    }
}
Drawing the texture:
/// Draws `tex` as a screen-space quad at `position`, scaled to the texture's
/// pixel dimensions, using `projection` (expected to be an orthographic matrix).
///
/// NOTE(review): the quad mesh used elsewhere in this file spans -1..1 on both
/// axes (a 2x2 unit quad). Scaling it by (w, h) therefore produces a quad of
/// size 2w x 2h centred on `position`, twice the texture size -- with
/// CLAMP_TO_BORDER + LINEAR filtering this is what makes the border colour
/// visible. Either use a 0..1 quad or scale by (w/2, h/2) -- TODO confirm.
pub fn draw_quad(tex: &Texture2D, program : &Program, mesh : &Mesh, projection : &Matrix4<f32>, position : Vector3<f32>) {
// Bind shader, vertex state, and texture before uploading uniforms.
program.bind();
mesh.vao.bind();
tex.bind();
// View matrix: identity (no camera movement); from_scale(1.0) is a no-op.
if let Some(location) = program.get_uniform_location("view") {
let camera_transform = Matrix4::<f32>::identity() * Matrix4::from_scale(1.0);
program.set_matrix(location, &[camera_transform]);
}
if let Some(location) = program.get_uniform_location("projection") {
program.set_matrix(location, &[*projection]);
}
// Model = translate * scale: scale the unit quad up to texture size, then
// move it to `position` (scale is applied first).
if let Some(location) = program.get_uniform_location("model") {
let model = {
let translate = Matrix4::<f32>::from_translation(position);
let (w, h) = tex.get_dimensions();
let scale = Matrix4::<f32>::from_nonuniform_scale(w as f32, h as f32, 1.0);
translate * scale
};
program.set_matrix(location, &[model]);
}
// Non-indexed draw: the mesh holds 6 vertices (two triangles).
let count = mesh.count as i32;
unsafe {
gl::DrawArrays(gl::TRIANGLES, 0, count);
}
}
Then I move the texture
I know the pink is coming from my border colour texture parameter. But I shouldn't be able to see it — I should only be drawing the image and not the border.
let quad_vertices: [f32; 30] = [
// Positions // Texcoords
1.0, -1.0, 0.0, 1.0, 0.0,
-1.0, -1.0, 0.0, 0.0, 0.0,
-1.0, 1.0, 0.0, 0.0, 1.0,
1.0, 1.0, 0.0, 1.0, 1.0,
1.0, -1.0, 0.0, 1.0, 0.0,
-1.0, 1.0, 0.0, 0.0, 1.0,
];
Vertex Shader
#version 330 core
layout (location = 0) in vec3 vertex;
// NOTE(review): texcoords are bound at location 2 (location 1 is skipped);
// the VAO attribute setup must match this index or UVs will be garbage.
layout (location = 2) in vec2 texcoords;
out vec2 TexCoords;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
// Standard MVP transform for a 2D quad: the incoming z is discarded and
// forced to 0 before projection.
void main()
{
TexCoords = texcoords;
gl_Position = projection * view * model * vec4(vertex.xy, 0.0, 1.0);
}
Fragment Shader
#version 330 core
in vec2 TexCoords;
out vec4 color;
uniform sampler2D image;
// Plain textured fragment: emit the sampled texel unmodified.
void main()
{
    vec4 texel = texture(image, TexCoords);
    color = texel;
}

Related

glow depth buffer does not clear

renderer.draw
/// Renders a textured pyramid rotated by `rotation` radians about the Y axis.
///
/// NOTE(review): shaders, vertex/index buffers, and the texture are recreated
/// on every call; fine for a prototype, but they should be built once and
/// cached (the author acknowledges this below the snippet).
pub fn draw(&self, rotation: f32) {
unsafe {
// Shader sources
const TEXTURE_VS_SRC: &str = "
#version 330 core
layout (location = 0) in vec3 aposition;
layout (location = 1) in vec3 acolor;
layout (location = 2) in vec2 atexture_coordinate;
out vec3 color;
out vec2 texture_coordinate;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(aposition, 1.0);
color = acolor;
texture_coordinate = atexture_coordinate;
}
";
const TEXTURE_FS_SRC: &str = "
#version 330 core
// Outputs colors in RGBA
out vec4 FragColor;
// Inputs the color from the Vertex Shader
in vec3 color;
// Inputs the texture coordinates from the Vertex Shader
in vec2 texture_coordinate;
// Gets the Texture Unit from the main function
uniform sampler2D tex0;
void main()
{
FragColor = texture(tex0, texture_coordinate);
}
";
// Vertices coordinates
// Interleaved layout: 3f position, 3f colour, 2f texcoord (4 base
// corners plus the apex of the pyramid).
let VERTICES: Vec<f32> = vec![
// COORDINATES / COLORS / TexCoord //
-0.5, 0.0, 0.5, 0.83, 0.70, 0.44, 0.0, 0.0,
-0.5, 0.0, -0.5, 0.83, 0.70, 0.44, 5.0, 0.0,
0.5, 0.0, -0.5, 0.83, 0.70, 0.44, 0.0, 0.0,
0.5, 0.0, 0.5, 0.83, 0.70, 0.44, 5.0, 0.0,
0.0, 0.8, 0.0, 0.92, 0.86, 0.76, 2.5, 5.0
];
// Indices for vertices order
// Two triangles for the base, four for the sides (apex = vertex 4).
let INDICES: Vec<u32> = vec![
0, 1, 2,
0, 2, 3,
0, 1, 4,
1, 2, 4,
2, 3, 4,
3, 0, 4
];
// NOTE(review): clear_depth_f32/depth_func/depth_mask are set *after*
// the clear; this happens to work here because 1.0 is also the GL
// default clear-depth, but setting depth state before clearing is safer.
// The depth test itself only works if the context actually has a depth
// buffer (see `with_depth_buffer(24)` in the accepted fix below).
self.gl.clear_color(0.3, 0.3, 0.3, 1.0);
self.gl.clear(glow::COLOR_BUFFER_BIT | glow::DEPTH_BUFFER_BIT);
self.gl.clear_depth_f32(1.0);
self.gl.depth_func(glow::LESS);
self.gl.depth_mask(true);
self.gl.enable(glow::DEPTH_TEST);
// Compile the program and upload the three transform matrices.
let shader = Shader::new(&self.gl, TEXTURE_VS_SRC, TEXTURE_FS_SRC);
shader.bind(&self.gl);
shader.upload_uniform_mat4(&self.gl, "model", &glm::rotate(&glm::identity(), rotation, &glm::vec3(0.0, 1.0, 0.0)));
shader.upload_uniform_mat4(&self.gl, "view", &glm::translate(&glm::identity(), &glm::vec3(0.0, -0.5, -2.0)));
shader.upload_uniform_mat4(&self.gl, "projection", &Mat4::new_perspective((800.0 / 600.0), 45.0, 0.01, 100.0));
// Load, upload and bind the texture to unit 0, matching `tex0`.
let mut texture = Texture::new(String::from("sandbox/assets/textures/checkerboard.png"), 1.0);
texture.init(&self.gl);
Texture::bind(&self.gl, texture.get_renderer_id().unwrap(), 0);
shader.upload_uniform_integer1(&self.gl, "tex0", 0);
// Attribute layout must mirror the vertex shader's location 0/1/2 inputs.
let layout = BufferLayout::new(
vec![
BufferElement::new("aposition".parse().unwrap(), ShaderDataType::Float3, false),
BufferElement::new("acolor".parse().unwrap(), ShaderDataType::Float3, false),
BufferElement::new("atexture_coordinate".parse().unwrap(), ShaderDataType::Float2, false),
]
);
let index_buffer = IndexBuffer::new(&self.gl, INDICES);
let vertex_buffer = VertexBuffer::new(&self.gl, VERTICES, layout);
let vertex_array = VertexArray::new(&self.gl, index_buffer, vertex_buffer);
self.gl.draw_elements(glow::TRIANGLES, vertex_array.get_indices_len() as i32, glow::UNSIGNED_INT, 0);
}
}
I'm using egui with the glow backend, which has its own GL context, so I made sure to reset everything before drawing. Obviously this needs refactoring since the resources shouldn't be created every draw, but I wanted to get it working first.
texture.init
/// Loads the image at `self.get_path()`, uploads it as an immutable-storage
/// 2D texture, and stores the resulting GL texture id on `self`.
///
/// Panics if the image cannot be opened. (Snippet appears truncated here --
/// the function's closing brace is not visible in this excerpt.)
pub(crate) fn init(&mut self, gl: &glow::Context) {
match image::open(String::from(self.get_path())) {
Err(err) => panic!("Could not load image {}: {}", self.get_path(), err),
Ok(img) => unsafe {
let (width, height) = img.dimensions();
// Pick the GL internal/data format from the decoded pixel layout;
// anything that is not RGB8/RGBA8 is converted to RGB8 first.
let (image, internal_format, data_format) = match img {
DynamicImage::ImageRgb8(img) => (img.into_raw(), glow::RGB8, glow::RGB),
DynamicImage::ImageRgba8(img) => (img.into_raw(), glow::RGBA8, glow::RGBA),
img => (img.to_rgb8().into_raw(), glow::RGB8, glow::RGB)
};
let renderer_id = gl.create_texture().unwrap();
gl.bind_texture(glow::TEXTURE_2D, Some(renderer_id));
// Immutable storage with exactly 1 mip level (level 0 only).
gl.tex_storage_2d(glow::TEXTURE_2D, 1, internal_format, width as i32, height as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_WRAP_S, glow::REPEAT as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_WRAP_T, glow::REPEAT as i32);
gl.tex_sub_image_2d(glow::TEXTURE_2D, 0, 0, 0, width as i32, height as i32, data_format, glow::UNSIGNED_BYTE, PixelUnpackData::Slice(image.as_slice()));
// NOTE(review): storage was allocated with only 1 level, so there are
// no mip levels for generate_mipmap to fill -- either allocate
// log2(max(w,h))+1 levels or drop this call. TODO confirm intent.
gl.generate_mipmap(glow::TEXTURE_2D);
self.set_renderer_id(renderer_id);
}
}
nsight depth state
this is what it looks like
EDIT:
how the window is created
fn create_display(
event_loop: &glutin::event_loop::EventLoop<()>,
title: &str
) -> (
glutin::WindowedContext<glutin::PossiblyCurrent>,
glow::Context,
) {
let window_builder = glutin::window::WindowBuilder::new()
.with_resizable(true)
.with_inner_size(glutin::dpi::LogicalSize {
width: 800.0,
height: 600.0,
})
.with_title(title);
let gl_window = unsafe {
glutin::ContextBuilder::new()
.with_depth_buffer(0)
.with_srgb(true)
.with_stencil_buffer(0)
.with_vsync(true)
.build_windowed(window_builder, event_loop)
.unwrap()
.make_current()
.unwrap()
};
let gl = unsafe { glow::Context::from_loader_function(|s| gl_window.get_proc_address(s)) };
unsafe {
use glow::HasContext as _;
gl.enable(glow::FRAMEBUFFER_SRGB);
}
(gl_window, gl)
}
The default framebuffer does not have a depth buffer. Therefore, the Depth Test does not work at all. You need to specify the depth buffer bits when creating the OpenGL window. e.g. 24 bits:
let gl_window = unsafe {
glutin::ContextBuilder::new()
.with_depth_buffer(24)
.with_srgb(true)
.with_stencil_buffer(0)
.with_vsync(true)
.build_windowed(window_builder, event_loop)
.unwrap()
.make_current()
.unwrap()
};

Mangled OpenGL rendering on some platforms

I have been experimenting with batched sprite rendering, and I've got a solution that works well on my desktop PC. However, trying it on my integrated Intel UHD 620 laptop I get the following performance warnings:
[21:42:03 error] OpenGL: API - Performance - Recompiling fragment shader for program 27
[21:42:03 error] OpenGL: API - Performance - multisampled FBO 0->1
Presumably because of the source of these performance warnings, frames that take 1-2 milliseconds on my dedicated graphics card machine are about 100 milliseconds on my laptop.
Here is my renderer code:
BatchedSpriteRenderer::BatchedSpriteRenderer(ResourceManager &resource_manager)
    : resource_manager(&resource_manager),
      max_sprites(100000),
      vertex_array(std::make_unique<VertexArray>()),
      vertex_buffer(std::make_unique<VertexBuffer>())
{
    // Shader shared by every batched draw call.
    resource_manager.load_shader("batched_texture",
                                 "shaders/texture_batched.vert",
                                 "shaders/texture.frag");

    // Build the index buffer once up front: each sprite is two triangles over
    // four corner vertices, so the 6-index pattern repeats with a stride of 4.
    static const unsigned int pattern[6] = {0, 1, 2, 2, 3, 0};
    std::vector<unsigned int> sprite_indices;
    sprite_indices.reserve(static_cast<std::size_t>(max_sprites) * 6);
    for (int i = 0; i < max_sprites; ++i)
    {
        const unsigned int base = static_cast<unsigned int>(i) * 4;
        for (unsigned int offset : pattern)
        {
            sprite_indices.push_back(base + offset);
        }
    }
    element_buffer = std::make_unique<ElementBuffer>(sprite_indices.data(), max_sprites * 6);

    // Interleaved vertex layout: vec2 position, vec2 tex coord, vec4 colour.
    VertexBufferLayout layout;
    layout.push<float>(2);
    layout.push<float>(2);
    layout.push<float>(4);
    vertex_array->add_buffer(*vertex_buffer, layout);
}
void BatchedSpriteRenderer::draw(const std::string &texture,
                                 const std::vector<glm::mat4> &transforms,
                                 const glm::mat4 &view)
{
    vertex_array->bind();
    auto shader = resource_manager->shader_store.get("batched_texture");
    shader->bind();

    // Unit-quad corners in the winding order the index buffer expects.
    // Each corner's UV equals its untransformed position.
    static const glm::vec2 corners[4] = {
        {0.0f, 1.0f}, {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}};

    // Expand each sprite transform into four transformed corner vertices.
    std::vector<SpriteVertex> vertices;
    vertices.reserve(transforms.size() * 4);
    for (const auto &transform : transforms)
    {
        for (const auto &corner : corners)
        {
            const glm::vec4 p = transform * glm::vec4(corner.x, corner.y, 1.0, 1.0);
            vertices.push_back({glm::vec2(p.x, p.y),
                                corner,
                                glm::vec4(1.0, 1.0, 1.0, 1.0)});
        }
    }

    // Stream the batch, set uniforms, bind the texture and draw.
    vertex_buffer->add_data(vertices.data(),
                            sizeof(SpriteVertex) * vertices.size(),
                            GL_DYNAMIC_DRAW);
    shader->set_uniform_mat4f("u_view", view);
    shader->set_uniform_1i("u_texture", 0);
    resource_manager->texture_store.get(texture)->bind();
    glDrawElements(GL_TRIANGLES, transforms.size() * 6, GL_UNSIGNED_INT, 0);
}
Hopefully my abstractions should be fairly self explanatory. Each of the abstraction classes (VertexArray, VertexBuffer, ElementBuffer,VertexBufferLayout) manage the lifetime of their equivalent OpenGL object.
Here are the shaders being used:
texture_batched.vert
#version 430 core
layout(location = 0)in vec2 v_position;
layout(location = 1)in vec2 v_tex_coord;
layout(location = 2)in vec4 v_color;
out vec4 color;
out vec2 tex_coord;
uniform mat4 u_view;
// Batched sprite vertex shader: positions arrive already transformed to
// world space on the CPU, so only the shared view matrix is applied here.
void main()
{
tex_coord = v_tex_coord;
gl_Position = u_view * vec4(v_position, 0.0, 1.0);
color = v_color;
}
texture.frag
#version 430 core
in vec4 color;
in vec2 tex_coord;
out vec4 frag_color;
uniform sampler2D u_texture;
// Sample the bound texture and tint it with the interpolated vertex colour.
void main()
{
    frag_color = texture(u_texture, tex_coord) * color;
}
What's causing these performance issues, and how can I fix them?
EDIT: I completely forgot to mention that the actual image rendered with this is completely messed up, I'll try and grab a screenshot of it working correctly when I get to my desktop PC, but here's what the broken version looks like:
It should be a neat grid of 200x200 white circles.
EDIT 2: I tried it on another computer, this time with a GTX 1050 Ti, and it is also broken. No error messages or warnings this time. The warning may have been unrelated.
It ended up being unrelated to OpenGL, as far as I can see.
In the draw function, I create a vector called vertices, then put all the vertices in it. For some reason, when I recreate that vector every frame the following push_back calls aren't properly adding to the vector. The members of the SpriteVertex structs were getting mixed up. As such, instead of the correct layout:
pos tex_coord color
pos tex_coord color
pos tex_coord color
pos tex_coord color
It was being filled in the following layout:
pos tex_coord color
tex_coord pos color
tex_coord pos color
tex_coord pos color
Or something to that effect, at least.
I changed it so that the vertices vector is a member of the BatchedSpriteRenderer class, reserving space for the maximum possible number of vertices.
void BatchedSpriteRenderer::draw(const std::string &texture,
                                 const std::vector<glm::mat4> &transforms,
                                 const glm::mat4 &view)
{
    vertex_array->bind();
    auto shader = resource_manager->shader_store.get("batched_texture");
    shader->bind();

    // Unit-quad corners in index-buffer winding order; each corner's UV is
    // its untransformed position. `vertices` is a preallocated member.
    static const glm::vec2 corners[4] = {
        {0.0f, 1.0f}, {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}};

    for (unsigned int i = 0; i < transforms.size(); ++i)
    {
        const auto &transform = transforms[i];
        for (unsigned int c = 0; c < 4; ++c)
        {
            const glm::vec4 p =
                transform * glm::vec4(corners[c].x, corners[c].y, 1.0, 1.0);
            vertices[i * 4 + c] = {glm::vec2(p.x, p.y),
                                   corners[c],
                                   glm::vec4(1.0, 1.0, 1.0, 1.0)};
        }
    }

    // Upload only the portion of the member buffer that was written.
    vertex_buffer->add_data(vertices.data(),
                            sizeof(SpriteVertex) * (transforms.size() * 4),
                            GL_DYNAMIC_DRAW);
    shader->set_uniform_mat4f("u_view", view);
    shader->set_uniform_1i("u_texture", 0);
    resource_manager->texture_store.get(texture)->bind();
    glDrawElements(GL_TRIANGLES, transforms.size() * 6, GL_UNSIGNED_INT, 0);
}

How to avoid black lines between triangles on modern GPUs?

I am building a parametric 3d modeler with obj export.
I am really puzzled.
I have changed my GPU last night and now there are cracks between the vertices; I can see what is behind. My old card was an NVIDIA GTX 275 and the new one is an NVIDIA GTX 960. I didn't change anything in the code, shader or otherwise. I use only flat colors (not textures).
On the image below, the black lines that cut the pentagon into triangle shouldn't be there.
It seems to be purely an OpenGL problem as when I export the model and look in Blender, the faces are contiguous and there is no duplicate vertices.
the shader code is quite simple :
_VERTEX2 = """
#version 330
#extension GL_ARB_explicit_uniform_location : enable
layout(location = 0 ) in vec3 position;
layout(location = 1 ) in vec4 color;
layout(location = 2 ) in vec3 normal;
varying vec4 baseColor;
// uniform mat4 proj;
layout(location = 0) uniform mat4 view;
layout(location = 4) uniform mat4 proj;
varying vec3 fragVertexEc;
void main(void) {
gl_Position = proj * view * vec4(position, 1.0);
fragVertexEc = (view * vec4(position, 1.0)).xyz;
baseColor = color;
}
"""
_FRAGMENT2 = """
#version 330
#extension GL_OES_standard_derivatives : enable
varying vec3 fragVertexEc;
varying vec4 baseColor;
const vec3 lightPosEc = vec3(0,0,10);
const vec3 lightColor = vec3(1.0,1.0,1.0);
void main()
{
vec3 X = dFdx(fragVertexEc);
vec3 Y = dFdy(fragVertexEc);
vec3 normal=normalize(cross(X,Y));
vec3 lightDirection = normalize(lightPosEc - fragVertexEc);
float light = max(0.0, dot(lightDirection, normal));
gl_FragColor = vec4(normal, 1.0);
gl_FragColor = vec4(baseColor.xyz * light, baseColor.w);
}
"""
the rendering code is also quite simple :
def draw(self, view_transform, proj, transform):
    """Render this mesh: bind shader/VAO/VBO, upload the matrices, describe
    the interleaved vertex attributes, and issue the indexed draw call.

    (Reformatted from a whitespace-mangled paste; structure inferred from
    the visible try/finally and call sequence.)
    """
    self.shader.use()
    gl_wrap.glBindVertexArray(self.vao)
    try:
        self.vbo.bind()
        # Fold the model transform into the view matrix before upload
        # (uniform locations 0 = view, 4 = proj, per the shader layout).
        view_transform = view_transform * transform
        GL.glUniformMatrix4fv(0, 1, False, (ctypes.c_float*16)(*view_transform.toList()))
        GL.glUniformMatrix4fv(4, 1, False, (ctypes.c_float*16)(*proj.toList()))
        for attrib_name in ('position', 'color', 'normal'):
            GL.glEnableVertexAttribArray(self.shader.attrib[attrib_name])
        # Interleaved layout: 3f position + 4f colour + 3f normal = 40 bytes.
        stride = 40
        GL.glVertexAttribPointer(
            self.shader.attrib['position'], len(Vector._fields), GL.GL_FLOAT,
            False, stride, self.vbo)
        GL.glVertexAttribPointer(
            self.shader.attrib['color'], len(Color._fields), GL.GL_FLOAT,
            False, stride, self.vbo+12)
        GL.glVertexAttribPointer(
            self.shader.attrib['normal'], len(Vector._fields), GL.GL_FLOAT,
            False, stride, self.vbo+28)
        GL.glDrawElements(
            GL.GL_TRIANGLES,
            len(self.glindices),
            self.index_type,
            self.glindices)
    finally:
        # Always restore GL state, even if a GL call above raised.
        self.vbo.unbind()
        gl_wrap.glBindVertexArray(0)
        self.shader.unuse()
A simple quad has the following data sent to OpenGL :
vertices ( flat array of pos+rgba+normal) :
[5.0, -3.061616997868383e-16, 5.0, 0.898, 0.0, 0.0, 1.0, 0.0, 1.0, 6.123233995736766e-17,
-5.0, 3.061616997868383e-16, -5.0, 0.898, 0.0, 0.0, 1.0, 0.0, 1.0, 6.123233995736766e-17,
-5.0, -3.061616997868383e-16, 5.0, 0.898, 0.0, 0.0, 1.0, 0.0, 1.0, 6.123233995736766e-17,
5.0, 3.061616997868383e-16, -5.0, 0.898, 0.0, 0.0, 1.0, 0.0, 1.0, 6.123233995736766e-17]
indices :: [0, 1, 2, 0, 3, 1]
ok, got it !
The strange display is due to glEnable(GL_POLYGON_SMOOTH).
I had completely forgotten about it as my old card wasn't doing anything with this setting. It wasn't visible at least.
But the new one has the correct behaviour : it tries to anti-alias each triangle independently, hence the black lines.
Many thanks to #Jerem who helped me cover the other possibilities.

Sun shader not working

I'm trying to get a sun shader to work, but I can't.
What I currently get is a quarter of a circle/ellipse on the lower left of my screen that is stuck to the screen (if I move the camera, it also moves).
All I do is render two triangles to form a screen-covering quad, with screen width and height in uniforms.
Vertex Shader
#version 430 core
// Bufferless fullscreen quad: two triangles spanning the whole clip-space
// range, selected by gl_VertexID -- no vertex attributes are needed.
void main(void) {
    const vec2 corners[6] = vec2[](
        vec2(-1.0, -1.0),
        vec2(-1.0,  1.0),
        vec2( 1.0,  1.0),
        vec2( 1.0,  1.0),
        vec2( 1.0, -1.0),
        vec2(-1.0, -1.0)
    );
    gl_Position = vec4(corners[gl_VertexID], 1.0, 1.0);
}
Fragment Shader
#version 430 core
layout(location = 7) uniform int screen_width;
layout(location = 8) uniform int screen_height;
layout(location = 1) uniform mat4 view_matrix;
layout(location = 2) uniform mat4 proj_matrix;
out vec4 color;
uniform vec3 light_pos = vec3(-20.0, 7.5, -20.0);
// Draws a yellow disc where the sun projects onto the screen.
// Fix: the projected position is in *clip space* until divided by w;
// the original code skipped the perspective division and the [0,1] remap,
// which is why the disc was stuck in the corner of the screen.
void main(void) {
    // Project the sun into clip space, then perform the perspective
    // division to reach normalized device coordinates ([-1,1] x [-1,1]).
    vec4 clip_light_pos = proj_matrix * view_matrix * vec4(light_pos, 1.0);
    vec2 ndc_light_pos = clip_light_pos.xy / clip_light_pos.w;
    // Remap NDC to [0,1] so it is comparable with the fragment position.
    vec2 screen_space_light_pos = ndc_light_pos * 0.5 + 0.5;
    // Fragment position normalized to [0,1] over the viewport.
    vec2 screen_space_fragment_pos = vec2(gl_FragCoord.x / screen_width, gl_FragCoord.y / screen_height);
    // Inside the sun's radius: yellow; otherwise leave the pixel untouched.
    if (length(screen_space_light_pos - screen_space_fragment_pos) < 0.1) {
        color = vec4(1.0, 1.0, 0.0, 1.0);
    }
    else {
        discard;
    }
}
What I think it does:
Get the position of the sun (light_pos) in screen space.
Get the fragment position in screen space.
If the distance between them is below a certain value, draw fragment with yellow color;
Else discard.
screen_space_light_pos is still in clip space — it has not yet been converted to normalized device coordinates. You've missed the perspective division:
vec3 before_division = (proj_matrix * view_matrix * vec4(light_pos, 1.0)).xyw;
vec2 screen_space_light_pos = before_division.xy / before_division.z;
With common proj_matrix configurations, screen_space_light_pos will be in [-1,1]x[-1,1]. To match screen_space_fragment_pos range, you probably need to adjust screen_space_light_pos:
screen_space_light_pos = screen_space_light_pos * 0.5 + 0.5;

Drawing Antialiased circle using Shaders

I am new to shader programming. I am trying to draw a circle with glsl. I used a point with a Size and tried to filter out the points outside the radius.(Altering the alpha value).
The code is as follows:
Fragment Shader:
#version 130
// Interpolated point-sprite coordinate supplied by the vertex stage.
varying vec2 textureCoordinate;
const float circleBorderWidth = 0.08;//for anti aliasing
// Fades alpha near the circle's rim to approximate anti-aliasing.
// NOTE(review): smoothstep(0.08, 0.1, 1.0 - length(tc)) only ramps over a
// 0.02-wide band near the rim; if textureCoordinate always has length 1.0
// (as the author observes), the varying is not being fed per-vertex data
// correctly -- verify the attribute binding. TODO confirm intended edges.
void main() {
float d = smoothstep(circleBorderWidth,0.1, 1.0-length(textureCoordinate));
gl_FragColor = vec4(0.0, 1.0, 0.0, d);
}
Vertex Shader:
#version 130
attribute vec4 coord3d;
attribute vec2 varPos;
varying vec2 textureCoordinate;
// Point-sprite vertex shader: coord3d.xyz is the point position, coord3d.w
// is the point size in pixels; varPos is forwarded to the fragment stage.
void
main()
{
    gl_Position = vec4(coord3d.xyz, 1.);
    gl_PointSize = coord3d.w;
    gl_FrontColor = gl_Color;
    textureCoordinate = varPos;
}
Data:
float pos[] = {
-1, -1,
-1, 1,
1, 1,
1, -1,
};
float vertices[]={0.0,0.0f,0.0f,100.0f};
Draw Method:
// Draws a single anti-aliased point sprite with alpha blending enabled.
// State is enabled before the draw and restored afterwards; order of the
// GL calls matters, so the body is kept as-is.
void drawScene() {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
program->makeCurrent();
// Point smoothing + shader-controlled point size + standard alpha blend.
glEnable(GL_POINT_SMOOTH);
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
glEnable(GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Attribute locations may be -1 if optimized out; guard each binding.
if (varPosAttrib>=0) {
glVertexAttribPointer( varPosAttrib, 2, GL_FLOAT, GL_FALSE,
0, pos ); // -->varPos in Vertex Shader.
glEnableVertexAttribArray( varPosAttrib );
}
if (posAttrib>=0) {
glVertexAttribPointer(posAttrib, 4, GL_FLOAT, GL_FALSE, 0, vertices); // -->coord3d in vertex shader
glEnableVertexAttribArray(posAttrib);
// One point: xyz position plus w = point size (see `vertices` data).
glDrawArrays(GL_POINTS, 0, 1);
}
// Restore the state toggled above before releasing the program.
glDisable(GL_POINT_SMOOTH);
glDisable(GL_VERTEX_PROGRAM_POINT_SIZE);
glDisable(GL_BLEND);
program->release();
glutSwapBuffers(); //Send the 3D scene to the screen
}
This results in drawing a square if I replace d with 1.0 in the following line (in the fragment shader):
gl_FragColor = vec4(0.0, 1.0, 0.0, d); // -> if d is replaced by 1.0
I tried to replace the x and y values in gl_FragColor with textureCoordinate.x and textureCoordinate.y. The result was black (so I assume the values are 0.0). The thing which I don't understand is that if I take the length of textureCoordinate then it is always 1.0 (experimented by replacing the value in gl_FragColor). I am unable to figure out what I am doing wrong here. I was expecting the textureCoordinate value to interpolate with respect to the passed-in data (varPos).
Here's my current attempt at it. It works, in the sense that it draw a disc with a smooth border. I use a distance field approach ie. I compute the distance from the disc's border
Fragment shader
#version 110
varying vec2 uv;
// Distance-field disc: signed distance from the rim, faded over `border`
// for an anti-aliased edge. Expects uv in (0,0)..(1,1) over the quad.
void
main() {
    float border = 0.01;
    float radius = 0.5;
    vec4 color0 = vec4(0.0, 0.0, 0.0, 1.0);
    vec4 color1 = vec4(1.0, 1.0, 1.0, 1.0);
    vec2 m = uv - vec2(0.5, 0.5);
    float dist = radius - length(m);
    // 1 fully inside, 0 outside, linear ramp across the rim --
    // identical to the three-way branch on (dist > border / dist > 0).
    float t = clamp(dist / border, 0.0, 1.0);
    gl_FragColor = mix(color0, color1, t);
}
Vertex shader
#version 110
varying vec2 uv;
// Fixed-function-style transform; forwards the first texcoord set as `uv`.
void
main() {
    uv = gl_MultiTexCoord0.xy;
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
It's meant to be drawn on a quad with texture coordinates (-0.5, -0.5)x(0.5, 0.5).