Texture Coordinates OSG GLSL - C++

I have a 500x500-pixel image that I upload as a Texture2D, sample in GLSL, and read back as raw data into C++/OSG. I ran into problems with the texture coordinates (in GLSL they go from 0 to 1). Can someone help me with this?
C++ code:
cv::Mat test = cv::Mat::zeros(512, 512, CV_8UC3);                     // 512x512, 3 channels (BGR)
test(cv::Rect(  0,   0, 255, 255)).setTo(cv::Scalar(255, 0, 0));      // top-left quadrant
test(cv::Rect(256,   0, 255, 255)).setTo(cv::Scalar(0, 255, 0));      // top-right quadrant
test(cv::Rect(  0, 256, 255, 255)).setTo(cv::Scalar(0, 0, 255));      // bottom-left quadrant
test(cv::Rect(256, 256, 255, 255)).setTo(cv::Scalar(255, 255, 255));  // bottom-right quadrant
osg::ref_ptr<osg::Image> image = new osg::Image;
// s = 512, t = 512, r = 1 (depth of a 2D image); the data stays owned by the cv::Mat
image->setImage(512, 512, 1, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, test.data, osg::Image::NO_DELETE, 1);
osg::ref_ptr<osg::Texture2D> texture = new osg::Texture2D;
texture->setTextureSize(512, 512);
texture->setImage(image);
// Pass the texture to GLSL as a uniform bound to texture unit 0
osg::StateSet* ss = scene->getOrCreateStateSet();
osg::Uniform* samUniform = new osg::Uniform(osg::Uniform::SAMPLER_2D, "vertexMap");
samUniform->set(0);
ss->addUniform(samUniform);
ss->setTextureAttributeAndModes(0, texture, osg::StateAttribute::ON);
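A side note on filtering: if the goal is to read back exact, unfiltered texel values while still sampling with texture(), nearest-neighbour filtering can be requested on the OSG texture. A small sketch, assuming the osg::Texture2D created above (this is not part of the original code):
// Force nearest-neighbour sampling so lookups return raw texel values
texture->setFilter(osg::Texture2D::MIN_FILTER, osg::Texture2D::NEAREST);
texture->setFilter(osg::Texture2D::MAG_FILTER, osg::Texture2D::NEAREST);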
Vertex shader:
#version 130

void main() {
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
}
Fragment shader:
#version 130

uniform sampler2D vertexMap;
out vec4 out_data;

void main() {
    vec3 value = texture2D(vertexMap, gl_TexCoord[0].st).xyz;
    out_data = vec4(value, 1);
}
This is my input data:
Output data from shader:

I solved it by replacing texture2D with texelFetch in the fragment shader. The difference between the two functions is explained here: https://gamedev.stackexchange.com/questions/66448/how-do-opengls-texelfetch-and-texture-differ
#version 130

uniform sampler2D vertexMap;
out vec4 out_data;

void main() {
    // texelFetch takes integer texel coordinates and returns the texel unfiltered
    vec3 value = texelFetch(vertexMap, ivec2(gl_FragCoord.xy), 0).xyz;
    out_data = vec4(value, 1);
}
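For reference, the two lookup functions are related roughly as follows: with nearest filtering, texture() sampled at the centre of texel (x, y), i.e. at ((x + 0.5)/width, (y + 0.5)/height), returns the same value that texelFetch() returns for ivec2(x, y). A small GLSL sketch, not part of the original shaders (textureSize() queries the texture's dimensions):
// Converting between normalized coordinates and integer texel coordinates
ivec2 size  = textureSize(vertexMap, 0);                      // e.g. ivec2(512, 512)
ivec2 texel = ivec2(gl_TexCoord[0].st * vec2(size));          // [0,1] coords -> texel index
vec3 exact    = texelFetch(vertexMap, texel, 0).xyz;          // unfiltered, exact texel value
vec3 filtered = texture(vertexMap, gl_TexCoord[0].st).xyz;    // filtered lookup at the same spot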

Related

glDrawArraysInstanced behaves weirdly when the camera moves far from the screen

What I want to achieve is to render many small quads with the OpenGL function glDrawArraysInstanced, with the same spacing between them. For example, please refer to the following image:
The code is as follow:
void OpenGLShowVideo::displayBySmallMatrix()
{
    // Now use QOpenGLExtraFunctions instead of QOpenGLFunctions as we want to
    // do more than what GL(ES) 2.0 offers.
    QOpenGLExtraFunctions *f = QOpenGLContext::currentContext()->extraFunctions();
    f->glClearColor(9.f/255.0f, 14.f/255.0f, 15.f/255.0f, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    f->glViewport(0, 0, this->width(), this->height());
    m_displayByMatrixProgram->bind();
    f->glActiveTexture(GL_TEXTURE0 + m_acRenderToScreenTexUnit);
    f->glBindTexture(GL_TEXTURE_2D, m_renderWithMaskFbo->texture());
    if (m_uniformsDirty) {
        m_uniformsDirty = false;
        m_displayByMatrixProgram->setUniformValue(m_samplerLoc, m_acRenderToScreenTexUnit);
        m_proj.setToIdentity();
        m_proj.perspective(INIT_VERTICAL_ANGLE, float(this->width()) / float(this->height()), m_fNearPlane, m_fFarPlane);
        m_displayByMatrixProgram->setUniformValue(m_projMatrixLoc, m_proj);
        QMatrix4x4 camera;
        camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0));
        m_displayByMatrixProgram->setUniformValue(m_camMatrixLoc, camera);
        m_world.setToIdentity();
        float fOffsetZ = m_fVerticalAngle / INIT_VERTICAL_ANGLE;
        m_world.translate(m_fMatrixOffsetX, m_fMatrixOffsetY, fOffsetZ);
        m_proj.scale(MATRIX_INIT_SCALE_X, MATRIX_INIT_SCALE_Y, 1.0f);
        m_world.rotate(180, 1, 0, 0);
        QMatrix4x4 wm = m_world;
        m_displayByMatrixProgram->setUniformValue(m_worldMatrixLoc, wm);
        QMatrix4x4 mm;
        mm.setToIdentity();
        m_displayByMatrixProgram->setUniformValue(m_myMatrixLoc, mm);
        m_displayByMatrixProgram->setUniformValue(m_lightPosLoc, QVector3D(0, 0, 70));
        QSize tmpSize = QSize(m_viewPortWidth, m_viewPortHeight);
        m_displayByMatrixProgram->setUniformValue(m_resolutionLoc, tmpSize);
        int whRatioVal = m_viewPortWidth / m_viewPortHeight;
        m_displayByMatrixProgram->setUniformValue(m_whRatioLoc, whRatioVal);
    }
    m_geometries->bindBufferForArraysInstancedDraw();
    f->glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, m_viewPortWidth * m_viewPortHeight);
}
And the vertex shader code is as follows:
#version 330

layout(location = 0) in vec4 vertex;
out vec3 color;

uniform mat4 mvp_matrix;
uniform mat4 projMatrix;
uniform mat4 camMatrix;
uniform mat4 worldMatrix;
uniform mat4 myMatrix;
uniform vec2 viewResolution;
uniform int whRatio;
uniform sampler2D sampler;

void main() {
    int posX = gl_InstanceID % int(viewResolution.x);
    int posY = gl_InstanceID / int(viewResolution.y);
    if( posY % whRatio < whRatio) {
        posY = gl_InstanceID / int(viewResolution.x);
    }
    ivec2 pos = ivec2(posX, posY);
    vec2 t = vec2( pos.x * 3.0, pos.y * 3.0 );
    mat4 wm = mat4(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t.x, t.y, 1, 1) * worldMatrix;
    color = texelFetch(sampler, pos, 0).rgb;
    gl_Position = projMatrix * camMatrix * wm * vertex;
}
And the fragment shader is as follows:
#version 330 core

in vec3 color;
out vec4 fragColor;

void main() {
    fragColor = vec4(color, 1.0);
}
However, when I move the camera away from the screen (by changing the m_eye parameter of camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0))), I get something like this:
The spacing between the quads becomes uneven, and the quads also differ in size. But when I move the camera closer to the screen, it looks much better.
I think what you're seeing there is the result of rounding the coordinates to the nearest integer pixel coordinate.
To get something that looks more even, you want to use some form of anti-aliasing. The options that spring to mind are:
Enable some sort of full screen anti-aliasing like MSAA. This is simple to enable (a minimal Qt sketch follows below), but can have a significant performance cost.
Put your pattern in a texture, and tile that texture over a single quad. Texture filtering and mip maps should take care of the anti-aliasing for you, and it will probably be faster to render that way as well because you only need a single quad.
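For the first option, with a Qt-based setup like the one in the question, MSAA is typically requested on the surface format before the OpenGL window is created. A minimal sketch (the sample count of 4 is only an example, and the widget creation is left as in the original application):
#include <QApplication>
#include <QSurfaceFormat>

int main(int argc, char *argv[])
{
    // Ask for a 4x multisampled default framebuffer before any GL window is created
    QSurfaceFormat fmt = QSurfaceFormat::defaultFormat();
    fmt.setSamples(4);                      // example value; 0 disables MSAA
    QSurfaceFormat::setDefaultFormat(fmt);

    QApplication app(argc, argv);
    // ... create and show the OpenGL widget/window as before ...
    return app.exec();
}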

Vertex shader animation is rotating with camera

I am new to shaders, and I want to animate an object with the vertex shader.
Right now I just want to move it with a constant. For some reason, instead of going in the x-direction of the world, it moves in the x-direction of the camera. (So whenever I turn the camera, the object rotates with me)
The project is in Processing, but I don't think that affects the shader.
THE PROCESSING CODE:
PShader sdr;

void setup() {
    size(1000, 1000, P3D);
    noStroke();
    sdr = loadShader("shdFrag.glsl", "shdVert.glsl");
}

void draw() {
    background(200);
    // Set camera
    camera(0, -300, 700, mouseX-500, 0, 200, 0, 1, 0);
    // Ground
    resetShader();
    beginShape();
    fill(100);
    vertex(-500, 0, 500);
    vertex( 500, 0, 500);
    vertex( 500, 0, -500);
    vertex(-500, 0, -500);
    endShape();
    // Red Sphere
    shader(sdr);
    fill(255, 0, 0);
    sphere(100);
}
VERTEX SHADER:
uniform mat4 transform;

attribute vec4 position;
attribute vec4 color;

out vec4 vertColor;

void main() {
    vec4 pos = position;
    pos.x += 300;
    vertColor = color;
    gl_Position = transform * pos;
}
FRAGMENT SHADER:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif

in vec4 vertColor;

void main() {
    vec4 color = vertColor;
    gl_FragColor = vec4(color.xyz, 1);
}
A GIF of what is happening:
the scene with a sphere

OpenGL - Drawing a grid of quads and manually painting them

I'm doing a simple image processing app using OpenGL and C++.
However, there is one particular thing that I don't know how to do, which is:
I need to let my user draw a histogram graph.
The way I thought to do this is by creating a grid of quads, one quad for each pixel intensity of my image. Example: if the image is 8 bits, I would need 256x256 quads in my grid. After drawing the grid, I want my user to manually paint the quads in a quantized way (quad by quad), so that he could "draw" the histogram. The problem is that I don't know how to do any of these things...
Would anyone give me directions on how to draw the grid, and how to do the painting?
If you're confused about "drawing a histogram", just consider it a regular graph.
You don't have to draw a grid of quads. Just one quad is enough, and then use a shader to sample from the histogram stored in a 1d-texture. Here is what I get:
Vertex shader:
#version 450 core

layout(std140, binding = 0) uniform view_block {
    vec2 scale, offset;
} VIEW;

layout(std140, binding = 1) uniform draw_block {
    vec4 position;
    float max_value;
} DRAW;

out gl_PerVertex {
    vec4 gl_Position;
};

void main()
{
    ivec2 id = ivec2(gl_VertexID&1, gl_VertexID>>1);
    vec2 position = vec2(DRAW.position[id.x<<1], DRAW.position[(id.y<<1) + 1]);
    gl_Position = vec4(fma(position, VIEW.scale, VIEW.offset), 0, 1);
}
Fragment shader:
#version 450 core

layout(std140, binding = 1) uniform draw_block {
    vec4 position;
    float max_value;
} DRAW;

layout(binding = 0) uniform sampler1D hist;
layout(location = 0) out vec4 OUT;

void main()
{
    const vec2 extent = DRAW.position.zw - DRAW.position.xy;
    vec2 texcoord = (gl_FragCoord.xy - DRAW.position.xy)/(DRAW.position.zw - DRAW.position.xy);
    OUT.rgb = vec3(lessThan(texcoord.yyy*DRAW.max_value, texture(hist, texcoord.x).rgb));
    OUT.a = 1;
}
Histogram texture creation:
image hist(256, 1, 3, type_float);
// ... calculate the histogram ...
tex.reset(glCreateTextureSN(GL_TEXTURE_1D));
glTextureStorage1D(tex.get(), 1, GL_RGB32F, hist.w);
glTextureSubImage1D(tex.get(), 0, 0, hist.w, GL_RGB, GL_FLOAT, hist.c[0]);
glTextureParameteri(tex.get(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
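The histogram computation itself is only hinted at above. A minimal CPU-side sketch of what could fill that step, assuming an interleaved 8-bit RGB source image and one float bin per intensity and channel (the helper name and layout are hypothetical, chosen to match the 256-wide GL_RGB32F texture):
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper: build a 256-bin histogram per RGB channel from
// interleaved 8-bit pixel data, laid out as counts[i*3 + c] for intensity i, channel c.
std::vector<float> buildHistogram(const std::uint8_t* pixels, std::size_t pixelCount)
{
    std::vector<float> counts(256 * 3, 0.0f);
    for (std::size_t p = 0; p < pixelCount; ++p)
        for (int c = 0; c < 3; ++c)
            counts[pixels[p * 3 + c] * 3 + c] += 1.0f;
    return counts; // upload with glTextureSubImage1D (GL_RGB, GL_FLOAT); the largest bin gives max_value
}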
Rendering routine:
const vec2i vs = { glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT) };
glViewport(0, 0, vs[0], vs[1]);
glClear(GL_COLOR_BUFFER_BIT);

struct view_block {
    vec2f scale, offset;
} VIEW = {
    vec2f(2)/vec2f(vs), -vec2f(1)
};
GLbuffer view_buf(glCreateBufferStorageSN(sizeof(VIEW), &VIEW, 0));
glBindBufferBase(GL_UNIFORM_BUFFER, 0, view_buf.get());

struct draw_block {
    box2f position;
    float max_value;
} DRAW = {
    box2f(0, 0, vs[0], vs[1]),
    max_value
};
GLbuffer draw_buf(glCreateBufferStorageSN(sizeof(DRAW), &DRAW, 0));
glBindBufferBase(GL_UNIFORM_BUFFER, 1, draw_buf.get());

bind_textures(tex.get());
glBindProgramPipeline(pp.get());
glBindVertexArray(0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glutSwapBuffers();

Multiple output textures from the same program

I'm trying to learn how to do multiple outputs from the same program in WebGL2, leveraging the gl.drawBuffers() capability.
I looked at the book "OpenGL ES 3.0 Programming Guide", chapter 11, where it lists what is needed for multiple outputs to take place. However, the shader source example is very trivial, outputting only constant values.
I'd like to know if someone has a better example, or could explain what happened to the TextureCoordinates varying. In normal shader code I would use it to look up data values from my inputs and write them out. Now, in the face of multiple output layouts, how does the TextureCoordinates varying correspond to each layout? What happens to the dimensions of my viewport, and which output texture does that correspond to?
Here are some steps the way I understood them:
Create a Color attachment array GL_COLOR_ATTACHMENT0, ...
Create a framebuffer object for each output
Create output textures
For each FB:
BindFramebuffer
BindTexture
Associate texture with FBO: frameBufferTexture2D(..., color_attachment_from_step1)
call drawBuffers passing the color attachment array
Inside the shader access output values like this:
layout(location = 0) out vec4 fragData0;
layout(location = 1) out vec4 fragData1;
You only need one framebuffer object. You attach all the textures to it. So your steps would be
Create a framebuffer object and BindFramebuffer
Create output textures
For each texture:
Associate texture with FBO: frameBufferTexture2D(...)
Create a Color attachment array GL_COLOR_ATTACHMENT0, ...
call drawBuffers passing the color attachment array
function main() {
  const gl = document.querySelector('canvas').getContext('webgl2');
  if (!gl) {
    return alert("need WebGL2");
  }
  const vs = `#version 300 es
  void main() {
    gl_PointSize = 300.0;
    gl_Position = vec4(0, 0, 0, 1);
  }
  `;
  const fs = `#version 300 es
  precision mediump float;
  layout(location = 0) out vec4 outColor0;
  layout(location = 1) out vec4 outColor1;
  layout(location = 2) out vec4 outColor2;
  layout(location = 3) out vec4 outColor3;
  void main() {
    outColor0 = vec4(1, .5, .3, .7);   // orange
    outColor1 = vec4(.6, .5, .4, .3);  // brown
    outColor2 = vec4(.2, .8, .0, 1);   // green
    outColor3 = vec4(.3, .4, .9, .6);  // blue
  }
  `;
  const program = twgl.createProgram(gl, [vs, fs]);
  const textures = [];
  const fb = gl.createFramebuffer();
  gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
  for (let i = 0; i < 4; ++i) {
    const tex = gl.createTexture();
    textures.push(tex);
    gl.bindTexture(gl.TEXTURE_2D, tex);
    const width = 1;
    const height = 1;
    const level = 0;
    gl.texImage2D(gl.TEXTURE_2D, level, gl.RGBA, width, height, 0,
                  gl.RGBA, gl.UNSIGNED_BYTE, null);
    // attach texture to framebuffer
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + i,
                            gl.TEXTURE_2D, tex, level);
  }
  // our framebuffer textures are only 1x1 pixels
  gl.viewport(0, 0, 1, 1);
  // tell it we want to draw to all 4 attachments
  gl.drawBuffers([
    gl.COLOR_ATTACHMENT0,
    gl.COLOR_ATTACHMENT1,
    gl.COLOR_ATTACHMENT2,
    gl.COLOR_ATTACHMENT3,
  ]);
  // draw a single point
  gl.useProgram(program);
  {
    const offset = 0;
    const count = 1;
    gl.drawArrays(gl.POINTS, offset, count);
  }
  // --- below this is not relevant to the question but just so
  // --- we can see it's working
  // render the 4 textures
  const fs2 = `#version 300 es
  precision mediump float;
  uniform sampler2D tex[4];
  out vec4 outColor;
  void main() {
    vec4 colors[4];
    // you can't index textures with non-constant integer expressions
    // in WebGL2 (you can in WebGL1 lol)
    colors[0] = texture(tex[0], vec2(0));
    colors[1] = texture(tex[1], vec2(0));
    colors[2] = texture(tex[2], vec2(0));
    colors[3] = texture(tex[3], vec2(0));
    vec4 color = vec4(0);
    for (int i = 0; i < 4; ++i) {
      float x = gl_PointCoord.x * 4.0;
      float amount = step(float(i), x) * step(x, float(i + 1));
      color = mix(color, colors[i], amount);
    }
    outColor = vec4(color.rgb, 1);
  }
  `;
  const prgInfo2 = twgl.createProgramInfo(gl, [vs, fs2]);
  gl.bindFramebuffer(gl.FRAMEBUFFER, null);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  gl.useProgram(prgInfo2.program);
  // binds all the textures and set the uniforms
  twgl.setUniforms(prgInfo2, {
    tex: textures,
  });
  gl.drawArrays(gl.POINTS, 0, 1);
}
main();
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>

OpenGL: Multisampling texture y axis inverted?

I have a problem with a multisampled texture. It seems that after blitting it to another surface for rendering, it's flipped upside down. What might cause that? Should I provide some code?
edit: Well, it's gonna be a lot of code, but here we go. This is how I create my surfaces / textures:
protected override void Create(int width, int height, SurfaceFormat format)
{
    this.format = format;
    bool multisample = format.Multisampling > 0;
    int samples = Math.Max(0, Math.Min(format.Multisampling, 4));
    format.TextureTarget = multisample ? TextureTarget.Texture2DMultisample : format.TextureTarget;
    format.MipMapping = format.MipMapping && format.TextureTarget == TextureTarget.Texture2D;
    Width = width;
    Height = height;
    textureHandle = GL.GenTexture();
    //bind texture
    GL.BindTexture(format.TextureTarget, textureHandle);
    Log.Error("Bound Texture: " + GL.GetError());
    if (format.TextureTarget == TextureTarget.Texture2D)
    {
        GL.TexParameter(format.TextureTarget, TextureParameterName.TextureMinFilter, (int)(format.MipMapping ? TextureMinFilter.LinearMipmapLinear : TextureMinFilter.Linear));
        GL.TexParameter(format.TextureTarget, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
        GL.TexParameter(format.TextureTarget, TextureParameterName.TextureWrapS, (int)format.WrapMode);
        GL.TexParameter(format.TextureTarget, TextureParameterName.TextureWrapT, (int)format.WrapMode);
    }
    Log.Debug("Created Texture Parameters: " + GL.GetError());
    if (samples < 1)
        GL.TexImage2D(format.TextureTarget, 0, format.InternalFormat, Width, Height, 0, format.PixelFormat, format.SourceType, format.Pixels);
    else
        GL.TexImage2DMultisample(TextureTargetMultisample.Texture2DMultisample, samples, format.InternalFormat, Width, Height, true);
    if (format.MipMapping)
        GL.GenerateMipmap(GenerateMipmapTarget.Texture2D);
    Log.Debug("Created Image: " + GL.GetError());
    //unbind texture
    GL.BindTexture(format.TextureTarget, 0);
    //create depthbuffer
    if (format.DepthBuffer)
    {
        GL.GenRenderbuffers(1, out dbHandle);
        GL.BindRenderbuffer(RenderbufferTarget.RenderbufferExt, dbHandle);
        if (multisample)
            GL.RenderbufferStorageMultisample(RenderbufferTarget.RenderbufferExt, samples, RenderbufferStorage.DepthComponent24, Width, Height);
        else
            GL.RenderbufferStorage(RenderbufferTarget.RenderbufferExt, RenderbufferStorage.DepthComponent24, Width, Height);
    }
    //create fbo
    fboHandle = GL.GenFramebuffer();
    GL.BindFramebuffer(FramebufferTarget.FramebufferExt, fboHandle);
    GL.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.ColorAttachment0Ext, format.TextureTarget, textureHandle, 0);
    if (format.DepthBuffer)
        GL.FramebufferRenderbuffer(FramebufferTarget.FramebufferExt, FramebufferAttachment.DepthAttachmentExt, RenderbufferTarget.RenderbufferExt, dbHandle);
    Log.Debug("Framebuffer status: " + GL.CheckFramebufferStatus(FramebufferTarget.FramebufferExt));
    Log.Debug("Created Framebuffer: " + GL.GetError());
    GL.BindFramebuffer(FramebufferTarget.FramebufferExt, 0);
}
creation:
var sf = SurfaceFormat.Surface2D;
sf.Multisampling = 4;
multisampler = new Surface(Window.Width, Window.Height, sf);
Now in the render loop I do the following:
//Render entire scene to multisampler
SceneRenderer.RenderMultisampled(ActiveCamera, multisampler, time);
//blit sampler to my material input texture
multisampler.CloneTo(postEffect.Textures["_tex"]);
//blit this texture to my "Canvas" (basically a surface with additional drawing methods). The canvas material is used as a texture for a quad in my scene, thus rendering a copy of the output image to a plane.
postEffect.Textures["_tex"].CloneTo(canvas.Surface);
//This would be the same but via rendering with a quad instead of blitting. Has the same result
//canvas.Clear();
//canvas.DrawMaterial(postEffect);
//clear framebuffer
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
//Set viewport
GL.Viewport(0, 0, Window.Width, Window.Height);
//use material (bind shader & shader params) and draw the scene.
postEffect.Use();
Helper.DrawScreenQuad();
GL.UseProgram(0);
If this is not enough, I can also post the shaders & mesh code.
EDIT 2: Okay, everything is now working as expected EXCEPT when I use canvas.draw() instead of blitting the texture. The draw method looks like this:
public void DrawMaterial(Material material)
{
    GL.Viewport(0, 0, Surface.Width, Surface.Height);
    Surface.BindFramebuffer();
    material.Use();
    Helper.DrawScreenQuad();
    GL.UseProgram(0);
    GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
}
Draw screen quad:
public static void DrawScreenQuad()
{
    GL.Begin(PrimitiveType.Quads);
    GL.TexCoord2(0, 1);
    GL.Vertex2(-1, -1);
    GL.TexCoord2(1, 1);
    GL.Vertex2(1, -1);
    GL.TexCoord2(1, 0);
    GL.Vertex2(1, 1);
    GL.TexCoord2(0, 0);
    GL.Vertex2(-1, 1);
    GL.End();
}
Shader used:
[Shader vertex]
#version 150 core

in vec2 _pos;
out vec2 texCoord;

uniform float _time;
uniform sampler2D tex;

void main() {
    gl_Position = vec4(_pos, 0, 1);
    texCoord = _pos/2+vec2(0.5,0.5);
    texCoord.y = 1 - texCoord.y;
}
[Shader fragment]
#version 150 core
#define PI 3.1415926535897932384626433832795

out vec4 outColor;

uniform float _time;
uniform sampler2D tex;
in vec2 texCoord;

void main() {
    outColor = texture2D(tex, texCoord);
}
Somehow the rendered scene gets turned upside down by this. Why?
I think I found my mistake: I had the texture coordinates AND the camera inverted. It seems to be fixed now. What I still don't understand is why this works:
[Shader vertex]
#version 150 core

in vec2 _pos;
out vec2 texCoord;

uniform float _time;
uniform sampler2D tex;

void main() {
    gl_Position = vec4(_pos, 0, 1);
    texCoord = _pos/2+vec2(0.5,0.5);
    //texCoord.y = 1 - texCoord.y;
}
[Shader fragment]
#version 150 core
#define PI 3.1415926535897932384626433832795

out vec4 outColor;

uniform float _time;
uniform sampler2D tex;
in vec2 texCoord;

void main() {
    outColor = texture2D(tex, texCoord);
}
I would've expected that the y coordinate of the tex coord would need to be inverted.