fragment shader for stipple pattern - opengl

I'm trying to draw a stipple pattern using a shader, similar to the old glPolygonStipple, but it does not seem to be working as expected.
The idea is to look up the pixel coordinate (mod the mask size) in a stipple mask and discard the fragment based on the mask value.
However, two odd things are occurring which suggest the approach has a flaw I'm missing. The first is that the resulting stipple pattern does not match the mask: a single diagonal line in an 8x8 mask results in what appear to be diagonals spaced 1 pixel apart in the drawn shape. The second is that testing against 1, instead of 0, does not give a similar (but shifted) pattern.
The vertex shader looks like this:
static const char *vertexShaderSource =
"attribute highp vec4 posAttr;\n"
"attribute highp vec4 colAttr;\n"
"varying lowp vec4 col;\n"
"varying highp vec2 coord;\n"
"uniform highp mat4 matrix;\n"
"void main() {\n"
" col = colAttr;\n"
" gl_Position = matrix * posAttr;\n"
" coord = gl_Position.xy;\n"
"}\n";
and the fragment shader:
static const char *fragmentShaderSource =
"varying lowp vec4 col;\n"
"varying highp vec2 coord;\n"
"uniform int stipple[64];\n"
"uniform int width;\n"
"uniform int height;\n"
"void main() {\n"
" if (stipple[abs(mod(coord.x * width,8)) + abs(mod(coord.y * height,8)) * 8] == 1)\n"
" {\n"
" discard;\n"
" }\n"
" gl_FragColor = col;\n"
"}\n";
where width and height are the width/height of the viewport, and the stipple mask is e.g.
GLint stipple[64] = {
0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0
};
Any ideas appreciated, thanks.

You can use gl_FragCoord. It contains the pixel's center coordinate, so for a resolution of 800x600, gl_FragCoord ranges from vec2(0.5, 0.5) to vec2(799.5, 599.5). There's no need to multiply by the resolution, since we already have window coordinates.
Since gl_FragCoord is never negative, you can also remove the abs() calls.
With those few modifications, the fragment shader ends up looking like this:
varying vec4 color;
uniform int stipple[64];
void main()
{
    ivec2 coord = ivec2(gl_FragCoord.xy - 0.5);
    if (stipple[int(mod(coord.x, 8) + mod(coord.y, 8) * 8)] == 0)
        discard;
    gl_FragColor = color;
}
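For reference, the shader still reads from uniform int stipple[64], so the 64-entry mask can be uploaded in a single call with glUniform1iv. A minimal sketch (the program handle here is a placeholder, not taken from the question):
// Upload the 8x8 stipple mask into "uniform int stipple[64]".
// "program" stands for the linked shader program object.
glUseProgram(program);
GLint loc = glGetUniformLocation(program, "stipple");
glUniform1iv(loc, 64, stipple);   // stipple is the GLint[64] array shown above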

Related

glDrawArraysInstanced behaves weirdly when the camera moves far from the screen

What I want to achieve is to render many small quads with the OpenGL function glDrawArraysInstanced, with the same spacing between them. For example, please refer to the following image:
The code is as follow:
void OpenGLShowVideo::displayBySmallMatrix()
{
    // Now use QOpenGLExtraFunctions instead of QOpenGLFunctions as we want to
    // do more than what GL(ES) 2.0 offers.
    QOpenGLExtraFunctions *f = QOpenGLContext::currentContext()->extraFunctions();
    f->glClearColor(9.f/255.0f, 14.f/255.0f, 15.f/255.0f, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    f->glViewport(0, 0, this->width(), this->height());
    m_displayByMatrixProgram->bind();
    f->glActiveTexture(GL_TEXTURE0 + m_acRenderToScreenTexUnit);
    f->glBindTexture(GL_TEXTURE_2D, m_renderWithMaskFbo->texture());
    if (m_uniformsDirty) {
        m_uniformsDirty = false;
        m_displayByMatrixProgram->setUniformValue(m_samplerLoc, m_acRenderToScreenTexUnit);
        m_proj.setToIdentity();
        m_proj.perspective(INIT_VERTICAL_ANGLE, float(this->width()) / float(this->height()), m_fNearPlane, m_fFarPlane);
        m_displayByMatrixProgram->setUniformValue(m_projMatrixLoc, m_proj);
        QMatrix4x4 camera;
        camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0));
        m_displayByMatrixProgram->setUniformValue(m_camMatrixLoc, camera);
        m_world.setToIdentity();
        float fOffsetZ = m_fVerticalAngle / INIT_VERTICAL_ANGLE;
        m_world.translate(m_fMatrixOffsetX, m_fMatrixOffsetY, fOffsetZ);
        m_proj.scale(MATRIX_INIT_SCALE_X, MATRIX_INIT_SCALE_Y, 1.0f);
        m_world.rotate(180, 1, 0, 0);
        QMatrix4x4 wm = m_world;
        m_displayByMatrixProgram->setUniformValue(m_worldMatrixLoc, wm);
        QMatrix4x4 mm;
        mm.setToIdentity();
        m_displayByMatrixProgram->setUniformValue(m_myMatrixLoc, mm);
        m_displayByMatrixProgram->setUniformValue(m_lightPosLoc, QVector3D(0, 0, 70));
        QSize tmpSize = QSize(m_viewPortWidth, m_viewPortHeight);
        m_displayByMatrixProgram->setUniformValue(m_resolutionLoc, tmpSize);
        int whRatioVal = m_viewPortWidth / m_viewPortHeight;
        m_displayByMatrixProgram->setUniformValue(m_whRatioLoc, whRatioVal);
    }
    m_geometries->bindBufferForArraysInstancedDraw();
    f->glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, m_viewPortWidth * m_viewPortHeight);
}
And the vertex shader code is as follow:
#version 330
layout(location = 0) in vec4 vertex;
out vec3 color;
uniform mat4 mvp_matrix;
uniform mat4 projMatrix;
uniform mat4 camMatrix;
uniform mat4 worldMatrix;
uniform mat4 myMatrix;
uniform vec2 viewResolution;
uniform int whRatio;
uniform sampler2D sampler;
void main() {
    int posX = gl_InstanceID % int(viewResolution.x);
    int posY = gl_InstanceID / int(viewResolution.y);
    if( posY % whRatio < whRatio) {
        posY = gl_InstanceID / int(viewResolution.x);
    }
    ivec2 pos = ivec2(posX, posY);
    vec2 t = vec2( pos.x * 3.0, pos.y * 3.0 );
    mat4 wm = mat4(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t.x, t.y, 1, 1) * worldMatrix;
    color = texelFetch(sampler, pos, 0).rgb;
    gl_Position = projMatrix * camMatrix * wm * vertex;
}
And the fragment shader is as follow:
#version 330 core
in vec3 color;
out vec4 fragColor;
void main() {
    fragColor = vec4(color, 1.0);
}
However, when I move the camera farther from the screen (by changing the m_eye parameter value in camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0))), I get something like this:
The space between the quads varies, and the size of the quads varies as well. But when I move the camera closer to the screen, it looks much better.
I think what you're seeing there is the result of rounding the coordinates to the nearest integer pixel coordinate.
To get something that looks more even, you want to use some form of anti-aliasing. The options that spring to mind are:
Enable some sort of full screen anti-aliasing like MSAA. This is simple to enable, but can have a significant performance cost.
Put your pattern in a texture, and tile that texture over a single quad. Texture filtering and mip maps should take care of the anti-aliasing for you, and it will probably be faster to render that way as well because you only need a single quad.
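A minimal sketch of the second option in its simplest form: the pattern lives in a small texture with GL_REPEAT wrapping and mipmaps, and a single full-area quad samples it. The uniform and varying names below are made up for illustration, not taken from the question:
#version 330 core
in vec2 texCoord;             // scaled so the pattern repeats, e.g. fragment position / pattern size
out vec4 fragColor;
uniform sampler2D patternTex; // the small pattern texture, GL_REPEAT wrap + mipmapped
void main() {
    // bilinear filtering plus mipmapping averages the pattern as it gets dense on screen,
    // which is what produces the anti-aliased look
    fragColor = texture(patternTex, texCoord);
}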

Why does gl_FragColor in OpenGL differ from the color buffer?

I would like to use glReadPixels() to read back the value from the color buffer that I previously wrote in the fragment shader via gl_FragColor. This works about 10 times on average, then an erroneous value (1 = 255) occurs.
#version 420
uniform vec2 screenXy;
uniform vec2 screenSize;
out highp vec4 fragColor;
void main(void) {
    if((int(gl_FragCoord.x) == int(screenXy.x)) && ((int(screenSize.y - 1) - int(gl_FragCoord.y)) == int(screenXy.y))) {
        fragColor.r = 0.5; // any value
    } else {
        fragColor = vec4(1, 1, 1, 1.0);
    }
}
I pass the mouse xy coordinates to the fragment shader (screenXy). If the clicked pixel is in the row, I write a value (e.g. 0.5) into the color buffer. Now I observe that sometimes the value is 1 (= 255) instead of 0.5 (= 128).
GLubyte z[4]; // read back from the color buffer
m_func->glReadPixels(xy.x(), (m_pFbo->height() - 1) - xy.y(), 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, z);
qDebug() << "z0 " << z[0];
I see this behavior on Win10 and Android.
Does anyone have an idea what I'm doing wrong?

Shader two outputs feedback

I want to get two outputs from my shader, then feed them back into the shader to use as an iterator and accumulator. This works fine for the single default "channel", but not for two... So, here are the shaders:
#version 420
uniform sampler2DRect inData0;
uniform sampler2DRect inData1;
out float outData0;
out float outData1;
void main(void)
{
    outData0 = 1 + texture(inData0, gl_FragCoord.xy).r;
    outData1 = -1 + texture(inData1, gl_FragCoord.xy).r;
}
and
#version 420
in vec2 position;
void main()
{
    gl_Position = vec4(position, 0, 1);
}
then the standard stuff
Int32 frameBufferId = GL.GenFramebuffer();
GL.BindFramebuffer(FramebufferTarget.Framebuffer, frameBufferId);
Int32 textureId0 = GL.GenTexture();
GL.ActiveTexture(TextureUnit.Texture0);
GL.BindTexture(TextureTarget.TextureRectangle, textureId0);
GL.TexImage2D(TextureTarget.TextureRectangle, 0, PixelInternalFormat.R32f, 1024, 1024, 0, PixelFormat.Red, PixelType.Float, new Single[1024 * 1024]);
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0, TextureTarget.TextureRectangle, textureId0, 0);
Int32 textureId1 = GL.GenTexture();
GL.ActiveTexture(TextureUnit.Texture1);
GL.BindTexture(TextureTarget.TextureRectangle, textureId1);
GL.TexImage2D(TextureTarget.TextureRectangle, 0, PixelInternalFormat.R32f, 1024, 1024, 0, PixelFormat.Red, PixelType.Float, new Single[1024 * 1024]);
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment1, TextureTarget.TextureRectangle, textureId1, 0);
GL.TexParameter(TextureTarget.TextureRectangle, TextureParameterName.TextureMinFilter, (Int32)TextureMinFilter.Linear);
Single[] arrayBufferData = new Single[4 * 2] { -1, -1, 1, -1, 1, 1, -1, 1 };
Int32 arrayBufferId = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, arrayBufferId);
GL.BufferData(BufferTarget.ArrayBuffer, new IntPtr(4 * 2 * sizeof(Single)), arrayBufferData, BufferUsageHint.StaticDraw);
Int32 positionId = GL.GetAttribLocation(programId, "position");
GL.EnableVertexAttribArray(positionId);
GL.VertexAttribPointer(positionId, 2, VertexAttribPointerType.Float, false, 0, 0);
GL.Viewport(0, 0, 1024, 1024);
So, here is where I managed to get somewhere:
Single[] result = new Single[1024 * 1024];
GL.DrawBuffers(2, new DrawBuffersEnum[] { DrawBuffersEnum.ColorAttachment0, DrawBuffersEnum.ColorAttachment1 });
GL.DrawArrays(PrimitiveType.Quads, 0, 4);
GL.ReadBuffer(ReadBufferMode.ColorAttachment0);
GL.ReadPixels(0, 0, 1024, 1024, PixelFormat.Red, PixelType.Float, result);
GL.ReadBuffer(ReadBufferMode.ColorAttachment1);
GL.ReadPixels(0, 0, 1024, 1024, PixelFormat.Red, PixelType.Float, result);
Which gives me both outputs properly. For one "channel", I'd call DrawArrays again and it would feed the output0 into input0, but here I want output1 to also go into input1. Is this doable? This R32f / Red is for testing -- they will be replaced by vec4.
Edit:
changed:
layout(binding = 0) uniform sampler2DRect inData0;
layout(binding = 1) uniform sampler2DRect inData1;
layout(location = 0) out float outData0;
layout(location = 1) out float outData1;
and it works.
Bind the inputs, and point the outputs to the correct locations:
layout(binding = 0) uniform sampler2DRect inData0;
layout(binding = 1) uniform sampler2DRect inData1;
layout(location = 0) out float outData0;
layout(location = 1) out float outData1;
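Host-side, a common way to iterate such a shader is to ping-pong between two pairs of textures, so the pass never samples a texture that is also its current render target. A rough sketch in plain GL calls; all of the names (fboA/fboB, texA0/texA1, texB0/texB1, iterations) are hypothetical, not from the code above:
GLenum bufs[2] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
for (int i = 0; i < iterations; ++i) {
    glBindFramebuffer(GL_FRAMEBUFFER, fboB);      // write into the B pair of textures
    glDrawBuffers(2, bufs);
    glActiveTexture(GL_TEXTURE0);                 // sample the A pair (binding = 0 / 1)
    glBindTexture(GL_TEXTURE_RECTANGLE, texA0);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_RECTANGLE, texA1);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);          // full-screen quad (GL_QUADS in the original)
    // swap roles: the freshly written textures become the next pass's inputs
    GLuint t0 = texA0; texA0 = texB0; texB0 = t0;
    GLuint t1 = texA1; texA1 = texB1; texB1 = t1;
    GLuint fb = fboA;  fboA  = fboB;  fboB  = fb;
}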

OpenGL vertex shader: weird matrix translation

I'm trying to move a triangle based on time using a matrix, but it does some weird stuff:
What it should do:
move on the x-axis
What it does:
The top point of the triangle stays fixed and the other points seem to move around it in a circular motion, and the shape scales on the x and z axes (I'm still in 2D, so there is no depth).
My C++ Code:
...
GLfloat timeValue = glfwGetTime();
GLfloat offset = (sin(timeValue * 4) / 2);
GLfloat matrix[16] = {
1, 0, 0, offset,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
};
GLuint uniform_m_transform = glGetUniformLocation(shader_program, "m_transform");
glUniformMatrix4fv(uniform_m_transform, 1, GL_FALSE, matrix);
...
My vertex shader:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
out vec3 ourColor;
uniform mat4 m_transform;
void main()
{
    ourColor = color;
    gl_Position = m_transform * vec4(position, 1.0);
}
I don't know what I did wrong; according to the tutorial, the matrix element I've set to offset should change the x-translation.
Do you know what my mistake is?
You are providing a row-major matrix, so you need to tell glUniformMatrix4fv to transpose it:
glUniformMatrix4fv(uniform_m_transform, 1, GL_TRUE, matrix);
Reference: glUniform, check the transpose parameter.
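Equivalently, you can keep transpose as GL_FALSE and write the array in column-major order, i.e. put the translation in the last four entries (the fourth column), which is the layout OpenGL expects by default:
// column-major layout: each group of four floats is one column of the matrix
GLfloat matrix[16] = {
    1,      0, 0, 0,   // column 0
    0,      1, 0, 0,   // column 1
    0,      0, 1, 0,   // column 2
    offset, 0, 0, 1    // column 3: translation
};
glUniformMatrix4fv(uniform_m_transform, 1, GL_FALSE, matrix);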

Failed to generate the shadow map

I've been struggling with shadow mapping for two days (with JOGL), yet still couldn't make it work. Now I just want to render a very simple shadow map (grass), where closer looks brighter and farther looks darker, from the light's point of view.
Here is my code:
//setting up buffers
gl.glGenFramebuffers(1, framebuff);
gl.glBindFramebuffer(GL4.GL_FRAMEBUFFER, framebuff.get(0));
gl.glGenTextures(2, textureBuff);
gl.glBindTexture(GL4.GL_TEXTURE_2D, textureBuff.get(0));
gl.glTexStorage2D(GL4.GL_TEXTURE_2D, 1, GL4.GL_R32F, displayWidth, displayHeight);
gl.glTexParameteri(GL4.GL_TEXTURE_2D, GL4.GL_TEXTURE_MAG_FILTER, GL4.GL_LINEAR);
gl.glTexParameteri(GL4.GL_TEXTURE_2D, GL4.GL_TEXTURE_MIN_FILTER, GL4.GL_LINEAR_MIPMAP_LINEAR);
gl.glFramebufferTexture(GL4.GL_FRAMEBUFFER, GL4.GL_COLOR_ATTACHMENT0, textureBuff.get(0), 0);
gl.glBindTexture(GL4.GL_TEXTURE_2D, textureBuff.get(1));
gl.glTexStorage2D(GL4.GL_TEXTURE_2D, 1, GL4.GL_DEPTH_COMPONENT32F, displayWidth, displayHeight);
gl.glTexParameteri(GL4.GL_TEXTURE_2D, GL4.GL_TEXTURE_MAG_FILTER, GL4.GL_LINEAR);
gl.glTexParameteri(GL4.GL_TEXTURE_2D, GL4.GL_TEXTURE_MIN_FILTER, GL4.GL_LINEAR_MIPMAP_LINEAR);
gl.glTexParameteri(GL4.GL_TEXTURE_2D, GL4.GL_TEXTURE_COMPARE_MODE, GL4.GL_COMPARE_REF_TO_TEXTURE);
gl.glTexParameteri(GL4.GL_TEXTURE_2D, GL4.GL_TEXTURE_COMPARE_FUNC, GL4.GL_LEQUAL);
gl.glFramebufferTexture(GL4.GL_FRAMEBUFFER, GL4.GL_DEPTH_ATTACHMENT, textureBuff.get(1), 0);
gl.glDrawBuffer(GL4.GL_NONE);
if(gl.glCheckFramebufferStatus(GL4.GL_FRAMEBUFFER) != GL4.GL_FRAMEBUFFER_COMPLETE)
    System.out.println(gl.glCheckFramebufferStatus(GL4.GL_FRAMEBUFFER));
Vertex shader:
"#version 430 \n" +
"layout (location = 3) uniform mat4 mvMatrix; \n" +
"layout (location = 4) uniform mat4 proMatrix; \n" +
"layout (location = 0) in vec4 position; \n" +
" \n" +
"void main(void) \n" +
"{ \n" +
" randomize position... \n" +
" \n" +
" gl_Position = proMatrix * mvMatrix * position; \n" +
"}"
Fragment shader code:
"#version 430 \n" +
"out vec4 output_color; \n" +
"void main(void) \n" +
"{ \n" +
" output_color = vec4(gl_FragCoord.z); \n" +
"}"
drawing command (not sure if it's correct):
//gl.glBindFramebuffer(GL4.GL_FRAMEBUFFER, framebuff.get(0));
gl.glViewport(0, 0, displayWidth, displayWidth);
gl.glEnable(GL4.GL_POLYGON_OFFSET_FILL);
gl.glPolygonOffset(2.0f, 4.0f);
/*IntBuffer frameType = GLBuffers.newDirectIntBuffer(1);
frameType.put(GL4.GL_COLOR_ATTACHMENT0);
gl.glDrawBuffers(1, frameType);
gl.glClearBufferfv(GL4.GL_COLOR, 0, new float[] {0, 0, 0}, 0);
gl.glClearDepth(1.0f);*/
setupMVPMatrix();
gl.glBindVertexArray(vaoBuff.get(0));
gl.glUseProgram(shaderProgram);
gl.glDrawArraysInstanced(GL4.GL_TRIANGLE_STRIP, 0, 5, 512 * 512);
gl.glDisable(GL4.GL_POLYGON_OFFSET_FILL);
//gl.glBindFramebuffer(GL4.GL_FRAMEBUFFER, 0);
added:
When I comment out the glBindFramebuffer() call, the grass appears correctly in white (from the light's point of view, which shows the matrices should be correct).
But if I call glBindFramebuffer() with the depth test enabled, everything just disappears, while I expect the closer grass to be brighter and the farther grass to be darker. (I also checked the framebuffer status; nothing is wrong there.)
Any help?
Fixed. I didn't understand the meaning of off-screen rendering at the beginning. Just render the depth value stored in the texture into the default framebuffer for display, and everything works just fine. I can't believe I spent 5 days on this.
gl.glBindFramebuffer(GL4.GL_FRAMEBUFFER, framebuff.get(0));
draw scene...(storing the depth value into the depth texture)
gl.glBindFramebuffer(GL4.GL_FRAMEBUFFER, 0);
draw scene...(comparing with the depth texture stored)
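For reference, the display pass can be as small as sampling the depth texture in a full-screen draw. A sketch, assuming the depth texture is re-bound for sampling with GL_TEXTURE_COMPARE_MODE set back to GL_NONE so it behaves as a regular sampler2D (the sampler and output names are placeholders):
#version 430
layout (binding = 0) uniform sampler2D depthTex;  // depth attachment from the first pass
out vec4 output_color;
void main(void)
{
    // with compare mode GL_NONE, the stored depth value comes back in the red channel
    float d = texelFetch(depthTex, ivec2(gl_FragCoord.xy), 0).r;
    output_color = vec4(vec3(d), 1.0);            // grayscale depth; invert if you want closer = brighter
}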