I am trying to implement a Streak shader, which is described here:
http://www.chrisoat.com/papers/Oat-SteerableStreakFilter.pdf
Short explanation: The shader samples along a 1D kernel in a given direction; the kernel step grows exponentially with each pass. Color values are weighted by distance from the sampled point and summed. The result is a smooth tail/smear/light-streak effect in that direction. Here is the frag shader:
precision highp float;
uniform sampler2D u_texture;
varying vec2 v_texCoord;
uniform float u_Pass;
const float kernelSize = 4.0;
const float atten = 0.95;
vec4 streak(in float pass, in vec2 texCoord, in vec2 dir, in vec2 pixelStep) {
    float kernelStep = pow(kernelSize, pass - 1.0);
    vec4 color = vec4(0.0);
    for (int i = 0; i < 4; i++) {
        float sampleNum = float(i);
        float weight = pow(atten, kernelStep * sampleNum);
        vec2 sampleTexCoord = texCoord + ((sampleNum * kernelStep) * (dir * pixelStep));
        vec4 texColor = texture2D(u_texture, sampleTexCoord) * weight;
        color += texColor;
    }
    return color;
}
void main() {
    vec2 iResolution = vec2(512.0, 512.0);
    vec2 pixelStep = vec2(1.0, 1.0) / iResolution.xy;
    vec2 dir = vec2(1.0, 0.0);
    float pass = u_Pass;
    vec4 streakColor = streak(pass, v_texCoord, dir, pixelStep);
    gl_FragColor = vec4(streakColor.rgb, 1.0);
}
It was going to be used for a starfield type of effect. And here is the implementation on ShaderToy which works fine:
https://www.shadertoy.com/view/ll2BRG
(Note: Disregard the first shader in Buffer A, it just filters out the dim colors in the input texture to emulate a star field since afaik ShaderToy doesn't allow uploading custom textures)
But when I use the same shader in my own code and render using ping-pong FrameBuffers, it looks different. Here is my own implementation ported over to WebGL:
https://jsfiddle.net/1b68eLdr/87755/
I basically create two 512x512 buffers, ping-pong the shader 4 times, increasing the kernel step at each iteration according to the algorithm, and render the final iteration to the screen.
The problem is visible banding, and my streaks/tails seem to lose brightness a lot faster. (Note: the image is somewhat inaccurate; the lengths of the streaks are the same/correct, it's the color values that are wrong.)
I have been struggling with this for a while in desktop OpenGL / LWJGL, so I ported it over to WebGL/JavaScript and uploaded it to JSFiddle in the hope that someone can spot what the problem is. I suspect it's either the texture coordinates or the FrameBuffer configuration, since the shaders are exactly the same.
The reason it works on ShaderToy is that ShaderToy uses a floating-point render target.
Simply use gl.FLOAT as the type of your framebuffer texture and the issue is fixed (I verified this with said modification in your JSFiddle).
So do this in your createBackingTexture():
// Request the extension (MUST be done before the texture can use gl.FLOAT);
// getExtension() returns null if the device doesn't support it.
gl.getExtension('OES_texture_float');
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, this._width, this._height, 0, gl.RGBA, gl.FLOAT, null);
Hi, I am trying to render lots of axis-aligned cubes with glDrawArraysInstanced(). Each cube has a fixed size and varies only in its center position and color, and each cube takes only one of a few different colors. So I want to potentially render millions of cubes with the following per-instance data:
struct CubeInfo {
    Eigen::Vector3f center; // center of the cube (x,y,z)
    int labelId;            // label of the cube which affects its color
};
So I am using the following vertex shader:
#version 330
uniform mat4 mvp_matrix;
// regular vertex attributes
layout(location = 0) in vec3 vertex_position;
// per-instance variables
layout(location = 1) in vec3 cube_center;
layout(location = 2) in int cube_label;
// color out to frag shader
out vec4 color_out;
void main(void) {
    // Add the per-instance offset cube_center
    vec4 new_pos = vec4(vertex_position + cube_center, 1);
    // Calculate vertex position in clip space
    gl_Position = mvp_matrix * new_pos;
    // Set color_out based on label
    switch (cube_label) {
    case 1:
        color_out = vec4(0.5, 0.25, 0.5, 1);
        break;
    case 2:
        color_out = vec4(0.75, 0.0, 0.0, 1);
        break;
    case 3:
        color_out = vec4(0.0, 0.0, 0.5, 1);
        break;
    case 4:
        color_out = vec4(0.75, 1.0, 0.0, 1);
        break;
    default:
        color_out = vec4(0.5, 0.5, 0.5, 1); // grey
        break;
    }
}
and the corresponding fragment shader:
#version 330
in vec4 color_out;
out vec4 fragColor;
void main()
{
    // Pass the interpolated vertex color through
    fragColor = color_out;
}
However, color_out always takes the default grey value, even though the cube_label values are between 1 and 4. This is my problem. Am I doing something wrong in the shader above?
I initialized the cubeInfo VBO with random labelIds between 1 and 4, so I am expecting a more colorful output than the following:
This is my render code, which makes use of Qt's QGLShaderProgram and QGLBuffer wrapper:
// Enable back face culling
glEnable(GL_CULL_FACE);
cubeShaderProgram_.bind();
// Set the vertexbuffer stuff (Simply 36 vertices for cube)
cubeVertexBuffer_.bind();
cubeShaderProgram_.setAttributeBuffer("vertex_position", GL_FLOAT, 0, 3, 0);
cubeShaderProgram_.enableAttributeArray("vertex_position");
cubeVertexBuffer_.release();
// Set the per instance buffer stuff
cubeInstanceBuffer_.bind();
cubeShaderProgram_.setAttributeBuffer("cube_center", GL_FLOAT, offsetof(CubeInfo,center), 3, sizeof(CubeInfo));
cubeShaderProgram_.enableAttributeArray("cube_center");
int center_location = cubeShaderProgram_.attributeLocation("cube_center");
glVertexAttribDivisor(center_location, 1);
cubeShaderProgram_.setAttributeBuffer("cube_label", GL_INT, offsetof(CubeInfo,labelId), 1, sizeof(CubeInfo));
cubeShaderProgram_.enableAttributeArray("cube_label");
int label_location = cubeShaderProgram_.attributeLocation("cube_label");
glVertexAttribDivisor(label_location, 1);
cubeInstanceBuffer_.release();
// Do instanced rendering
glDrawArraysInstanced(GL_TRIANGLES, 0, 36, displayed_num_cubes_ );
cubeShaderProgram_.disableAttributeArray("vertex_position");
cubeShaderProgram_.disableAttributeArray("cube_center");
cubeShaderProgram_.disableAttributeArray("cube_label");
cubeShaderProgram_.release();
Apart from my primary question above (the color problem), is this a good way to render a Minecraft-like world?
Update
If I change my CubeInfo.labelId attribute from int to float, and the corresponding vertex shader variable cube_label to float as well, it works! Why is that? This page says GLSL supports the int type. I would prefer labelId/cube_label to be an int/short.
Update2:
Even if I just change GL_INT to GL_FLOAT in the following line of my render code, I get the proper colors:
cubeShaderProgram_.setAttributeBuffer("cube_label", GL_INT, offsetof(CubeInfo,labelId), 1, sizeof(CubeInfo));
The problem with your label attribute is that it is an integer attribute, but you don't set it as an integer attribute. Qt's setAttributeBuffer functions don't know anything about integer attributes; they all use glVertexAttribPointer under the hood, which takes the vertex data in any arbitrary format and converts it into floats to feed an in float attribute. That doesn't match the in int in your shader (so the attribute will probably just remain at some default value, or receive undefined values).
To actually pass data into a real integer vertex attribute (which is something entirely different from a float attribute and wasn't introduced until GL 3+), you need the function glVertexAttribIPointer (note the I in there; similarly there is an L version for in double attributes, so just using GL_DOUBLE won't work in that case either). But sadly, Qt, not really being fit for GL 3+ yet, doesn't seem to have a wrapper for that. So you will either have to do it manually, using:
glVertexAttribIPointer(cubeShaderProgram_.attributeLocation("cube_label"),
1, GL_INT, sizeof(CubeInfo),
static_cast<const char*>(0)+offsetof(CubeInfo,labelId));
in place of the cubeShaderProgram_.setAttributeBuffer call, or use an in float attribute instead.
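For the latter option, here is a minimal sketch of the float-based variant, assuming you change both the struct member and the shader declaration (matching what your updates already observed):
struct CubeInfo {
    Eigen::Vector3f center; // unchanged
    float labelId;          // was int; the shader now declares `in float cube_label;`
};
// ...
cubeShaderProgram_.setAttributeBuffer("cube_label", GL_FLOAT, offsetof(CubeInfo, labelId), 1, sizeof(CubeInfo));
// In the shader, recover the integer with e.g. switch (int(cube_label + 0.5)).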
If you want to use the color assigned in the vertex shader, you should at least write a trivial fragment shader like:
void main()
{
    gl_FragColor = color_out;
}
Update
I think that either you can't pass cube_label to your vertex shader, or you didn't set the values in the first place (in the structure). The latter is more likely, but you can replace your switch-case with the following line to see the actual value passed:
color_out = vec4(float(cube_label) / 4.0, 0, 0, 1.0);
Update2
I once had a similar problem with Intel GPUs (drivers): no matter what I tried, I couldn't pass integer values. However, the same shader worked flawlessly on NVIDIA, so, as in your case, I converted the int into a float.
I am trying to implement instancing in my OpenGL program. I got it to work, and then decided to make my GLSL code more efficient by sending the Model-View-Projection matrix product as input to the GLSL program, so that the CPU computes it for each instance, as opposed to the GPU. Here is my vertex shader code (most of it is irrelevant to my question):
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 2) in vec3 vertexColor;
layout(location = 3) in vec3 vertexNormal_modelspace;
layout(location = 6) in mat4 models;
layout(location = 10) in mat4 modelsV;
layout(location = 14) in mat4 modelsVP;
// Output data; will be interpolated for each fragment.
out vec3 newColor;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 MV;
uniform mat4 P;
uniform mat4 V;
uniform mat4 M;
uniform int num_lights;
uniform vec3 Lights[256];
void main(){
    // Output position of the vertex in clip space: P * modelsV * position
    gl_Position = P * modelsV * vec4(vertexPosition_modelspace,1);
    // Position of the vertex in world space: models * position
    Position_worldspace = (models * vec4(vertexPosition_modelspace,1)).xyz;
    // Vector that goes from the vertex to the camera, in camera space.
    // In camera space, the camera is at the origin (0,0,0).
    vec3 vertexPosition_cameraspace = (modelsV * vec4(vertexPosition_modelspace,1)).xyz;
    EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace;
    // Normal of the vertex, in camera space
    Normal_cameraspace = (modelsV * vec4(vertexNormal_modelspace,0)).xyz;
    // Color of the vertex. No special space for this one.
    newColor = vertexColor;
}
The above code works, but only because I'm not using the last input, modelsVP, to calculate gl_Position. If I do use it (instead of computing P * modelsV), the instances aren't drawn, and I get this error:
Linking program
Compiling shader : GLSL/meshColor.vertexshader
Compiling shader : GLSL/meshColor.fragmentshader
Linking program
Vertex info
0(10) : error C5102: input semantic attribute "ATTR" has too big of a numeric index (16)
0(10) : error C5102: input semantic attribute "ATTR" has too big of a numeric index (16)
0(10) : error C5041: cannot locate suitable resource to bind variable "modelsVP". Possibly large array.
I'm sure I'm linking it correctly in my OpenGL code, because if I swap the input location of modelsVP with modelsV, so that it is 10 instead of 14, I am able to use it, but not modelsV. Is there a maximum number of inputs you can have for a vertex shader? I really can't think of any other reason why I would get this error...
I'll include more of my OpenGL code that is relevant here, but I'm pretty sure that it's correct (it's not all in the same class or method):
// Buffer data for VBO. The numbers must match the layout in the GLSL code.
#define position 0
#define uv 1
#define color 2
#define normal 3
#define tangent 4
#define bitangent 5
#define model 6 // 4x4 matrices take 4 positions
#define modelV 10
#define modelVP 14
#define num_buffers 18
GLuint VBO[num_buffers];
glGenBuffers(num_buffers, VBO);
for( int i=0; i<ModelMatrices.size(); i++ )
{
    mvp.push_back( projection * view * ModelMatrices.at(i) );
    mv.push_back( view * ModelMatrices.at(i) );
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[model]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * ModelMatrices.size(), &ModelMatrices[0], GL_DYNAMIC_DRAW);
for (unsigned int i = 0; i < 4; i++) {
    glEnableVertexAttribArray(model + i);
    glVertexAttribPointer(model + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
                          (const GLvoid*)(sizeof(GLfloat) * i * 4));
    glVertexAttribDivisor(model + i, 1);
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[modelV]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * mv.size(), &mv[0], GL_DYNAMIC_DRAW);
for (unsigned int i = 0; i < 4; i++) {
    glEnableVertexAttribArray(modelV + i);
    glVertexAttribPointer(modelV + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
                          (const GLvoid*)(sizeof(GLfloat) * i * 4));
    glVertexAttribDivisor(modelV + i, 1);
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[modelVP]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * mvp.size(), &mvp[0], GL_DYNAMIC_DRAW);
for (unsigned int i = 0; i < 4; i++) {
    glEnableVertexAttribArray(modelVP + i);
    glVertexAttribPointer(modelVP + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
                          (const GLvoid*)(sizeof(GLfloat) * i * 4));
    glVertexAttribDivisor(modelVP + i, 1);
}
OpenGL mandates that implementations offer a minimum of 16 four-component vertex attributes; see GL_MAX_VERTEX_ATTRIBS for details. An index of 16 is therefore not guaranteed to be supported by all implementations.
Your mat4 vertex attributes each count as four 4-component attributes, so a mat4 at location 14 occupies locations 14 through 17, which is out of range on implementations that only support 16 vertex attributes.
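If in doubt, the actual limit can be queried at run time; a minimal sketch:
GLint maxAttribs = 0;
glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &maxAttribs); // the spec guarantees at least 16
// A mat4 at location 14 occupies locations 14..17, so it is only valid
// if maxAttribs >= 18; otherwise the shader fails as seen in the error above.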
You are using too many vertex attributes. Here's how to reduce the number of attributes without changing anything much about your code (and any functional changes are improvements). The following assumes that models is the "model-to-world" matrix, modelsV is the "model-to-camera" matrix, and that modelsVP is the "model-to-projection" matrix:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 2) in vec3 vertexColor;
layout(location = 3) in vec3 vertexNormal_modelspace;
layout(location = 6) in mat4 modelsV;
// Output data ; will be interpolated for each fragment.
out vec3 newColor;
//The fragment shader should work in *camera* space, not world space.
out vec4 Position_cameraspace;
out vec3 Normal_cameraspace;
//out vec3 EyeDirection_cameraspace; Can be computed from Position_cameraspace in the FS.
// Values that stay constant for the whole mesh.
uniform mat4 P;
void main()
{
    Position_cameraspace = modelsV * vec4(vertexPosition_modelspace, 1.0);
    gl_Position = P * Position_cameraspace;
    Normal_cameraspace = (modelsV * vec4(vertexNormal_modelspace, 0.0)).xyz;
    newColor = vertexColor;
}
See? Isn't that much simpler? Fewer uniforms in the vertex shader, fewer outputs to the fragment shader, fewer math computations, and fewer vertex attributes.
All you need to do is change your fragment shader to use the camera-space position, rather than the world-space position. Which should be a reasonably easy change.
...because the floats seem to be coming out fine, but there's something wrong with the ints.
Essentially I have a struct called "BlockInstance" which holds a vec3 and an int. I've got an array of these BlockInstances which I buffer like so (translating from C# to C for clarity):
glBindBuffer(GL_ARRAY_BUFFER, bufferHandle);
glBufferData(GL_ARRAY_BUFFER, sizeof(BlockInstance)*numBlocks, blockData, GL_DYNAMIC_DRAW);
glVertexAttribPointer(3,3,GL_FLOAT,false,16,0);
glVertexAttribPointer(4,1,GL_INT,false,16,12);
glVertexAttribDivisor(3,1);
glVertexAttribDivisor(4,1);
And my vertex shader looks like this:
#version 330
layout (location = 0) in vec3 Position;
layout (location = 1) in vec2 TexCoord;
layout (location = 2) in vec3 Normal;
layout (location = 3) in vec3 Translation;
layout (location = 4) in int TexIndex;
uniform mat4 ProjectionMatrix;
out vec2 TexCoord0;
void main()
{
    mat4 trans = mat4(
        1,0,0,0,
        0,1,0,0,
        0,0,1,0,
        Translation.x,Translation.y,Translation.z,1);
    gl_Position = ProjectionMatrix * trans * vec4(Position, 1.0);
    TexCoord0 = vec2(TexCoord.x+TexIndex,TexCoord.y)/16;
}
When I replace TexIndex on the last line of my GLSL shader with a constant like 0, 1, or 2, my textures come out fine, but if I leave it like it is, they come out all mangled, so there must be something wrong with the number, right? But I don't know what it's coming out as, so it's hard to debug.
I've looked at my array of BlockInstances, and they're all set to 1,2, or 19 so I don't think my input is wrong...
What else could it be?
Note that I'm using a sprite map texture where each of the tiles is 16x16 px but my TexCoords are in the range 0-1, so I add a whole number to it to choose which tile, and then divide it by 16 (the map is also 16x16 tiles) to put it back into the proper range. The idea is I'll replace that last line with
TexCoord0 = vec2(TexCoord.x+(TexIndex%16),TexCoord.y+(TexIndex/16))/16;
-- GLSL does integer math, right? An int divided by an int will come out as a whole number?
If I try this:
TexCoord0 = vec2(TexCoord.x+(TexIndex%16),TexCoord.y)/16;
The texture looks fine, but it's not using the right sprite. (Looks to be using the first sprite)
If I do this:
TexCoord0 = vec2(TexCoord.x+(TexIndex%16),TexCoord.y+(TexIndex/16))/16;
It comes out all white. This leads me to believe that TexIndex is coming out to be a very large number (bigger than 256 anyway) and that it's probably a multiple of 16.
layout (location = 4) in int TexIndex;
There's your problem.
glVertexAttribPointer is used to feed floating-point attributes: whatever format the source data is in, it is converted to floating-point values. Passing integers is possible, but those integers are converted to floats, because that's what glVertexAttribPointer is for.
What you need is glVertexAttribIPointer (notice the I). This is used for providing signed and unsigned integer data.
So if you declare a vertex shader input as a float or some non-prefixed vec, you use glVertexAttribPointer to feed it. If you declare the input as int, uint, ivec or uvec, then you use glVertexAttribIPointer.
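Applied to the buffer layout from the question (stride 16 bytes, the int at byte offset 12), a sketch of the corrected setup would be:
glBindBuffer(GL_ARRAY_BUFFER, bufferHandle);
// Note: glVertexAttribIPointer has no `normalized` parameter; the data stays integral.
glVertexAttribIPointer(4, 1, GL_INT, 16, (const GLvoid*)12);
glVertexAttribDivisor(4, 1);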
glLineStipple has been deprecated in the latest OpenGL APIs.
What is it replaced with?
If not replaced, how can I get a similar effect?
(I don't want to use a compatibility profile of course...)
Sorry, it hasn't been replaced with anything. The first idea that comes to mind for emulating it would be the geometry shader: you feed the geometry shader with a line, compute its screen-space length and, based on that, generate a variable number of sub-lines between its start and end vertex.
EDIT: Perhaps you could also use a 1D texture with the alpha (or red) channel encoding the pattern as 0.0 (no line) or 1.0 (line), and then have the line's texture coordinate go from 0 to 1; in the fragment shader you then make a simple alpha test, discarding fragments with alpha below some threshold. You can employ the geometry shader to generate the line texCoords, as otherwise you would need different vertices for every line. This way you can also make the texCoord dependent on the screen-space length of the line.
The whole thing gets more difficult if you draw triangles (using polygon mode GL_LINE). Then you have to do the triangle-to-line transformation yourself in the geometry shader, putting in triangles and putting out lines (that could also be a reason for deprecating polygon mode in the future, if it hasn't been already).
EDIT: Although I believe this question is abandoned, I have made a simple shader triple for the second approach. It's just a minimal solution; feel free to add custom features yourself. I haven't tested it because I lack the necessary hardware, but you should get the point:
uniform mat4 modelViewProj;
layout(location=0) in vec4 vertex;
void main()
{
    gl_Position = modelViewProj * vertex;
}
The vertex shader is a simple pass-through.
layout(lines) in;
layout(line_strip, max_vertices=2) out;
uniform vec2 screenSize;
uniform float patternSize;
noperspective out float texCoord;
void main()
{
    vec2 winPos0 = screenSize.xy * gl_in[0].gl_Position.xy / gl_in[0].gl_Position.w;
    vec2 winPos1 = screenSize.xy * gl_in[1].gl_Position.xy / gl_in[1].gl_Position.w;
    gl_Position = gl_in[0].gl_Position;
    texCoord = 0.0;
    EmitVertex();
    gl_Position = gl_in[1].gl_Position;
    texCoord = 0.5 * length(winPos1 - winPos0) / patternSize;
    EmitVertex();
}
In the geometry shader we take a line and compute its screen-space length in pixels. We then divide this by the size of the stipple pattern texture, which would be factor * 16 when emulating a call to glLineStipple(factor, pattern). This is taken as the 1D texture coordinate of the line's second end point.
Note that this texture coordinate has to be interpolated linearly (noperspective interpolation specifier). The usual perspective-correct interpolation would cause the stipple pattern to "squeeze together" on the farther away parts of the line, whereas we are explicitly working with screen-space values here.
uniform sampler1D pattern;
uniform vec4 lineColor;
noperspective in float texCoord;
layout(location=0) out vec4 color;
void main()
{
    if (texture(pattern, texCoord).r < 0.5)
        discard;
    color = lineColor;
}
The fragment shader now just performs a simple alpha test using the value from the pattern texture, which contains a 1 for line and a 0 for no line. So to emulate the fixed-function stipple you would have a 16-pixel one-component 1D texture instead of a 16-bit pattern. Don't forget to set the pattern's wrapping mode to GL_REPEAT; about the filtering mode I'm not that sure, but I suppose GL_NEAREST would be a good idea.
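For illustration, here is a minimal host-side sketch of building such a pattern texture (the GL_R8 format and all names are my choice, assuming a GL 3+ context):
GLushort pattern = 0x18FF; // example 16-bit stipple pattern, as for glLineStipple
GLubyte texels[16];
for (int i = 0; i < 16; ++i)
    texels[i] = (pattern & (1 << i)) ? 255 : 0; // 255 = line, 0 = gap
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_1D, tex);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_REPEAT);      // repeat the pattern along the line
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // no blending between on/off texels
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage1D(GL_TEXTURE_1D, 0, GL_R8, 16, 0, GL_RED, GL_UNSIGNED_BYTE, texels);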
But as said earlier, if you want to render triangles using glPolygonMode, it won't work this way. Instead you have to adapt the geometry shader to accept triangles and generate 3 lines for each triangle.
EDIT: In fact, OpenGL 3's direct support for integer operations in shaders allows us to drop this whole 1D-texture approach completely and work straightforwardly with an actual bit pattern. Thus the geometry shader is slightly changed to output the actual screen-space pattern coordinate, without normalization:
texCoord = 0.5 * length(winPos1-winPos0);
In the fragment shader we then just take the bit pattern as an unsigned integer (though 32-bit, in contrast to glLineStipple's 16-bit value) and the stretch factor of the pattern, and take the texture coordinate (well, no texture anymore actually, but never mind) modulo 32 to get its position in the pattern (those explicit uints are annoying, but my GLSL compiler says implicit conversions between int and uint are evil):
uniform uint pattern;
uniform float factor;
...
uint bit = uint(round(texCoord / factor)) & 31U;
if ((pattern & (1U << bit)) == 0U)
    discard;
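On the host side, emulating glLineStipple(factor, pattern) then boils down to setting two uniforms; a sketch, with `prog` standing in for the linked program object:
glUseProgram(prog);
glUniform1ui(glGetUniformLocation(prog, "pattern"), 0x18FFu); // bit pattern (up to 32 bits here)
glUniform1f(glGetUniformLocation(prog, "factor"), 2.0f);      // stretch factor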
To answer this question, we first have to investigate what glLineStipple actually does.
See the image, where the quad on the left is drawn with four separate line segments using the primitive type GL_LINES, and the circle on the right is drawn with a consecutive polygon line, using the primitive type GL_LINE_STRIP.
When using line segments, the stipple pattern is restarted at each segment; the pattern is restarted at each primitive. When using a line strip, the stipple pattern is applied seamlessly to the entire polygon; the pattern continues seamlessly across vertex coordinates.
Be aware that the length of the pattern is stretched along the diagonals. This is possibly the key to the implementation.
For separate line segments this is not very complicated at all, but for line strips things get a bit more complicated. The length of the line cannot be calculated in the shader program without knowing all the primitives of the line. Even if all the primitives were known (e.g. via an SSBO), the calculation would have to be done in a loop.
See also Dashed lines with OpenGL core profile.
Anyway, it is not necessary to implement a geometry shader. The trick is to know the start of the line segment in the fragment shader. This is easy, using the flat interpolation qualifier.
The vertex shader has to pass the normalized device coordinate to the fragment shader, once with default interpolation and once with flat (no) interpolation. As a result, in the fragment shader the first input contains the NDC coordinate of the current position on the line and the latter the NDC coordinate of the start of the line.
#version 330
layout (location = 0) in vec3 inPos;
flat out vec3 startPos;
out vec3 vertPos;
uniform mat4 u_mvp;
void main()
{
    vec4 pos = u_mvp * vec4(inPos, 1.0);
    gl_Position = pos;
    vertPos = pos.xyz / pos.w;
    startPos = vertPos;
}
In addition to the varying inputs, the fragment shader has uniform variables: u_resolution contains the width and height of the viewport, while u_factor and u_pattern are the multiplier and the 16-bit pattern corresponding to the parameters of glLineStipple.
So the length of the line from the start to the actual fragment can be calculated:
vec2 dir = (vertPos.xy-startPos.xy) * u_resolution/2.0;
float dist = length(dir);
And fragments on the gaps can be discarded with the discard command:
uint bit = uint(round(dist / u_factor)) & 15U;
if ((u_pattern & (1U << bit)) == 0U)
    discard;
Fragment shader:
#version 330
flat in vec3 startPos;
in vec3 vertPos;
out vec4 fragColor;
uniform vec2 u_resolution;
uniform uint u_pattern;
uniform float u_factor;
void main()
{
    vec2 dir = (vertPos.xy - startPos.xy) * u_resolution / 2.0;
    float dist = length(dir);
    uint bit = uint(round(dist / u_factor)) & 15U;
    if ((u_pattern & (1U << bit)) == 0U)
        discard;
    fragColor = vec4(1.0);
}
This implementation is much easier and shorter than using geometry shaders. The flat interpolation qualifier has been supported since GLSL 1.30 and GLSL ES 3.00, versions in which geometry shaders are not supported.
See the line rendering which was generated with the above shader.
The shader gives a proper result for line segments, but fails for line strips, since the stipple pattern is restarted at each vertex coordinate.
The issue can't even be solved by a geometry shader. This part of the question remains unresolved.
For the following simple demo program I've used the GLFW API for creating a window, GLEW for loading OpenGL, and GLM (OpenGL Mathematics) for the math. I don't provide the code for the function CreateProgram, which just creates a program object from the vertex shader and fragment shader source code:
#define _USE_MATH_DEFINES // for M_PI on MSVC
#include <cmath>
#include <vector>
#include <string>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <gl/gl_glew.h>
#include <GLFW/glfw3.h>
std::string vertShader = R"(
#version 330
layout (location = 0) in vec3 inPos;
flat out vec3 startPos;
out vec3 vertPos;
uniform mat4 u_mvp;
void main()
{
    vec4 pos = u_mvp * vec4(inPos, 1.0);
    gl_Position = pos;
    vertPos = pos.xyz / pos.w;
    startPos = vertPos;
}
)";
std::string fragShader = R"(
#version 330
flat in vec3 startPos;
in vec3 vertPos;
out vec4 fragColor;
uniform vec2 u_resolution;
uniform uint u_pattern;
uniform float u_factor;
void main()
{
    vec2 dir = (vertPos.xy - startPos.xy) * u_resolution / 2.0;
    float dist = length(dir);
    uint bit = uint(round(dist / u_factor)) & 15U;
    if ((u_pattern & (1U << bit)) == 0U)
        discard;
    fragColor = vec4(1.0);
}
)";
GLuint CreateVAO(std::vector<glm::vec3> &varray)
{
    GLuint bo[2], vao;
    glGenBuffers(2, bo);
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, bo[0]);
    glBufferData(GL_ARRAY_BUFFER, varray.size()*sizeof(*varray.data()), varray.data(), GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    return vao;
}
int main(void)
{
    if ( glfwInit() == 0 )
        return 0;
    GLFWwindow *window = glfwCreateWindow( 800, 600, "GLFW OGL window", nullptr, nullptr );
    if ( window == nullptr )
        return 0;
    glfwMakeContextCurrent(window);
    glewExperimental = true;
    if ( glewInit() != GLEW_OK )
        return 0;

    GLuint program = CreateProgram(vertShader, fragShader);
    GLint loc_mvp = glGetUniformLocation(program, "u_mvp");
    GLint loc_res = glGetUniformLocation(program, "u_resolution");
    GLint loc_pattern = glGetUniformLocation(program, "u_pattern");
    GLint loc_factor = glGetUniformLocation(program, "u_factor");

    glUseProgram(program);
    GLushort pattern = 0x18ff;
    GLfloat factor = 2.0f;
    glUniform1ui(loc_pattern, pattern);
    glUniform1f(loc_factor, factor);
    //glLineStipple(2.0, pattern);
    //glEnable(GL_LINE_STIPPLE);

    glm::vec3 p0(-1.0f, -1.0f, 0.0f);
    glm::vec3 p1(1.0f, -1.0f, 0.0f);
    glm::vec3 p2(1.0f, 1.0f, 0.0f);
    glm::vec3 p3(-1.0f, 1.0f, 0.0f);
    std::vector<glm::vec3> varray1{ p0, p1, p1, p2, p2, p3, p3, p0 };
    GLuint vao1 = CreateVAO(varray1);

    std::vector<glm::vec3> varray2;
    for (size_t u=0; u <= 360; u += 8)
    {
        double a = u*M_PI/180.0;
        double c = cos(a), s = sin(a);
        varray2.emplace_back(glm::vec3((float)c, (float)s, 0.0f));
    }
    GLuint vao2 = CreateVAO(varray2);

    glm::mat4 project(1.0f);
    int vpSize[2]{0, 0};
    while (!glfwWindowShouldClose(window))
    {
        int w, h;
        glfwGetFramebufferSize(window, &w, &h);
        if (w != vpSize[0] || h != vpSize[1])
        {
            vpSize[0] = w; vpSize[1] = h;
            glViewport(0, 0, vpSize[0], vpSize[1]);
            float aspect = (float)w/(float)h;
            project = glm::ortho(-aspect, aspect, -1.0f, 1.0f, -10.0f, 10.0f);
            glUniform2f(loc_res, (float)w, (float)h);
        }
        glClear(GL_COLOR_BUFFER_BIT);

        glm::mat4 modelview1( 1.0f );
        modelview1 = glm::translate(modelview1, glm::vec3(-0.6f, 0.0f, 0.0f) );
        modelview1 = glm::scale(modelview1, glm::vec3(0.5f, 0.5f, 1.0f) );
        glm::mat4 mvp1 = project * modelview1;
        glUniformMatrix4fv(loc_mvp, 1, GL_FALSE, glm::value_ptr(mvp1));
        glBindVertexArray(vao1);
        glDrawArrays(GL_LINES, 0, (GLsizei)varray1.size());

        glm::mat4 modelview2( 1.0f );
        modelview2 = glm::translate(modelview2, glm::vec3(0.6f, 0.0f, 0.0f) );
        modelview2 = glm::scale(modelview2, glm::vec3(0.5f, 0.5f, 1.0f) );
        glm::mat4 mvp2 = project * modelview2;
        glUniformMatrix4fv(loc_mvp, 1, GL_FALSE, glm::value_ptr(mvp2));
        glBindVertexArray(vao2);
        glDrawArrays(GL_LINE_STRIP, 0, (GLsizei)varray2.size());

        glfwSwapBuffers(window);
        glfwPollEvents();
    }
    glfwTerminate();
    return 0;
}
See also
Dashed line in OpenGL3?
OpenGL ES - Dashed Lines
Since I struggled a bit (no pun intended) to get it right, I thought it could be useful to others if I shared my implementation of a set of stippling shaders based on Christian Rau's version.
To control the pattern density, the fragment shader requires the number of patterns per unit length of the viewport, nPatterns, instead of a factor. Also included is an optional clipping plane feature.
The rest is mainly commenting and cleaning.
Free to use for all intents and purposes.
The vertex shader:
#version 330
in vec4 vertex;
void main(void)
{
    // just a pass-through
    gl_Position = vertex;
}
The geometry shader:
#version 330
layout(lines) in;
layout(line_strip, max_vertices = 2) out;
uniform mat4 pvmMatrix;
uniform mat4 mMatrix;
uniform mat4 vMatrix;
out vec3 vPosition; // passed to the fragment shader for plane clipping
out float texCoord; // passed to the fragment shader for stipple pattern
void main(void)
{
    // to achieve uniform pattern density whatever the line orientation,
    // the upper texture coordinate is made proportional to the line's length
    vec3 pos0 = gl_in[0].gl_Position.xyz;
    vec3 pos1 = gl_in[1].gl_Position.xyz;
    float max_u_texture = length(pos1 - pos0);
    // Line start
    gl_Position = pvmMatrix * gl_in[0].gl_Position;
    texCoord = 0.0;
    // view-space position for the clip plane (from the untransformed input vertex)
    vec4 vsPos0 = vMatrix * mMatrix * gl_in[0].gl_Position;
    vPosition = vsPos0.xyz / vsPos0.w;
    EmitVertex(); // one down, one to go
    // Line end
    gl_Position = pvmMatrix * gl_in[1].gl_Position;
    texCoord = max_u_texture;
    // view-space position for the clip plane
    vec4 vsPos1 = vMatrix * mMatrix * gl_in[1].gl_Position;
    vPosition = vsPos1.xyz / vsPos1.w;
    EmitVertex();
    // done
    EndPrimitive();
}
The fragment shader:
#version 330
uniform int pattern;   // a 16-bit mask (0 to 0xFFFF) encoding the stipple pattern bitwise
uniform int nPatterns; // the number of patterns per unit length of the viewport, typically 200-300 for good pattern density
uniform vec4 color;
uniform vec4 clipPlane0; // defined in view space
in float texCoord;
in vec3 vPosition;
layout(location = 0) out vec4 fragColor;
void main(void)
{
    // test the fragment position against the clip plane position (optional)
    if (vPosition.z > clipPlane0.w) {
        discard;
        return;
    }
    // use 16 bits for the masking pattern:
    // map the texture coordinate to the interval [0, 16)
    uint bitpos = uint(round(texCoord * nPatterns)) % 16U;
    // move a unit bit 1U to position bitpos, so that
    // bit is an integer between 1 and 1000 0000 0000 0000 = 0x8000
    uint bit = (1U << bitpos);
    // test the bit against the masking pattern
    // Line::SOLID:      pattern = 0xFFFF; // = 1111 1111 1111 1111 = solid pattern
    // Line::DASH:       pattern = 0x3F3F; // = 0011 1111 0011 1111
    // Line::DOT:        pattern = 0x6666; // = 0110 0110 0110 0110
    // Line::DASHDOT:    pattern = 0xFF18; // = 1111 1111 0001 1000
    // Line::DASHDOTDOT: pattern = 0x7E66; // = 0111 1110 0110 0110
    uint up = uint(pattern);
    // discard the fragment if its bit doesn't match the masking pattern
    if ((up & bit) == 0U) discard;
    fragColor = color;
}
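For completeness, a short usage sketch for this shader's uniforms (the program handle `prog` and the chosen values are mine, for illustration):
glUseProgram(prog);
glUniform1i(glGetUniformLocation(prog, "pattern"), 0x3F3F); // Line::DASH
glUniform1i(glGetUniformLocation(prog, "nPatterns"), 250);  // pattern density
glUniform4f(glGetUniformLocation(prog, "color"), 1.0f, 1.0f, 1.0f, 1.0f);
glUniform4f(glGetUniformLocation(prog, "clipPlane0"), 0.0f, 0.0f, 0.0f, 1.0e9f); // clipping effectively off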
}