GLSL "No Matching Overloaded Function Found" (hsv2rgb) - glsl

What obvious thing am I missing? I'm trying to compile/run this vertex shader:
// an attribute will receive data from a buffer
attribute vec2 a_position;
uniform vec2 u_resolution;
varying vec4 v_color;
// all shaders have a main function
void main() {
// convert the position from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clip space)
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace, 0, 1);
vec3 c = hsv2rgb(vec3(0.5, 0.5, 0.5));
/*temporary*/ v_color = gl_Position * 0.5 + 0.5;
gl_PointSize = 1.0;
}
// All components are in the range [0…1], including hue.
vec3 hsv2rgb(vec3 c)
{
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
using the hsv2rgb code I found in "From RGB to HSV in OpenGL GLSL", together with the boilerplate from https://webglfundamentals.org/webgl/lessons/webgl-boilerplate.html, but I get the error:
webgl-utils.js:66 *** Error compiling shader '[object WebGLShader]':ERROR: 0:19: 'hsv2rgb' : no matching overloaded function found
ERROR: 0:19: '=' : dimension mismatch
ERROR: 0:19: '=' : cannot convert from 'const mediump float' to 'highp 3-component vector of float'
The error is specific to the hsv2rgb call. I have tried a number of things, including making the parameter a variable (i.e. adding vec3 v = vec3(0.5, 0.5, 0.5) and passing v into hsv2rgb) and making a skeleton hsv2rgb that simply returns its parameter. In the referenced SO post, I saw that another user seemed to have exactly the same problem, but I am passing in a vec3 properly rather than 3 separate floats.
If it makes any difference, here is the fragment shader as well:
// fragment shaders don't have a default precision so we need
// to pick one. mediump is a good default
precision mediump float;
varying vec4 v_color;
void main() {
// gl_FragColor is a special variable a fragment shader
// is responsible for setting
gl_FragColor = v_color;
}

From OpenGL ES Shading Language 1.00 Specification - 6.1 Function Definitions:
All functions must be either declared with a prototype or defined with a body before they are called.
Hence, you either have to define the function hsv2rgb before it is called for the first time, or you have to declare a function prototype before main:
vec3 hsv2rgb(vec3 c);
void main() {
// [...]
vec3 c = hsv2rgb(vec3(0.5, 0.5, 0.5));
// [...]
}
vec3 hsv2rgb(vec3 c)
{
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
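Alternatively (the first option above), simply define hsv2rgb before main so it is already known when it is called; a minimal sketch reusing the question's vertex shader with that ordering:
attribute vec2 a_position;
uniform vec2 u_resolution;
varying vec4 v_color;

// Defined before main, so no separate prototype is needed.
vec3 hsv2rgb(vec3 c)
{
    vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
    vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
    return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}

void main() {
    // convert the position from pixels to clip space
    vec2 zeroToOne = a_position / u_resolution;
    vec2 clipSpace = zeroToOne * 2.0 - 1.0;
    gl_Position = vec4(clipSpace, 0, 1);
    vec3 c = hsv2rgb(vec3(0.5, 0.5, 0.5));
    /*temporary*/ v_color = gl_Position * 0.5 + 0.5;
    gl_PointSize = 1.0;
}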

Related

How can I add gradient banding and control it?

I am rendering a quad with up to 4 different vertex colours.
My vertex shader is super simple:
#version 330
layout(location=0) in vec2 inVertexPosition;
layout(location=1) in vec4 inColor;
out vec4 color;
void main()
{
gl_Position = vec4(inVertexPosition.x,-inVertexPosition.y, 0.0, 1.0);
color = inColor;
}
The fragment shader:
#version 330
layout(location=0) out vec4 frag_colour;
in vec4 color;
void main()
{
frag_colour = color;
}
also super simple. The result is a smooth gradient from corner to corner. However, I would like to produce an effect similar to the background of the text in this image:
where there is a limited palette, so there is intentional banding in the gradient. My attempt to recreate this style is a combination of "Gradient with fixed number of levels" and "From RGB to HSV in OpenGL GLSL", which has given me a fragment shader like this:
#version 330
layout(location=0) out vec4 frag_colour;
in vec4 color;
uniform bool uBand = false;
uniform float uBandingSteps;
vec3 rgb2hsv(vec3 c)
{
vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
float d = q.x - min(q.w, q.y);
float e = 1.0e-10;
return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}
vec3 hsv2rgb(vec3 c)
{
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
void main()
{
if(uBand)
{
vec3 hsv = rgb2hsv(color.rgb);
float h = floor(hsv.x * (uBandingSteps*3+1) + 0.5) / (uBandingSteps*3+1);
float s = floor(hsv.y * (uBandingSteps*3+1) + 0.5) / (uBandingSteps*3+1);
float v = floor(hsv.z * (uBandingSteps*3+1) + 0.5) / (uBandingSteps*3+1);
frag_colour = vec4(hsv2rgb(vec3(h,s,v)),color.a);
}
else
frag_colour = color;
}
This works to a degree; however, for some reason I need to multiply uBandingSteps to generate the right number of colours. The above works "okay", but the problem is that the banding seems fairly arbitrary:
You can see that the banding doesn't give a bevelled sort of look; instead the first colour is narrow, then wide, then narrow, then wide, and so on, rather than the bands growing gradually wider towards the middle colour (the widest part of the gradient) and then steadily narrowing again.
How can I modify what I have in order to produce the intended effect (a sort of bevelled, smooth, stepped gradient)?
Possibly the answer is super simple, too: just round the color channels.
e.g. if you want to allow 8 different levels for each color channel, then, based on the fragment shader from the question:
#version 330
layout(location=0) out vec4 frag_colour;
in vec4 color;
void main()
{
vec3 color8 = round(color.rgb * 8.0) / 8.0;
frag_colour = vec4(color8, color.a);
}
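If you want the number of levels to stay configurable, the same rounding can be driven by the uBandingSteps uniform from the question; a minimal sketch under that assumption (quantizing in RGB directly rather than via HSV):
#version 330
layout(location=0) out vec4 frag_colour;
in vec4 color;
uniform float uBandingSteps; // levels per channel, set to a positive value by the application

void main()
{
    // Quantize each channel to uBandingSteps discrete levels.
    vec3 banded = round(color.rgb * uBandingSteps) / uBandingSteps;
    frag_colour = vec4(banded, color.a);
}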

Environment Mapping + Source Lights

I found a good example of equirectangular environment mapping. Here's the code:
VERTEX SHADER
varying vec3 Normal;
varying vec3 EyeDir;
varying float LightIntensity;
uniform vec3 LightPos;
void main(void){
gl_Position = ftransform();
Normal = normalize(gl_NormalMatrix * gl_Normal);
vec4 pos = gl_ModelViewMatrix * gl_Vertex;
EyeDir = pos.xyz;
LightIntensity = max(dot(normalize(LightPos - EyeDir), Normal), 0.0);
}
FRAGMENT SHADER
const vec3 Xunitvec = vec3 (1.0, 0.0, 0.0);
const vec3 Yunitvec = vec3 (0.0, 1.0, 0.0);
uniform vec3 BaseColor;
uniform float MixRatio;
uniform sampler2D EnvMap;
varying vec3 Normal;
varying vec3 EyeDir;
varying float LightIntensity;
void main (void){
// Compute reflection vector
vec3 reflectDir = reflect(EyeDir, Normal);
// Compute altitude and azimuth angles
vec2 index;
index.y = dot(normalize(reflectDir), Yunitvec);
reflectDir.y = 0.0;
index.x = dot(normalize(reflectDir), Xunitvec) * 0.5;
// Translate index values into proper range
if (reflectDir.z >= 0.0)
index = (index + 1.0) * 0.5;
else
{
index.t = (index.t + 1.0) * 0.5;
index.s = (-index.s) * 0.5 + 1.0;
}
// if reflectDir.z >= 0.0, s will go from 0.25 to 0.75
// if reflectDir.z < 0.0, s will go from 0.75 to 1.25, and
// that's OK, because we've set the texture to wrap.
// Do a lookup into the environment map.
vec3 envColor = vec3 (texture2D(EnvMap, index));
// Add lighting to base color and mix
vec3 base = LightIntensity * BaseColor;
envColor = mix(envColor, base, MixRatio);
gl_FragColor = vec4 (envColor, 1.0);
}
My problem is in the vertex shader.
LightIntensity = max(dot(normalize(LightPos - EyeDir), Normal), 0.0);
I'm subtracting the eye direction from the light position to get the direction to the light. But if I have more than one light source ... how should I do the calculation?
I use version 1.2 of GLSL.
Light is additive, so you just need to sum up the contributions of each light. If you have a fixed number of them, you can do that in a single pass through the shader—you just define a uniform for each light (position to start with, though you’ll probably want intensity/color as well) and calculate the final intensity like this:
LightIntensity = max(dot(normalize(Light1Pos - EyeDir), Normal), 0.0)
               + max(dot(normalize(Light2Pos - EyeDir), Normal), 0.0)
               + max(dot(normalize(Light3Pos - EyeDir), Normal), 0.0);
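If the number of lights is fixed, you can also keep the per-light positions in a uniform array and loop over them; here is a minimal sketch of the question's vertex shader reworked that way (the array form and the size of 3 are my assumptions, and per-light intensity/colour uniforms could be added the same way):
uniform vec3 LightPos[3]; // one light position per light (same space as LightPos above)
varying vec3 Normal;
varying vec3 EyeDir;
varying float LightIntensity;

void main(void)
{
    gl_Position = ftransform();
    Normal = normalize(gl_NormalMatrix * gl_Normal);
    EyeDir = (gl_ModelViewMatrix * gl_Vertex).xyz;

    // Light is additive: sum the diffuse contribution of every light.
    LightIntensity = 0.0;
    for (int i = 0; i < 3; i++)
        LightIntensity += max(dot(normalize(LightPos[i] - EyeDir), Normal), 0.0);
}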

Shadowmapping always produces shadows beyond far plane

I am working on the beginnings of omnidirectional shadow mapping in my engine. For now I am only producing one shadowmap as a test. I am getting an odd result when using my current shaders. Here is a screenshot which shows the problem:
I am using a near value of 0.5 and a far value of 5.0 in the projection matrix for the shadowmap render. As near as I can tell, any value with a light-space z larger than my far plane distance is being computed by my fragment shader as in shadow.
This is my fragment shader:
in vec2 st;
uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;
uniform sampler2D shadowmapTexture;
uniform mat4 invProj;
uniform mat4 lightProj;
uniform vec3 lightPosition;
out vec3 color;
void main () {
vec3 clipSpaceCoords;
clipSpaceCoords.xy = st.xy * 2.0 - 1.0;
clipSpaceCoords.z = texture(depthTexture, st).x * 2.0 - 1.0;
vec4 position = invProj * vec4(clipSpaceCoords,1.0);
position.xyz /= position.w;
vec4 lightSpace = lightProj * vec4(position.xyz,1.0);
lightSpace.xyz /= lightSpace.w;
lightSpace.xyz = lightSpace.xyz * 0.5 + 0.5;
float lightDepth = texture(shadowmapTexture, lightSpace.xy).x;
vec3 normal = texture(normalTexture, st).xyz;
vec3 diffuse;
float shadowFactor = 1.0;
if(lightSpace.w > 0.0 && lightSpace.z > lightDepth+0.0042) {
shadowFactor = 0.2;
}
else {
float k = 0.00001;
vec3 distanceToLight = lightPosition - position.xyz;
float distanceLength = length(distanceToLight);
float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;
}
vec3 gamma = vec3(1.0/2.2);
color = pow(texture(colorTexture, st).xyz*shadowFactor+diffuse, gamma);
}
How can I fix this issue (other than increasing my far plane distance)?
One other question, as this is the first time I have attempted shadowmapping: am I doing the lighting in relation to the shadows correctly?
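For illustration only: the symptom described above is consistent with the projected light-space depth exceeding 1.0 for geometry beyond the shadowmap's far plane (after the perspective divide and the *0.5+0.5 remap), so one possible guard around the existing comparison, purely as an assumption on my part, would be:
// Skip the shadow test when the projected depth falls outside the shadowmap's
// [0,1] depth range, i.e. beyond the light's far plane.
if (lightSpace.w > 0.0 && lightSpace.z <= 1.0 && lightSpace.z > lightDepth + 0.0042) {
    shadowFactor = 0.2;
}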

OpenGL, diffuse shader

I'm trying to implement a very simple diffuse shader in GLSL/OpenGL.
Here's what I got:
Vertex shader:
#version 130
in vec3 vertPos3D;
in vec3 vertNormal3D;
uniform mat3 transpMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform vec3 lightPosition;
varying vec3 vertNormal;
varying vec3 lightVector;
void main()
{
vec4 res_pos = projectionMatrix * viewMatrix * vec4(vertPos3D.xyz, 1.0);
gl_Position = res_pos;
mat4 pm = projectionMatrix * viewMatrix;
vertNormal = (viewMatrix * vec4(vertNormal3D, 0)).xyz;
lightVector = (viewMatrix * vec4(lightPosition, 1.0)).xyz - (viewMatrix * vec4(vertPos3D.xyz, 1.0)).xyz;
}
Fragment Shader:
#version 130
out vec4 color;
varying vec3 lightVector;
varying vec3 vertNormal;
void main()
{
float dot_product = max(normalize(dot(lightVector, vertNormal)), 0.0);
color = dot_product * vec4( 1.0, 1.0, 1.0, 1.0 );
}
As soon as I multiply the final color by dot_product, nothing displays. If I remove dot_product, everything works (except diffuse lighting, of course). I'm afraid it's something obvious I'm missing.
A problem:
normalize(dot(lightVector, vertNormal))
dot in GLSL 1.3 returns a float.
normalize accepts a vector, not a float.
documentation for dot
documentation for normalize
A Solution, at least to this problem:
In Fragment shader, replace
float dot_product = max(normalize(dot(lightVector, vertNormal)), 0.0);
with
float dot_product = clamp(dot(lightVector, vertNormal), 0., 1.);
It looks like you are using max and normalize to avoid negative numbers returned from dot. This is exactly what clamp is for. Here's the documentation for clamp
Use
float dot_product = max(dot(normalize(lightVector), normalize(vertNormal)), 0.0);
Dylan Holmes' answer is slightly incorrect:
The lightVector still needs to be normalized!
And clamping is unnecessary; max was correct. A dot product never returns a value higher than 1.0 if the input vectors are normalized.
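Putting both answers together, a minimal sketch of the corrected fragment shader, keeping the question's interface unchanged:
#version 130
out vec4 color;
varying vec3 lightVector;
varying vec3 vertNormal;

void main()
{
    // Normalize both interpolated vectors, then clamp negative facing to zero.
    float dot_product = max(dot(normalize(lightVector), normalize(vertNormal)), 0.0);
    color = dot_product * vec4(1.0, 1.0, 1.0, 1.0);
}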

Atmospheric scattering OpenGL 3.3

I'm currently trying to convert a shader by Sean O'Neil to version 330 so I can try it out in an application I'm writing. I'm having some issues with the deprecated functions, so I replaced them, but I'm almost completely new to GLSL, so I probably made a mistake somewhere.
Original shaders can be found here:
http://www.gamedev.net/topic/592043-solved-trying-to-use-atmospheric-scattering-oneill-2004-but-get-black-sphere/
My horrible attempt at converting them:
Vertex shader:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 2) in vec3 vertexNormal_modelspace;
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fCameraHeight; // The camera's current height
uniform float fCameraHeight2; // fCameraHeight^2
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fOuterRadius2; // fOuterRadius^2
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fInnerRadius2; // fInnerRadius^2
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 2;
const float fSamples = 2.0;
invariant out vec3 v3Direction;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightPosition_worldspace;
out vec4 dgl_SecondaryColor;
out vec4 dgl_Color;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main(void)
{
//gg_FrontColor = vec3(1.0, 0.0, 0.0);
//gg_FrontSecondaryColor = vec3(0.0, 1.0, 0.0);
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = vertexPosition_modelspace;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos;
float fHeight = length(v3Start);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fCameraHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepth*scale(fStartAngle);
// Initialize the scattering loop variables
gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
gl_FrontSecondaryColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.2, 0.1, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot(v3Ray, v3SamplePoint) / fHeight;
float fScatter = (fStartOffset + fDepth*(scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
gl_FrontSecondaryColor.rgb = v3FrontColor * fKmESun;
gl_FrontColor.rgb = v3FrontColor * (v3InvWavelength * fKrESun);
gl_Position = MVP * vec4(vertexPosition_modelspace,1);
v3Direction = v3CameraPos - v3Pos;
dgl_SecondaryColor = gl_FrontSecondaryColor;
dgl_Color = gl_FrontColor;
}
Fragment shader:
#version 330 core
out vec4 dgl_FragColor;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
invariant in vec3 v3Direction;
in vec4 dgl_SecondaryColor;
in vec4 dgl_Color;
uniform mat4 MV;
void main (void)
{
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
dgl_FragColor = dgl_Color + fMiePhase * dgl_SecondaryColor;
dgl_FragColor.a = dgl_FragColor.b;
}
I wrote a function to render a sphere, and I'm trying to render this shader onto an inverted version of it. The sphere itself works completely fine, with normals and all. My problem is that the sphere gets rendered all black, so the shader is not working.
Edit: Got the sun to draw, but the sky is still all black.
This is how I'm trying to render the atmosphere inside my main rendering loop:
glUseProgram(programAtmosphere);
glBindTexture(GL_TEXTURE_2D, 0);
//######################
glUniform3f(v3CameraPos, getPlayerPos().x, getPlayerPos().y, getPlayerPos().z);
glm::vec3 lightDirection = lightPos/length(lightPos);
glUniform3f(v3LightPos, lightDirection.x , lightDirection.y, lightDirection.z);
glUniform3f(v3InvWavelength, 1.0f / pow(0.650f, 4.0f), 1.0f / pow(0.570f, 4.0f), 1.0f / pow(0.475f, 4.0f));
glUniform1fARB(fCameraHeight, 10.0f+length(getPlayerPos()));
glUniform1fARB(fCameraHeight2, (10.0f+length(getPlayerPos()))*(10.0f+length(getPlayerPos())));
glUniform1fARB(fInnerRadius, 10.0f);
glUniform1fARB(fInnerRadius2, 100.0f);
glUniform1fARB(fOuterRadius, 10.25f);
glUniform1fARB(fOuterRadius2, 10.25f*10.25f);
glUniform1fARB(fKrESun, 0.0025f * 20.0f);
glUniform1fARB(fKmESun, 0.0015f * 20.0f);
glUniform1fARB(fKr4PI, 0.0025f * 4.0f * 3.141592653f);
glUniform1fARB(fKm4PI, 0.0015f * 4.0f * 3.141592653f);
glUniform1fARB(fScale, 1.0f / 0.25f);
glUniform1fARB(fScaleDepth, 0.25f);
glUniform1fARB(fScaleOverScaleDepth, 4.0f / 0.25f );
glUniform1fARB(g, -0.990f);
glUniform1f(g2, -0.990f * -0.990f);
Any ideas?
Edit: updated the code, and added a picture.
I think the problem there is that you write to 'FragColor', which may be a 'dead end' output variable in the fragment shader, since you must explicitly bind it to a color number before linking the program:
glBindFragDataLocation(programAtmosphere,0,"FragColor");
or using this in a shader:
layout(location = 0) out vec4 FragColor
You may try to use the builtin out vars instead: gl_FragColor, which is an alias for gl_FragData[0] and is therefore the same as the binding above.
EDIT: Forgot to say, when using the deprecated builtins, you must have a compatibility declaration:
#version 330 compatibility
EDIT 2: To test the binding, I'd write a constant color to it to rule out possible calculation errors, since those may not yield the expected result because of errors or zero input.
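Applied to the fragment shader from the question, the layout-qualifier route plus the EDIT 2 constant-color test might look like this (a sketch; keeping the name dgl_FragColor is my assumption):
#version 330 core
// Explicitly bind the output to color number 0, as suggested above.
layout(location = 0) out vec4 dgl_FragColor;

void main(void)
{
    // Constant color first, to verify the output binding independently of the
    // scattering math (which may produce black because of errors or zero input).
    dgl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}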