Volumetrics in a pixel shader - glsl

I have this shadertoy at:
https://www.shadertoy.com/view/4sVfz1
I seem to have most of the volume ray marching routines down, but my ray marching function is missing something where it computes transparency from the accumulated density at a point.
Here is my ray marching function:
float ray( vec3 ro, vec3 rd, out float d, out float den )
{
    float t = 0.0; d = EPS; den = 0.0;
    for( int i = 0; i < STEPS; ++i )
    {
        vec3 p = ro + rd * t;
        d = 0.3 * map( p, den ).x;
        if( d < EPS || den > 1.0 || t > FAR ) break;
        t += d;
        den = fbm( p + TWA );
        den += clamp( den, -1.0, 0.0 ) * t * t;
    }
    return t;
}

The proper ray marching function is this one:
float ray( vec3 ro, vec3 rd, out float den )
{
    float t = 0.0, maxD = 0.0; den = 0.0;
    for( int i = 0; i < STEPS; ++i )
    {
        vec3 p = ro + rd * t;
        den = map( p );
        maxD = maxD < den ? den : maxD;
        if( maxD > 0.99 || t > FAR ) break;
        t += 0.05;
    }
    den = maxD;
    return t;
}
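For reference, the standard way to turn accumulated density into transparency is front-to-back compositing with a Beer-Lambert transmittance term. Here is a minimal sketch in the style of the code above (map, STEPS and FAR as defined there; the 0.05 step size, the 0.1 extinction factor and the white albedo are made-up values, not taken from the shader):

vec4 rayAccum( vec3 ro, vec3 rd )
{
    float t = 0.0;
    float trans = 1.0;                       // transmittance: 1.0 = fully clear
    vec3 col = vec3( 0.0 );
    for( int i = 0; i < STEPS; ++i )
    {
        vec3 p = ro + rd * t;
        float den = map( p );                // density sample, as above
        if( den > 0.0 )
        {
            float a = 1.0 - exp( -den * 0.1 );   // Beer-Lambert opacity of one step
            col += trans * a * vec3( 1.0 );      // accumulate a white medium
            trans *= 1.0 - a;                    // attenuate what lies behind
        }
        t += 0.05;
        if( trans < 0.01 || t > FAR ) break;     // opaque enough, or out of range
    }
    return vec4( col, 1.0 - trans );             // alpha = accumulated opacity
}

The max-density version above skips the compositing entirely and just remembers the densest sample along the ray, which is why a single opacity value falls out at the end.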

Related

Why are my ray march fragment shader reflection texture lookups slowing my frame rate?

I’ve written a fragment shader in GLSL using Shadertoy.
Link: https://www.shadertoy.com/view/wtGSzy
Most of it works, but when I enable texture lookups in the reflection function, the performance drops from 60 FPS to roughly 5 FPS.
The code in question is on lines 173-176:
if(SDFObjectToDraw.texChannelID == 0)
    col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
    col = texture(iChannel1, uv);
This same code can be seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. It only causes issues in the reflection function.
My question is: why do the texture lookups in the reflection code drop my performance this much, and what can I do to improve it?
/**
 * Return the normalized direction to march in from the eye point for a single pixel.
 *
 * fieldOfView: vertical field of view in degrees
 * size: resolution of the output image
 * fragCoord: the x,y coordinate of the pixel in the output image
 */
vec3 rayDirection(float fieldOfView, vec2 size, vec2 fragCoord) {
    vec2 xy = fragCoord - size / 2.0;
    float z = size.y / tan(radians(fieldOfView) / 2.0);
    return normalize(vec3(xy, -z));
}
float start = 0.0;
vec3 eye = vec3(0,0,5);
int MAX_MARCHING_STEPS = 255;
float EPSILON = 0.00001;
float end = 10.0;
const uint Shpere = 1u;
const uint Box = 2u;
const uint Plane = 4u;
vec3 lightPos = vec3(-10,0,5);
#define M_PI 3.1415926535897932384626433832795
const int SDF_OBJECT_COUNT = 4;
struct SDFObject
{
    uint Shape;
    vec3 Position;
    float Radius;
    int texChannelID;
    float Ambiant;
    float Spec;
    float Diff;
    vec3 BoxSize;
    bool isMirror; // quick hack to get reflections working
};
SDFObject SDFObjects[SDF_OBJECT_COUNT] = SDFObject[SDF_OBJECT_COUNT](
    SDFObject(Shpere, vec3(2,0,-3), 1.0, 0, 0.2, 0.2, 0.8, vec3(0,0,0), true)
    ,SDFObject(Shpere, vec3(-2,0,-3), 1.0, 0, 0.1, 1.0, 1.0, vec3(0,0,0), false)
    ,SDFObject(Box, vec3(0,0,-6), 0.2, 1, 0.2, 0.2, 0.8, vec3(1.0,0.5,0.5), false)
    ,SDFObject(Plane, vec3(0,0,0), 1.0, 1, 0.2, 0.2, 0.8, vec3(0.0,1.0,0.0), false)
);
float shereSDF(vec3 p, SDFObject o)
{
    return length(p - o.Position) - o.Radius;
}
float boxSDF(vec3 pointToTest, vec3 boxBoundery, float radius, vec3 boxPos)
{
    vec3 q = abs(pointToTest - boxPos) - boxBoundery;
    return length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0) - radius;
}
float planeSDF(vec3 p, vec4 n, vec3 Pos)
{
    return dot(p - Pos, n.xyz) + n.w;
}
bool IsShadow(vec3 LightPos, vec3 HitPos)
{
    bool isShadow = false;
    vec3 viewRayDirection = normalize(lightPos - HitPos);
    float depth = start;
    vec3 hitpoint;
    for(int i=0; i<MAX_MARCHING_STEPS; i++)
    {
        hitpoint = (HitPos + depth * viewRayDirection);
        float dist = end;
        for(int j=0; j<SDF_OBJECT_COUNT; j++)
        {
            float distToObjectBeingConsidered;
            if(SDFObjects[j].Shape == Shpere)
                distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
            if(SDFObjects[j].Shape == Box)
                distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize, SDFObjects[j].Radius, SDFObjects[j].Position);
            if(SDFObjects[j].Shape == Plane)
                distToObjectBeingConsidered = planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
            if( distToObjectBeingConsidered < dist)
            {
                dist = distToObjectBeingConsidered;
            }
        }
        if(dist < EPSILON)
        {
            isShadow = true;
        }
        depth += dist;
        if(depth >= end)
        {
            isShadow = false;
        }
    }
    return isShadow;
}
vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore)
{
    vec3 returnCol;
    vec3 reflectedRay = reflect(inComingRay, surfNormal);
    vec3 RayDirection = normalize(reflectedRay);
    float depth = start;
    vec3 hitpoint;
    int i;
    for(i=0; i<MAX_MARCHING_STEPS; i++)
    {
        hitpoint = (HitPos + depth * RayDirection);
        SDFObject SDFObjectToDraw;
        float dist = end;
        for(int j=0; j<SDF_OBJECT_COUNT; j++)
        {
            float distToObjectBeingConsidered;
            if(SDFObjects[j].Shape == Shpere)
                distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
            if(SDFObjects[j].Shape == Box)
                distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize, SDFObjects[j].Radius, SDFObjects[j].Position);
            if(SDFObjects[j].Shape == Plane)
                distToObjectBeingConsidered = planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
            if( distToObjectBeingConsidered < dist && j != objectIndexToIgnore )// D > 0.0)
            {
                dist = distToObjectBeingConsidered;
                SDFObjectToDraw = SDFObjects[j];
            }
        }
        if(dist < EPSILON)
        {
            vec3 normal = normalize(hitpoint - SDFObjectToDraw.Position);
            float u = 0.5 + (atan(normal.z, normal.x)/(2.0*M_PI));
            float v = 0.5 + (asin(normal.y)/(M_PI));
            vec2 uv = vec2(u,v);
            vec4 col = vec4(0,0.5,0.5,0);
            ///>>>>>>>>>>>> THESE LINES ARE broken, WHY?
            //if(SDFObjectToDraw.texChannelID == 0)
            //    col = texture(iChannel0, uv);
            //if(SDFObjectToDraw.texChannelID == 1)
            //    col = texture(iChannel1, uv);
            vec3 NormalizedDirToLight = normalize(lightPos - SDFObjectToDraw.Position);
            float theta = dot(normal, NormalizedDirToLight);
            vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal);
            vec3 viewDir = normalize(SDFObjectToDraw.Position);
            float Spec = dot(reflectionOfLight, viewDir);
            if(IsShadow(lightPos, hitpoint))
            {
                returnCol = (col.xyz * SDFObjectToDraw.Ambiant);
            }
            else
            {
                returnCol = (col.xyz * SDFObjectToDraw.Ambiant)
                    + (col.xyz * max(theta * SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant));
            }
            break;
        }
        depth += dist;
        if(depth >= end)
        {
            //should look up bg texture here but cant be assed right now
            returnCol = vec3(1.0,0.0,0.0);
            break;
        }
    }
    return returnCol;//*= (vec3(i+1)/vec3(MAX_MARCHING_STEPS));
}
vec3 rayMarch(vec2 fragCoord)
{
    vec3 viewRayDirection = rayDirection(45.0, iResolution.xy, fragCoord);
    float depth = start;
    vec3 hitpoint;
    vec3 ReturnColour = vec3(0,0,0);
    for(int i=0; i<MAX_MARCHING_STEPS; i++)
    {
        hitpoint = (eye + depth * viewRayDirection);
        float dist = end;
        SDFObject SDFObjectToDraw;
        int objectInDexToIgnore = -1;
        //find closest object to current point
        for(int j=0; j<SDF_OBJECT_COUNT; j++)
        {
            float distToObjectBeingConsidered;
            if(SDFObjects[j].Shape == Shpere)
                distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
            if(SDFObjects[j].Shape == Box)
                distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize, SDFObjects[j].Radius, SDFObjects[j].Position);
            if(SDFObjects[j].Shape == Plane)
                distToObjectBeingConsidered = planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
            if( distToObjectBeingConsidered < dist)
            {
                dist = distToObjectBeingConsidered;
                SDFObjectToDraw = SDFObjects[j];
                objectInDexToIgnore = j;
            }
        }
        //if we are close enough to an object to hit it
        if(dist < EPSILON)
        {
            vec3 normal = normalize(hitpoint - SDFObjectToDraw.Position);
            if(SDFObjectToDraw.isMirror)
            {
                ReturnColour = MirrorReflection( viewRayDirection, normal, hitpoint, objectInDexToIgnore);
            }
            else
            {
                float u = 0.5 + (atan(normal.z, normal.x)/(2.0*M_PI));
                float v = 0.5 + (asin(normal.y)/(M_PI));
                vec2 uv = vec2(u,v);
                vec4 col;
                if(SDFObjectToDraw.texChannelID == 0)
                    col = texture(iChannel0, uv);
                if(SDFObjectToDraw.texChannelID == 1)
                    col = texture(iChannel1, uv);
                vec3 NormalizedDirToLight = normalize(lightPos - SDFObjectToDraw.Position);
                float theta = dot(normal, NormalizedDirToLight);
                vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal);
                vec3 viewDir = normalize(SDFObjectToDraw.Position);
                float Spec = dot(reflectionOfLight, viewDir);
                if(IsShadow(lightPos, hitpoint))
                {
                    ReturnColour = (col.xyz * SDFObjectToDraw.Ambiant);
                }
                else
                {
                    ReturnColour = (col.xyz * SDFObjectToDraw.Ambiant)
                        + (col.xyz * max(theta * SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant));
                    //+(col.xyz* Spec * SDFObjectToDraw.Spec);
                }
            }
            return ReturnColour;
        }
        depth += dist;
        if(depth >= end)
        {
            float u = fragCoord.x / iResolution.x;
            float v = fragCoord.y / iResolution.y;
            vec4 col = texture(iChannel2, vec2(u,v));
            ReturnColour = col.xyz;
        }
    }
    return ReturnColour;
}
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    //vec2 uv = fragCoord/iResolution.xy;
    // Time varying pixel color
    //vec3 col = 0.5 + 0.5*cos(iTime+uv.xyx+vec3(0,2,4));
    // Output to screen
    lightPos *= cos(iTime+vec3(1.5,2,2));
    //lightPos = vec3(cos(iTime)*2.0,0,0);
    vec3 SDFCol = rayMarch(fragCoord);
    vec3 col = vec3(0);
    //if(SDFVal <= 1.0)
    //    col = vec3(1,0,0);
    //col = vec3(SDFVal,0,0);
    col = vec3(0.5,0,0);
    col = SDFCol;
    fragColor = vec4(col,1.0);
}
[...] This same code can be seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. [...]
The "working" texture lookup is executed in a loop in rayMarch. MAX_MARCHING_STEPS is 255, so the lookup is done at most 255 times.
vec3 rayMarch(vec2 fragCoord)
{
    // [...]
    for(int i=0; i<MAX_MARCHING_STEPS; i++)
    {
        // [...]
        if(SDFObjectToDraw.texChannelID == 0)
            col = texture(iChannel0, uv);
        if(SDFObjectToDraw.texChannelID == 1)
            col = texture(iChannel1, uv);
        // [...]
    }
    // [...]
}
When you do the lookup in MirrorReflection, the performance breaks down, because the lookup sits in a loop in MirrorReflection, and MirrorReflection is itself called in a loop in rayMarch. In this case the lookup is done up to 255*255 = 65025 times.
Roughly 65,000 texture lookups for a single fragment is far too much and causes the performance breakdown.
vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore)
{
    // [...]
    for(i=0; i<MAX_MARCHING_STEPS; i++)
    {
        // [...]
        if(SDFObjectToDraw.texChannelID == 0)
            col = texture(iChannel0, uv);
        if(SDFObjectToDraw.texChannelID == 1)
            col = texture(iChannel1, uv);
        // [...]
    }
    // [...]
}
vec3 rayMarch(vec2 fragCoord)
{
    // [...]
    for(int i=0; i<MAX_MARCHING_STEPS; i++)
    {
        // [...]
        ReturnColour = MirrorReflection(viewRayDirection, normal, hitpoint, objectInDexToIgnore);
        // [...]
    }
    // [...]
}
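For completeness, a sketch of one way to restructure (hitUV and hitChannel are hypothetical names, not code from the shader above): record the hit data inside the march loop and defer the fetch until after it, so each call site pays for at most two texture() calls regardless of step count:

vec2 hitUV = vec2(0.0);
int hitChannel = -1;                        // -1 = no textured hit
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
    // ... march exactly as before; on a hit, record instead of sampling:
    // hitUV = uv; hitChannel = SDFObjectToDraw.texChannelID; break;
}
vec4 col = vec4(0, 0.5, 0.5, 0);            // fallback colour
if(hitChannel == 0) col = texture(iChannel0, hitUV);
if(hitChannel == 1) col = texture(iChannel1, hitUV);

Independently, sampling with an explicit LOD, e.g. textureLod(iChannel0, hitUV, 0.0), avoids the implicit-derivative issues that texture() can run into inside divergent loops.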

WebGL - no matching overloaded function found

I have this WebGL fragment shader in my HTML file, and it runs fine except when I call any of the first five vec2 functions.
vec2 subCalc(vec2 z1, vec2 z2){
    return vec2((z1.x - z2.x), (z1.y - z2.x));
}
I get this error: "no matching overloaded function found".
It's weird because when I use the vec4 functions, for example HSVtoRGB, the code runs fine.
The full shader:
<script id="fragment-shader" type="x-shader/x-fragment">
precision mediump float;
varying vec4 actualPos;
uniform vec2 drag;
uniform float func;
uniform float n;
uniform vec2 center;
uniform float scale;
uniform float test;
vec2 subCalc(vec2 z1, vec2 z2){
    return vec2((z1.x - z2.x), (z1.y - z2.x));
}
vec2 expCalc(vec2 z){
    return vec2(exp(z.x)*cos(z.y), exp(z.x)*sin(z.y));
}
vec2 divCalc(vec2 z1, vec2 z2){
    return vec2((z1.x*z2.x + z1.y*z2.y)/(z2.x*z2.x+z2.y*z2.y), (z1.y*z2.x-z1.x*z2.y)/(z2.x*z2.x+z2*z2.y));
}
vec2 multCalc(vec2 z1, vec2 z2){
    return vec2((z1.x*z2.x - z1.y*z2.y), (z1.y*z2.x + z1.x*z2.y));
}
vec2 sumCalc(vec2 z1, vec2 z2){
    return vec2((z1.x + z2.x), (z1.y + z2.x));
}
vec4 HSVtoRGB(float h, float s, float v)
{
    // Convert between the HSV and RGB color models.
    // Taken from http://www.cs.rit.edu/~ncs/color/t_convert.html and rewritten for GLSL
    int i;
    float f, p, q, t;
    vec4 RGB;
    if (s == 0.0)
    {
        // achromatic (grey)
        RGB.x = RGB.y = RGB.z = v;
        RGB.w = 1.0;
        return RGB;
    }
    h /= 60.0; // sector 0 to 5
    i = int(floor(h));
    f = h - float(i); // fractional part of h
    p = v * (1.0 - s);
    q = v * (1.0 - s * f);
    t = v * (1.0 - s * (1.0 - f));
    if(i==0)
    {
        RGB.x = v;
        RGB.y = t;
        RGB.z = p;
    }
    else if(i==1)
    {
        RGB.x = q;
        RGB.y = v;
        RGB.z = p;
    }
    else if(i==2)
    {
        RGB.x = p;
        RGB.y = v;
        RGB.z = t;
    }
    else if(i==3)
    {
        RGB.x = p;
        RGB.y = q;
        RGB.z = v;
    }
    else if(i==4)
    {
        RGB.x = t;
        RGB.y = p;
        RGB.z = v;
    }
    else if(i==5)
    {
        RGB.x = v;
        RGB.y = p;
        RGB.z = q;
    }
    else
    {
        RGB.x = 1.0;
        RGB.y = 1.0;
        RGB.z = 1.0;
    }
    RGB.w = 1.0;
    return RGB;
}
vec4 secondCase(vec4 posi){
    float multX = ((posi.x * posi.x) - (posi.y * posi.y));
    float multY = ((posi.y * posi.x) + (posi.x * posi.y));
    float newX = multX;
    float newY = multY;
    float r = pow(sqrt((newX)*(newX)+(newY)*(newY)), n);
    for(float i = 0.0; i <= 10000.0; i++){
        if(i >= n){ break; }
        newX = ((newX * posi.x) - (newY * posi.y));
        newY = ((newY * posi.x) + (newX * posi.y));
    }
    float h = (atan(newY/newX));
    float s = 1.0;
    float v = fract((log2(r)));
    h = (h*180.0)/(3.14);
    if(h<0.0)
        h = h*(-1.0);
    if(newX <= 0.0 && newY > 0.0)
        h = 180.0 - h;
    if(newX < 0.0 && newY < 0.0)
        h = 180.0 + h;
    if(newX >= 0.0 && newY < 0.0)
        h = 360.0 - h;
    return HSVtoRGB(h,s,v);
}
vec4 thirdCase(vec4 posi){
    vec2 divi = vec2((100.0*posi.x + 0.0*posi.y)/(posi.x*posi.x+posi.y*posi.y), (0.0*posi.x-100.0*posi.y)/(posi.x*posi.x+posi.y*posi.y));
    vec2 exp = vec2(exp(divi.x)*cos(divi.y), exp(divi.x)*sin(divi.y));
    float r = pow(sqrt((exp.x)*(exp.x)+(exp.y)*(exp.y)), n);
    float h = (atan((exp.y/exp.x)));
    float s = 1.0;
    float v = fract((log2(r)));
    h = (h*180.0)/(3.14);
    if(h<0.0)
        h = h*(-1.0);
    if(exp.x <= 0.0 && exp.y > 0.0)
        h = 180.0 - h;
    if(exp.x < 0.0 && exp.y < 0.0)
        h = 180.0 + h;
    if(exp.x >= 0.0 && exp.y < 0.0)
        h = 360.0 - h;
    return HSVtoRGB(h,s,v);
}
vec4 forthCase(vec4 posi){
    vec2 divi = vec2((100.0*posi.x + 0.0*posi.y)/(posi.x*posi.x+posi.y*posi.y), (0.0*posi.x-100.0*posi.y)/(posi.x*posi.x+posi.y*posi.y));
    vec2 exp = vec2(exp(divi.x)*cos(divi.y), exp(divi.x)*sin(divi.y));
    float r = pow(sqrt((exp.x)*(exp.x)+(exp.y)*(exp.y)), n);
    float h = (atan((exp.y/exp.x))*3.0);
    float s = 1.0;
    float v = fract((log2(r)));
    h = (h*180.0)/(3.14);
    if(h<0.0)
        h = h*(-1.0);
    if(exp.x <= 0.0 && exp.y > 0.0)
        h = 180.0 - h;
    if(exp.x < 0.0 && exp.y < 0.0)
        h = 180.0 + h;
    if(exp.x >= 0.0 && exp.y < 0.0)
        h = 360.0 - h;
    return HSVtoRGB(h,s,v);
}
void main() {
    vec4 finalPosition = actualPos;
    finalPosition.x += drag.x;
    finalPosition.y += drag.y;
    finalPosition.x *= test;
    finalPosition.y *= test;
    float r = sqrt((finalPosition.x)*(finalPosition.x)+(finalPosition.y)*(finalPosition.y));
    float h = atan((finalPosition.y/finalPosition.x));
    float s = 1.0;
    float v = fract((log2(r)));
    h = (h*180.0)/(3.14);
    if(h<0.0)
        h = h*(-1.0);
    if(finalPosition.x < 0.0 && finalPosition.y > 0.0)
        h = 180.0 - h;
    if(finalPosition.x < 0.0 && finalPosition.y < 0.0)
        h = 180.0 + h;
    if(finalPosition.x > 0.0 && finalPosition.y < 0.0)
        h = 360.0 - h;
    vec2 firstMemb = expCalc(finalPosition);
    if(func == 1.0){
        gl_FragColor = HSVtoRGB(h,s,v);
    } else if(func == 2.0){
        gl_FragColor = secondCase(finalPosition);
    } else if(func == 3.0){
        gl_FragColor = thirdCase(finalPosition);
    }
}
</script>
Thanks in advance.
This line
    vec2 firstMemb = expCalc(finalPosition);
is trying to pass a vec4 to a function that takes a vec2.
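A minimal fix, assuming the intent is to use the position's x/y pair as the complex number (GLSL has no implicit vec4-to-vec2 conversion, hence the "no matching overloaded function found" error):

vec2 firstMemb = expCalc(finalPosition.xy);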

C++ RGB to HSV conversion

I am trying to write a program that converts from the RGB colour space to the HSV space. It is based on the conversion algorithm described here. Most of the places I searched have a similar conversion algorithm.
My input to the program: 67 42 96
Output: 267.778, 0.5625, 96    (the 96 should be 38, as below)
Expected output: 268 56% 38%
For other inputs too, I can see that the H and S values are as they should be, but the V value is different. What could be the reason for that?
#include <iostream>
#include <algorithm> // std::max
using namespace std;
void rgb2hsv(float r, float g, float b) {
    float h = 0.0;
    float s = 0.0;
    float v = 0.0;
    float min = std::min( std::min(r, g), b );
    float max = std::max( std::max(r, g), b );
    v = max; // v
    float delta = max - min;
    if( max != 0.0 )
        s = delta / max; // s
    else {
        // r = g = b = 0 // s = 0, v is undefined
        s = 0.0;
        h = -1.0;
        cout << h << " , " << s << " , " << v << endl;
    }
    if( r == max )
        h = ( g - b ) / delta; // between yellow & magenta
    else if( g == max )
        h = 2.0 + ( b - r ) / delta; // between cyan & yellow
    else
        h = 4.0 + ( r - g ) / delta; // between magenta & cyan
    h = h * 60.0; // degrees
    if( h < 0.0 )
        h += 360.0;
    cout << h << " , " << s << " , " << v << endl;
}
int main(){
    while(1){
        float r, g, b;
        cin >> r >> g >> b;
        rgb2hsv(r, g, b);
    }
    return 0;
}
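For what it's worth, the numbers point to a normalization issue rather than a wrong formula: H and S are ratios, so they come out right even with 0-255 inputs, but v = max stays on the 0-255 scale. 96/255 ≈ 0.376, i.e. the expected 38%; dividing v (or r, g and b up front) by 255 would line the output up with the "268 56% 38%" form.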

Colourful noise in luminance filter (OpenGL)

My final goal is to detect a laser line in my picture. To do so, first I convert from the RGB colour space to HSV (in order to examine brightness). Then I select only the pixels which have certain H, S and V values (the brightest pixels of a certain colour, red in my case).
For the pixels which satisfy these values, I write their luminance into all three RGB channels; the ones that don't, I set to black.
Here comes my problem and question:
As I mentioned, I should get either a black pixel or a grey (luminance) pixel. I don't understand how these purple or green pixels get into my picture. They are like noise and they are constantly changing!
At first, I thought I was getting these colours because of values bigger than 1. But I think OpenGL clamps the values to 0-1 (I even tried it myself, with no success).
Does anyone know what causes this effect?
Any help or idea is appreciated.
Here is my fragment shader:
precision highp float;
varying vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
const highp vec3 W = vec3(0.299, 0.587, 0.114);
void main()
{
    lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
    float luminance = dot(textureColor.rgb, W);
    float r = textureColor.r;
    float g = textureColor.g;
    float b = textureColor.b;
    float h;
    float s;
    float v;
    float min = 0.0;
    float max = 0.0;
    float delta = 0.0;
    if(r >= g) {
        if(r >= b) {
            max = r;
        }
        else {
            max = b;
        }
    }
    else {
        if(g >= b) {
            max = g;
        }
        else {
            max = b;
        }
    }
    // max = MAX( r, g, b );
    if(r < g) {
        if(r < b) {
            min = r;
        }
        else {
            min = b;
        }
    }
    else {
        if(g < b) {
            min = g;
        }
        else {
            min = b;
        }
    }
    v = max; // v
    delta = max - min;
    if (delta == 0.0) {
        h = 0.0;
        s = 0.0;
        return;
    }
    else if( max != 0.0 )
        s = delta / max; // s
    else {
        // r = g = b = 0 // s = 0, v is undefined
        s = 0.0;
        h = -1.0;
        return;
    }
    if( r == max ){
        h = ( g - b ) / delta; // between yellow & magenta
        h = mod(h, 6.0);
    }
    else if( g == max )
        h = 2.0 + ( b - r ) / delta; // between cyan & yellow
    else
        h = 4.0 + ( r - g ) / delta; // between magenta & cyan
    h = h * 60.0; // degrees
    if( h < 0.0 )
        h = h + 360.0;
    //HSV transformation ends here
    if(v > 0.8){
        if(s > 0.7){
            if( h > 320.0 && h < 360.0){
                if(luminance > 1.0)
                    gl_FragColor = vec4(vec3(1.0), textureColor.a);
                else
                    gl_FragColor = vec4(vec3(luminance), textureColor.a);
            }
        }
    } else {
        gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
    }
}
I should mention that the big white spot is sunlight; it's so bright that it passes my HSV conditions at the end. That's not my problem right now. The purple noise-like pixels, and the green noise around the picture, are my problem.
The purple and green pixels are probably uninitialized memory.
gl_FragColor is not guaranteed to be initialized at the start of a shader, so initializing it before the return statements on lines 70 and 78 should fix the issue.
If it does not, and an uninitialized framebuffer is used, then it may be because of a different (and less likely) cause:
It may be because of blending. Try disabling it or ensuring gl_FragColor.a is 1.
It may be because the stencil buffer is exposing it. Try disabling stencil testing or ensuring that all pixels pass the stencil test.
Alternatively, if it is caused by blending or the stencil test, you could initialize the framebuffer with something like glClear().
While your fragment shader "returns" without setting a fragment color, like here:
else {
    // r = g = b = 0 // s = 0, v is undefined
    s = 0.0;
    h = -1.0;
    return; //<<<<==== NO gl_FragColor set
I think this code also lacks an else-path for the cases s <= 0.7 or h <= 320.0:
if(v > 0.8){
    if(s > 0.7){
        if( h > 320.0 && h < 360.0){
            if(luminance > 1.0)
                gl_FragColor = vec4(vec3(1.0), textureColor.a);
            else
                gl_FragColor = vec4(vec3(luminance), textureColor.a);
        }
        //<<<<===== NO 'else' for h<320
    }
    ///<<<<====== NO 'else' for s<0.7
} else {
    gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
}
Thus, there are many cases where the fragment (current pixel on image) remains untouched.
So my mistake was that in some places I returned without setting gl_FragColor. In my case, anywhere the process failed or the conditions weren't met, I have to set:
gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
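An equivalent pattern (a sketch of the same idea, not the code below): drop the early returns, initialize a local colour up front, and write gl_FragColor exactly once at the end, so no branch can leave it undefined:

vec4 outColor = vec4(0.0, 0.0, 0.0, 1.0);   // safe default for every path
// ... overwrite outColor only where the HSV conditions match ...
gl_FragColor = outColor;                    // single, unconditional write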
The corrected code should look like:
precision highp float;
varying vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
const highp vec3 W = vec3(0.299, 0.587, 0.114);
void main()
{
    lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
    float luminance = dot(textureColor.rgb, W);
    //RF: test
    float r = textureColor.r;
    float g = textureColor.g;
    float b = textureColor.b;
    float h;
    float s;
    float v;
    float min = 0.0;
    float max = 0.0;
    float delta = 0.0;
    // max = MAX( r, g, b );
    if(r >= g) {
        if(r >= b) {
            max = r;
        }
        else {
            max = b;
        }
    }
    else {
        if(g >= b) {
            max = g;
        }
        else {
            max = b;
        }
    }
    // min = MIN( r, g, b );
    if(r < g) {
        if(r < b) {
            min = r;
        }
        else {
            min = b;
        }
    }
    else {
        if(g < b) {
            min = g;
        }
        else {
            min = b;
        }
    }
    v = max; // v
    delta = max - min;
    if (delta == 0.0) {
        h = 0.0;
        s = 0.0;
        gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
        return;
    }
    else if( max != 0.0 )
        s = delta / max; // s
    else {
        s = 0.0;
        h = -1.0;
        gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
        return;
    }
    if( r == max ){
        h = ( g - b ) / delta; // between yellow & magenta
        h = mod(h, 6.0);
    }
    else if( g == max )
        h = 2.0 + ( b - r ) / delta; // between cyan & yellow
    else
        h = 4.0 + ( r - g ) / delta; // between magenta & cyan
    h = h * 60.0; // degrees
    if( h < 0.0 )
        h = h + 360.0;
    //---------------------------------------------------
    if(v > 0.8){
        if(s > 0.2){
            if( h > 320.0 && h < 360.0){
                gl_FragColor = vec4(vec3(luminance), textureColor.a);
                return;
            }
        }
    }
    gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
}
and then it works great:
[Resulting image]

Omni-directional light in raytracing program gives wrong render c++

I am trying to implement an omni-directional light source (a.k.a. a point light source) in my raytracing program in C++. I am not getting the expected results and I can't figure out the problem. Maybe someone can see what I am doing wrong.
I have included the two functions that are responsible for the raytracing and the lighting. The ClosestIntersection function finds the closest intersection and the triangle it lies on; that is used later in the DirectLight function.
I would really appreciate any help.
#include <iostream>
#include <limits> // std::numeric_limits (needed for m below)
#include <glm/glm.hpp>
#include <SDL.h>
#include "SDLauxiliary.h"
#include "TestModel.h"
#include "math.h"
using namespace std;
using glm::vec3;
using glm::mat3;
// ----------------------------------------------------------------------------
// GLOBAL VARIABLES
const int SCREEN_WIDTH = 500;
const int SCREEN_HEIGHT = 500;
SDL_Surface* screen;
int t;
vector<Triangle> triangles;
float focalLength = 900;
vec3 cameraPos(0, 0, -4.5);
vec3 lightPos(0.5, 0.5, 0);
vec3 lightColor = 14.f * vec3(1,1,1);
// Translate camera
float translation = 0.1; // use this to set translation increment
// Rotate camera
float yaw;
vec3 trueCameraPos;
const float PI = 3.1415927;
// ----------------------------------------------------------------------------
// CLASSES
class Intersection;
// ----------------------------------------------------------------------------
// FUNCTIONS
void Update();
void Draw();
bool ClosestIntersection(vec3 start, vec3 dir, const vector<Triangle>& triangles,
    Intersection& closestIntersection);
vec3 DirectLight(const Intersection& i);
// ----------------------------------------------------------------------------
// STRUCTURES
struct Intersection
{
    vec3 position;
    float distance;
    int triangleIndex;
};
float m = std::numeric_limits<float>::max();
int main(int argc, char* argv[])
{
    LoadTestModel(triangles);
    screen = InitializeSDL(SCREEN_WIDTH, SCREEN_HEIGHT);
    t = SDL_GetTicks(); // Set start value for timer.
    while (NoQuitMessageSDL())
    {
        Update();
        Draw();
    }
    SDL_SaveBMP(screen, "screenshot.bmp");
    return 0;
}
void Update()
{
    // Compute frame time:
    int t2 = SDL_GetTicks();
    float dt = float(t2 - t);
    t = t2;
    cout << "Render time: " << dt << " ms." << endl;
}
void Draw()
{
    if (SDL_MUSTLOCK(screen))
        SDL_LockSurface(screen);
    for (int y = 0; y < SCREEN_HEIGHT; ++y)
    {
        for (int x = 0; x < SCREEN_WIDTH; ++x)
        {
            vec3 start = cameraPos;
            vec3 dir(x - SCREEN_WIDTH / 2, y - SCREEN_HEIGHT / 2, focalLength);
            Intersection intersection;
            if (ClosestIntersection(start, dir, triangles, intersection))
            {
                //vec3 theColor = triangles[intersection.triangleIndex].color;
                vec3 theColor = DirectLight(intersection);
                PutPixelSDL(screen, x, y, theColor);
            }
            else
            {
                vec3 color(0, 0, 0);
                PutPixelSDL(screen, x, y, color);
            }
        }
    }
    if (SDL_MUSTLOCK(screen))
        SDL_UnlockSurface(screen);
    SDL_UpdateRect(screen, 0, 0, 0, 0);
}
bool ClosestIntersection(vec3 s, vec3 d,
    const vector<Triangle>& triangles, Intersection& closestIntersection)
{
    closestIntersection.distance = m;
    for (size_t i = 0; i < triangles.size(); i++)
    {
        vec3 v0 = triangles[i].v0;
        vec3 v1 = triangles[i].v1;
        vec3 v2 = triangles[i].v2;
        vec3 u = v1 - v0;
        vec3 v = v2 - v0;
        vec3 b = s - v0;
        vec3 x;
        // Determinant of A = [-d u v]
        float det = -d.x * ((u.y * v.z) - (v.y * u.z)) -
                    u.x * ((-d.y * v.z) - (v.y * -d.z)) +
                    v.x * ((-d.y * u.z) - (u.y * -d.z));
        // Cramer's Rule for t = x.x
        x.x = (b.x * ((u.y * v.z) - (v.y * u.z)) -
               u.x * ((b.y * v.z) - (v.y * b.z)) +
               v.x * ((b.y * u.z) - (u.y * b.z))) / det;
        if (x.x >= 0)
        {
            // Cramer's Rule for u = x.y
            x.y = (-d.x * ((b.y * v.z) - (v.y * b.z)) -
                   b.x * ((-d.y * v.z) - (v.y * -d.z)) +
                   v.x * ((-d.y * b.z) - (b.y * -d.z))) / det;
            // Cramer's Rule for v = x.z
            x.z = (-d.x * ((u.y * b.z) - (b.y * u.z)) -
                   u.x * ((-d.y * b.z) - (b.y * -d.z)) +
                   b.x * ((-d.y * u.z) - (u.y * -d.z))) / det;
            if (x.y >= 0 && x.z >= 0 && x.y + x.z <= 1 && x.x < closestIntersection.distance)
            {
                closestIntersection.position = x;
                closestIntersection.distance = x.x;
                closestIntersection.triangleIndex = i;
            }
        }
    }
    // end of for loop
    if (closestIntersection.distance != m)
    {
        return true;
    }
    else
    {
        return false;
    }
}
vec3 DirectLight(const Intersection& i)
{
    vec3 n = triangles[i.triangleIndex].normal;
    vec3 r = lightPos - i.position;
    float R2 = r.x * r.x + r.y * r.y + r.z * r.z;
    vec3 D = (lightColor * fmaxf((glm::dot(glm::normalize(r), n)), 0)) / (4 * PI * R2);
    return D;
}
If I'm understanding the code in ClosestIntersection correctly, here's what it's doing for each triangle:
Let u,v be the vectors from one vertex of the triangle to the other two vertices. Let d be (the reverse of) the direction of the ray we're considering.
And let b be the vector from that vertex of the triangle to the camera.
Find p,q,r so that b = pd+qu+rv (p,q,r are what your code calls x.x, x.y, x.z).
Now the ray meets the triangle if p>0, q>=0, r>=0, q+r<=1 and the distance to the intersection point is p.
So, the conditions on q,r make sense; the idea is that b-qu-rv is the vector from the camera to the relevant point in the triangle and it's in direction d. Your distances aren't really distances, but along a single ray they're the same multiple of the actual distance, which means that this works fine for determining which triangle you've hit, and that's all you use them for. So far, so good.
But then you say closestIntersection.position = x; and surely that's all wrong, because this x isn't in the same coordinate system as your camera location, triangle vertices, etc. It's in this funny "how much of d, how much of u, how much of v" coordinate system which isn't even the same from one triangle to the next. (Which is why you are getting discontinuities at triangle boundaries even within a single face, I think.)
Try setting it to v0+x.y*(v1-v0)+x.z*(v2-v0) instead (I think this is right; it's meant to be the actual point where the ray crosses the triangle, in the same coordinates as all your other points) and see what it does.
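(Spelling out the algebra, if I have the signs right: the system being solved is s − v0 = −x.x·d + x.y·u + x.z·v, so s + x.x·d = v0 + x.y·u + x.z·v, and the same point could therefore also be computed as closestIntersection.position = s + x.x * d; in the code's own variables.)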
This isn't a super-great answer, but I managed to make your code work without the strange shading discontinuities. The problem happens in ClosestIntersection and maybe Gareth's answer covers it. I need to stop looking at this now, but I wanted to show you what I have before I leave, and I need an Answer to post some code.
// This starts with some vec3 helper functions which make things
// easier to look at
float Dot(const vec3& a, const vec3& b) {
    return a.x * b.x + a.y * b.y + a.z * b.z;
}
vec3 Cross(const vec3& a, const vec3& b) {
    return vec3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x);
}
float L2(const vec3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; }
float Abs(const vec3& v) { return std::sqrt(L2(v)); }
// Here is the replacement version of ClosestIntersection
bool ClosestIntersection(vec3 cam, vec3 dir,
    const vector<Triangle>& triangles, Intersection& closestIntersection)
{
    closestIntersection.distance = m;
    vec3 P0 = cam;
    vec3 P1 = cam + dir;
    for (size_t i = 0; i < triangles.size(); ++i) {
        vec3 v0 = triangles[i].v0;
        vec3 v1 = triangles[i].v1;
        vec3 v2 = triangles[i].v2;
        // Dan Sunday
        // http://geomalgorithms.com/a06-_intersect-2.html
        vec3 u = v1 - v0;
        vec3 v = v2 - v0;
        // w = P-v0, solve w = su + tv (s, t are parametric scalars)
        vec3 n = Cross(u, v);
        float ri = Dot(n, (v0 - P0)) / Dot(n, (P1 - P0));
        vec3 Pi = P0 + ri * (P1 - P0);
        vec3 w = Pi - v0;
        // s = w . (n x v) / (u . (n x v))
        // t = w . (n x u) / (v . (n x u))
        float s = Dot(w, Cross(n, v)) / Dot(u, Cross(n, v));
        float t = Dot(w, Cross(n, u)) / Dot(v, Cross(n, u));
        if (s >= 0 && t >= 0 && s + t <= 1) {
            float dist = Abs(cam - Pi);
            if (dist < closestIntersection.distance) {
                closestIntersection.position = Pi;
                closestIntersection.distance = dist;
                closestIntersection.triangleIndex = int(i);
            }
        }
    }
    return closestIntersection.distance != m;
}
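One detail worth checking (an observation about the code above, not something I have tested): unlike the original, this version never rejects ri < 0, so a triangle behind the camera that passes the s/t test can still register a hit; a guard like if (ri < 0.0) continue; right after computing ri would restore the original x.x >= 0 behaviour.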
Good luck.