Uniform optimization (-1 values) - C++

Consider following fragment shader:
// Question code: why are polygon.count / polygon.points optimized out?
uniform PolygonData
{
int count; // number of valid entries in points[] -- TODO confirm packing
float points[1024];
} polygon;
out vec4 outColor;
void main()
{
float j;
// NOTE(review): i is advanced by ++i in the for-header AND by the three
// i++ increments in the body, i.e. +4 per iteration. If count == 0 the
// loop never runs and j is read uninitialized below.
for (int i = 0; i < polygon.count; ++i)
{
j = polygon.points[i++];
j = polygon.points[i++];
j = polygon.points[i++];
}
outColor = vec4(1, 1, j, 1);
}
Why is polygon.count and polygon.points optimized out?

#GuyRT
Yes. That was it. From there I got the information that I was exceeding the maximum uniform array size.
This is what i was doing:
Vertex:
// Pass-through vertex shader: positions are forwarded untransformed;
// the projection (u_proj) is applied later, in the geometry shader.
in vec3 inPosition;
void main(void)
{
gl_Position = vec4(inPosition, 1.0);
}
Geometry:
layout(lines) in;
layout(triangle_strip) out;
layout(max_vertices = 4) out;
out vec3 worldPos;
uniform mat4 u_proj;
// Expands each input line segment (two opposite corners) into an
// axis-aligned quad, emitted as a 4-vertex triangle strip.
// BUG FIX: the original wrote only .x/.y/.w of gl_Position for the first
// and last vertices, leaving .z undefined (gl_Position has no defined
// initial value). Each vertex now assigns a complete vec4.
void main()
{
vec4 pos0 = u_proj * gl_in[0].gl_Position;
vec4 pos1 = u_proj * gl_in[1].gl_Position;
//left up: x from pos0, y from pos1; depth/w taken from pos0
gl_Position = vec4(pos0.x, pos1.y, pos0.z, pos0.w);
worldPos = gl_Position.xyz;
EmitVertex();
//left down
gl_Position = pos0;
worldPos = gl_Position.xyz;
EmitVertex();
//right up
gl_Position = pos1;
worldPos = gl_Position.xyz;
EmitVertex();
//right down: x from pos1, y from pos0; depth/w taken from pos1
gl_Position = vec4(pos1.x, pos0.y, pos1.z, pos1.w);
worldPos = gl_Position.xyz;
EmitVertex();
EndPrimitive();
}
Fragment:
// Even-odd (crossing-number) point-in-polygon test evaluated per fragment.
struct PolyData
{
int count; // number of floats used in points[] -- TODO confirm
float points[256]; // polygon vertex coordinates, packed flat
};
uniform PolyData p;
uniform mat4 u_proj;
in vec3 worldPos;
out vec4 outColor;
void main()
{
float testx = worldPos.x;
float testy = worldPos.y;
bool c = false; // crossing parity: true => fragment lies inside the polygon
int i;
int j;
// NOTE(review): vertices are read as pairs (points[i], points[i+1]) but i
// advances by 3 and j starts at count-4 -- verify the intended stride; a
// plain x,y packing would use i += 2 and j = p.count - 2.
for (i = 0, j = p.count - 4; i < p.count; j = i, i = i + 3)
{
vec4 i_vec = u_proj * vec4(p.points[i], p.points[i + 1], 0, 1.0);
vec4 j_vec = u_proj * vec4(p.points[j], p.points[j + 1], 0, 1.0);
// Classic ray-crossing test against the projected edge (j -> i).
if ( (i_vec.y >= testy) != (j_vec.y >= testy) && (testx <= (j_vec.x - i_vec.x) * (testy - i_vec.y) / (j_vec.y - i_vec.y) + i_vec.x))
c = !c;
}
if (c == true)
{
outColor = vec4(1, 1, 0, 1); // inside: yellow
}
else
{
outColor = vec4(0, 0, 0, 0); // outside: transparent black
}
}
Thanks for all the help.

Related

Why are my Ray march fragment shader refelction texture lookups slowing my frame rate?

I’ve written a Fragment shader in GLSL, using shader toy.
Link : https://www.shadertoy.com/view/wtGSzy
most of it works, but when I enable texture lookups in the reflection function, the performance drops from 60FPS to 5~FPS.
The code in question is on lines 173 - 176
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
This same code can be seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. It only causes issues in the reflection function.
My question is, why are my texture lookups, in the reflection code, dropping my performance this much and what can I do to improve it?
/**
* Return the normalized direction to march in from the eye point for a single pixel.
*
* fieldOfView: vertical field of view in degrees
* size: resolution of the output image
* fragCoord: the x,y coordinate of the pixel in the output image
*/
/**
 * Return the normalized direction to march in from the eye point for a single pixel.
 *
 * fieldOfView: vertical field of view in degrees
 * size: resolution of the output image
 * fragCoord: the x,y coordinate of the pixel in the output image
 */
vec3 rayDirection(float fieldOfView, vec2 size, vec2 fragCoord) {
    float halfFov = radians(fieldOfView) / 2.0;
    vec2 offset = fragCoord - size / 2.0;
    float focal = size.y / tan(halfFov);
    return normalize(vec3(offset, -focal));
}
// ---- Ray-march scene constants and the object table ----
float start = 0.0; // march start distance
vec3 eye = vec3(0,0,5); // camera position
int MAX_MARCHING_STEPS = 255; // upper bound on sphere-tracing steps
float EPSILON = 0.00001; // hit threshold
float end = 10.0; // far limit of the march
const uint Shpere = 1u; // (sic) spelling kept -- used throughout the file
const uint Box = 2u;
const uint Plane = 4u;
vec3 lightPos = vec3(-10,0,5); // mutated per-frame in mainImage
#define M_PI 3.1415926535897932384626433832795
const int SDF_OBJECT_COUNT = 4;
// One entry per scene object; fields are interpreted per Shape:
//  Shpere: Position + Radius.  Box: Position + BoxSize (+ Radius rounding).
//  Plane: BoxSize holds the plane normal and Radius the offset (see planeSDF).
struct SDFObject
{
uint Shape;
vec3 Position;
float Radius;
int texChannelID; // which iChannel to sample for the surface colour
float Ambiant;
float Spec;
float Diff;
vec3 BoxSize;
bool isMirror; //quick hack to get reflections working
};
SDFObject SDFObjects[SDF_OBJECT_COUNT] = SDFObject[SDF_OBJECT_COUNT](
SDFObject(Shpere, vec3(2,0,-3),1.0,0,0.2,0.2,0.8, vec3(0,0,0),true)
,SDFObject(Shpere, vec3(-2,0,-3),1.0,0,0.1,1.0,1.0, vec3(0,0,0),false)
,SDFObject(Box, vec3(0,0,-6),0.2,1,0.2,0.2,0.8, vec3(1.0,0.5,0.5),false)
,SDFObject(Plane, vec3(0,0,0),1.0,1,0.2,0.2,0.8, vec3(0.0,1.0,0.0),false)
);
// Signed distance from point p to the sphere described by o.
float shereSDF(vec3 p, SDFObject o)
{
    vec3 toCenter = p - o.Position;
    return length(toCenter) - o.Radius;
}
// Signed distance to a rounded box centred at boxPos with half-extents
// boxBoundery; radius rounds the corners.
float boxSDF(vec3 pointToTest, vec3 boxBoundery, float radius, vec3 boxPos)
{
    vec3 q = abs(pointToTest - boxPos) - boxBoundery;
    float outside = length(max(q, 0.0));
    float inside = min(max(q.x, max(q.y, q.z)), 0.0);
    return outside + inside - radius;
}
// Signed distance to the plane through Pos with normal n.xyz and offset n.w.
float planeSDF(vec3 p, vec4 n, vec3 Pos)
{
    vec3 rel = p - Pos;
    return dot(rel, n.xyz) + n.w;
}
// March from HitPos toward the light and report whether anything blocks
// the path. NOTE: the LightPos parameter is ignored -- the original code
// read the global lightPos instead; kept for call-site compatibility.
// BUG FIX: the original never exited early -- after a hit, dist < EPSILON
// adds almost nothing to depth, so all MAX_MARCHING_STEPS iterations ran
// (evaluating the whole scene each time), and a later depth >= end test
// could clear the shadow flag again. Return as soon as the answer is known.
bool IsShadow(vec3 LightPos, vec3 HitPos)
{
    vec3 viewRayDirection = normalize(lightPos - HitPos);
    float depth = start;
    vec3 hitpoint;
    for (int i = 0; i < MAX_MARCHING_STEPS; i++)
    {
        hitpoint = HitPos + depth * viewRayDirection;
        // distance from hitpoint to the nearest scene object
        float dist = end;
        for (int j = 0; j < SDF_OBJECT_COUNT; j++)
        {
            float distToObjectBeingConsidered;
            if (SDFObjects[j].Shape == Shpere)
                distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
            if (SDFObjects[j].Shape == Box)
                distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize, SDFObjects[j].Radius, SDFObjects[j].Position);
            if (SDFObjects[j].Shape == Plane)
                distToObjectBeingConsidered = planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
            if (distToObjectBeingConsidered < dist)
            {
                dist = distToObjectBeingConsidered;
            }
        }
        if (dist < EPSILON)
            return true;  // an occluder sits between the surface and the light
        depth += dist;
        if (depth >= end)
            return false; // reached the light unobstructed
    }
    return false;
}
// Secondary march along the reflected ray, shading whatever it hits.
// Called from inside rayMarch's step loop, so its own loop multiplies the
// per-fragment work (up to 255*255 iterations) -- this is why enabling the
// texture lookups below collapsed the frame rate.
vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore)
{
vec3 returnCol;
vec3 reflectedRay = reflect(inComingRay, surfNormal);
vec3 RayDirection = normalize(reflectedRay) ;
float depth = start;
vec3 hitpoint;
int i;
for(i=0; i<MAX_MARCHING_STEPS; i++)
{
hitpoint = (HitPos+ depth * RayDirection);
SDFObject SDFObjectToDraw;
float dist = end;
// Find the closest object, skipping the reflector itself.
for(int j =0; j<SDF_OBJECT_COUNT; j++)
{
float distToObjectBeingConsidered;
if(SDFObjects[j].Shape == Shpere)
distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
if(SDFObjects[j].Shape == Box)
distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position);
if(SDFObjects[j].Shape == Plane)
distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
if( distToObjectBeingConsidered < dist && j!= objectIndexToIgnore )// D > 0.0)
{
dist = distToObjectBeingConsidered;
SDFObjectToDraw = SDFObjects[j];
}
}
if(dist < EPSILON)
{
// Spherical UV from the hit normal (valid for spheres only -- TODO confirm
// the mapping is intended for boxes/planes as well).
vec3 normal =normalize(hitpoint-SDFObjectToDraw.Position);
float u = 0.5+ (atan(normal.z, normal.x)/(2.0*M_PI));
float v = 0.5+ (asin(normal.y)/(M_PI));
vec2 uv =vec2(u,v);
vec4 col = vec4(0,0.5,0.5,0);
// Texture lookups disabled: executed here they run inside two nested
// march loops (up to ~65000 samples per fragment) and kill performance.
//if(SDFObjectToDraw.texChannelID == 0)
//col = texture(iChannel0, uv);
//if(SDFObjectToDraw.texChannelID == 1)
//col = texture(iChannel1, uv);
vec3 NormalizedDirToLight = normalize(lightPos-SDFObjectToDraw.Position);
float theta = dot(normal,NormalizedDirToLight);
vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal);
vec3 viewDir = normalize(SDFObjectToDraw.Position);
float Spec = dot(reflectionOfLight, viewDir); // computed but unused below
if(IsShadow(lightPos, hitpoint))
{
returnCol= (col.xyz*SDFObjectToDraw.Ambiant);
}
else
{
returnCol= (col.xyz*SDFObjectToDraw.Ambiant)
+(col.xyz * max(theta *SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant));
}
break;
}
depth += dist;
if(depth >= end)
{
// TODO: look up the background texture here instead of flat red.
returnCol = vec3(1.0,0.0,0.0);
break;
}
}
return returnCol;//*= (vec3(i+1)/vec3(MAX_MARCHING_STEPS));
}
// Sphere-trace the primary ray for one fragment and return its colour.
vec3 rayMarch(vec2 fragCoord)
{
vec3 viewRayDirection = rayDirection(45.0, iResolution.xy, fragCoord);
float depth = start;
vec3 hitpoint;
vec3 ReturnColour = vec3(0,0,0);
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
hitpoint = (eye+ depth * viewRayDirection);
float dist = end;
SDFObject SDFObjectToDraw;
int objectInDexToIgnore=-1;
//find closest object to current point
for(int j =0; j<SDF_OBJECT_COUNT; j++)
{
float distToObjectBeingConsidered;
if(SDFObjects[j].Shape == Shpere)
distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
if(SDFObjects[j].Shape == Box)
distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position);
if(SDFObjects[j].Shape == Plane)
distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
if( distToObjectBeingConsidered < dist)
{
dist = distToObjectBeingConsidered;
SDFObjectToDraw = SDFObjects[j];
objectInDexToIgnore = j;
}
}
//if we are close enough to an object to hit it.
if(dist < EPSILON)
{
vec3 normal =normalize(hitpoint-SDFObjectToDraw.Position);
if(SDFObjectToDraw.isMirror)
{
ReturnColour = MirrorReflection( viewRayDirection, normal, hitpoint, objectInDexToIgnore);
}
else
{
// Spherical UV mapping from the hit normal.
float u = 0.5+ (atan(normal.z, normal.x)/(2.0*M_PI));
float v = 0.5+ (asin(normal.y)/(M_PI));
vec2 uv =vec2(u,v);
vec4 col;
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
vec3 NormalizedDirToLight = normalize(lightPos-SDFObjectToDraw.Position);
float theta = dot(normal,NormalizedDirToLight);
vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal);
vec3 viewDir = normalize(SDFObjectToDraw.Position);
float Spec = dot(reflectionOfLight, viewDir); // unused: specular term commented out below
if(IsShadow(lightPos, hitpoint))
{
ReturnColour= (col.xyz*SDFObjectToDraw.Ambiant);
}
else
{
ReturnColour= (col.xyz*SDFObjectToDraw.Ambiant)
+(col.xyz * max(theta *SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant));
//+(col.xyz* Spec * SDFObjectToDraw.Spec);
}
}
return ReturnColour;
}
depth += dist;
// NOTE(review): when the ray escapes, the background is sampled but the
// loop keeps marching instead of returning -- an early break here would
// save work without changing the output.
if(depth >= end)
{
float u = fragCoord.x/ iResolution.x;
float v = fragCoord.y/ iResolution.y;
vec4 col = texture(iChannel2, vec2(u,v));
ReturnColour =col.xyz;
}
}
return ReturnColour;
}
// Shadertoy entry point: animates the light position, then ray-marches the
// scene for this fragment.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Normalized pixel coordinates (from 0 to 1)
//vec2 uv = fragCoord/iResolution.xy;
// Time varying pixel color
//vec3 col = 0.5 + 0.5*cos(iTime+uv.xyx+vec3(0,2,4));
// Output to screen
lightPos *= cos(iTime+vec3(1.5,2,2)); // animate the light each frame
//lightPos= vec3(cos(iTime)*2.0,0,0);
vec3 SDFCol= rayMarch(fragCoord);
vec3 col = vec3(0);
//if(SDFVal <=1.0)
// col = vec3(1,0,0);
//col = vec3(SDFVal,0,0);
col = vec3(0.5,0,0); // debug fallback, overwritten below
col = SDFCol;
fragColor = vec4(col,1.0);
}
[...] This same code can be seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. [...]
The "working" texture lookup is executed in a loop in rayMarch. MAX_MARCHING_STEPS is 255, so the lookup is done at most 255 times.
vec3 rayMarch(vec2 fragCoord)
{
// [...]
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
// [...]
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
// [...]
}
// [...]
}
When you do the lookup in MirrorReflection then the performance breaks down, because it is done in a loop in MirrorReflection and MirrorReflection is called in a loop in rayMarch. In this case the lookup is done up to 255*255 = 65025 times.
~65000 texture lookups for a single fragment is far too much and causes the breakdown in performance.
vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore)
{
// [...]
for(i=0; i<MAX_MARCHING_STEPS; i++)
{
// [...]
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
// [...]
}
// [...]
}
vec3 rayMarch(vec2 fragCoord)
{
// [...]
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
// [...]
ReturnColour = MirrorReflection(viewRayDirection, normal, hitpoint, objectInDexToIgnore);
// [...]
}
// [...]
}

Not getting correct position of Shadows , CPU based Raytracing

I am trying to calculate shadows in a CPU-based raytracer, but I am not getting the shadows in the exact position.
I am trying to render the famous Cornell box.
Also, the spheres in the figure are being shaded improperly.
I have pasted the whole code.
I have used 2 spheres and 10 triangles in scene,
I guess there is problem with my shadow tracing algorithm or normal calculation
// Abstract base class for every renderable object in the scene.
// Derived classes override intersection()/calculateNormal(); the base
// implementations report "no hit" (defined out of line below).
class Figure{
public:
    Vec3 position; // reference point (sphere centre, plane point, ...)
    Vec3 cl;       // base colour
    Vec3 normal;
    Figure(void);
    Figure(Vec3 pos,Vec3 col,Vec3 Normal);
    // BUG FIX: this class is used polymorphically (Figure* figurelist[] in
    // main); without a virtual destructor, deleting through a base pointer
    // is undefined behaviour.
    virtual ~Figure() = default;
    virtual bool intersection(float* t,Vec3 origin,Vec3 direction);
    virtual Vec3 calculateNormal(Vec3 p0,float *intensity,Vec3* Diffusecolor,Vec3* Specular);
    virtual bool intersectionShadow(float* t,Vec3 origin,Vec3 direction);
};
// Default figure: everything zeroed.
Figure::Figure() : position(0, 0, 0), cl(0, 0, 0), normal(0, 0, 0) {}

// Figure with explicit position, colour and normal.
Figure::Figure(Vec3 post, Vec3 coli, Vec3 Normal)
    : position(post), cl(coli), normal(Normal) {}
// Base-class fallbacks; derived classes override these.
bool Figure::intersection(float *t, Vec3 origin,Vec3 direction){
return false; // a bare Figure is never hit; *t untouched
}
Vec3 Figure::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
return normal; // stored normal; out-parameters left untouched
}
bool Figure::intersectionShadow(float *t, Vec3 origin, Vec3 direction){
return false; // no shadow contribution by default
}
// Infinite plane defined by a point (inherited position) and planeNormal.
class Plane:public Figure{
public:
    Vec3 planeNormal;
    Plane(void);
    Plane(Vec3 pos,Vec3 norm,Vec3 c);
    bool intersection(float *t, Vec3 origin, Vec3 direction);
    // BUG FIX: the declaration was qualified ("Vec3 Plane::calculateNormal")
    // inside the class body, which is ill-formed C++; qualifier removed.
    Vec3 calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular);
};
// Default plane: zero normal (degenerate until configured).
Plane::Plane(void) : planeNormal(0, 0, 0) {}

// Plane through pos with normal norm and colour c.
Plane::Plane(Vec3 pos, Vec3 norm, Vec3 c) : planeNormal(norm) {
    position = pos;
    cl = c;
}
// Ray/plane intersection; writes the hit parameter to *t.
// BUG FIXES vs. the original:
//  - the success path fell off the end of the function without returning
//    (undefined behaviour); it now returns true;
//  - abs(denom) invoked the integer abs() overload, truncating the float
//    (e.g. denom = 0.5 -> abs(0) == 0, falsely "parallel"); the magnitude
//    test is now done without abs.
bool Plane::intersection(float *t, Vec3 origin, Vec3 direction){
    float denom = planeNormal.dot(direction);
    if (denom < 0.0001f && denom > -0.0001f) {
        return false; // ray (almost) parallel to the plane
    }
    Vec3 p_or = position - origin;
    float res = p_or.dot(planeNormal) / denom;
    // NOTE(review): res may be negative (plane behind the origin); callers
    // should reject negative t values.
    *t = res;
    return true;
}
// Returns the plane normal and fills in the material out-parameters
// (no specular highlight: intensity 0; fixed light-grey diffuse).
Vec3 Plane::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
    *intensity = 0;
    *Specular = cl;
    *Diffusecolor = Vec3(0.7, 0.7, 0.7);
    return planeNormal;
}
// Sphere described by its centre (inherited position), radius and colour.
class Sphere:public Figure{
public:
float radius;
Sphere(void);
Sphere(Vec3 pos,float rad,Vec3 col);
bool intersection(float* t,Vec3 origin,Vec3 direction);
Vec3 calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular);
};
// Default sphere: small red sphere in front of the camera.
Sphere::Sphere() : radius(0.3f) {
    position = Vec3(0, 0, -2);
    cl = Vec3(1.0, 0, 0);
}

// Sphere with explicit centre, radius and colour.
Sphere::Sphere(Vec3 pos, float rad, Vec3 col) : radius(rad) {
    position = pos;
    cl = col;
}
// Ray/sphere intersection via the quadratic formula; writes the hit
// parameter to *t.
// BUG FIX: the original returned std::max of the two roots, i.e. the FAR
// intersection (the back surface of the sphere), which produced the wrong
// hit points and hence wrong shading. Return the nearest non-negative
// root instead, and reject spheres entirely behind the ray origin.
bool Sphere::intersection(float *t, Vec3 origin,Vec3 direction){
    Vec3 oc = origin - position;
    float a = direction.dot(direction);
    float b = 2.0f * oc.dot(direction);
    float c = oc.dot(oc) - radius*radius;
    float discriminant = b*b - 4*a*c;
    if (discriminant < 0) {
        return false; // ray misses the sphere
    }
    float root = sqrt(discriminant);
    float tNear = (-b - root) / (2.0f*a);
    float tFar  = (-b + root) / (2.0f*a);
    float t0 = (tNear >= 0.0f) ? tNear : tFar; // nearest hit in front of the origin
    if (t0 < 0.0f) {
        return false; // both intersections behind the ray origin
    }
    *t = t0;
    return true;
}
// Outward normal at surface point p0 (unnormalized; the caller normalizes)
// plus the sphere's material out-parameters.
Vec3 Sphere::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
    *intensity = 50.0f;
    *Specular = Vec3(0.7f, 0.7f, 0.7);
    *Diffusecolor = cl;
    return p0 - position;
}
// Triangle with vertices v0..v2. The members norm, ed0/ed1 and u/v/w are
// scratch values written by intersection() (barycentrics and edges).
class Triangle:public Figure{
public:
Vec3 v0;
Vec3 v1;
Vec3 v2;
Vec3 norm;
Vec3 ed0,ed1; // edges v1-v0 and v2-v0, cached by intersection()
float u,v,w;  // barycentric coordinates of the last intersection test
Triangle(void);
Triangle(Vec3 a,Vec3 b,Vec3 c,Vec3 col);
bool intersection(float* t,Vec3 origin,Vec3 direction);
bool intersectionShadow(float* t,Vec3 origin,Vec3 direction);
Vec3 calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular);
};
// Default triangle: all vertices at the origin (degenerate).
Triangle::Triangle() : v0(0, 0, 0), v1(0, 0, 0), v2(0, 0, 0) {}

// Triangle from three vertices and a colour.
Triangle::Triangle(Vec3 a, Vec3 b, Vec3 c, Vec3 col)
    : v0(a), v1(b), v2(c) {
    cl = col;
}
bool Triangle::intersection(float *t, Vec3 origin,Vec3 direction){
ed0 = v1-v0;
ed1 = v2-v0;
Vec3 r_o = origin-v0;
Vec3 r_ed = direction.cross(ed1);
u = r_o.dot(r_ed)/ed0.dot(r_ed);
Vec3 r0_ed0 = r_o.cross(ed0);
float rd_r0_ed0 = direction.dot(r0_ed0);
v = rd_r0_ed0/ed0.dot(r_ed);
float ed_r0_ed0 = ed1.dot(r0_ed0);
float t0 = ed_r0_ed0/ed0.dot(r_ed);
w = 1-u-v;
if((u<0) || (u>1)){
return false;
}
else if((v<0) || (u+v>1)){
return false;
}
else {
*t = t0;
return true;
}
}
// Geometric (face) normal from two edges, plus material out-parameters
// (no specular highlight; fixed light-grey diffuse). Unnormalized; the
// caller normalizes.
//https://www.tjhsst.edu/~dhyatt/supercomp/n310.html
Vec3 Triangle::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
    *intensity = 0;
    *Specular = cl;
    *Diffusecolor = Vec3(0.7, 0.7, 0.7);
    Vec3 edgeA = v1 - v0;
    Vec3 edgeB = v2 - v1;
    return edgeA.cross(edgeB);
}
// Shadow-ray test stub -- never reports occlusion. NOTE(review): main()
// calls intersection() for shadow rays, so this override is currently unused.
bool Triangle::intersectionShadow(float* t,Vec3 origin,Vec3 direction){
return false;
}
using Colour = Vec3; // RGB Value
// Convenience named colours.
Colour red() { return Colour(1.0f, 0.0f, 0.0f); }
Colour white() { return Colour(1.0f, 1.0f, 1.0f); }
Colour black() { return Colour(0.0f, 0.0f, 0.0f); }
// Clamp an integer shading value to the representable pixel range [0, 255].
uchar BoundPixelValue(int shading)
{
    if (shading >= 255) return 255;
    return (shading < 0) ? 0 : shading;
}
// Componentwise scale of vector b by scalar v.
Vec3 scalar_multiply(Vec3 b, float v){
    return Vec3(v * b.x(), v * b.y(), v * b.z());
}
// Renders the Cornell-box test scene (2 spheres + 10 wall triangles) with
// diffuse/specular shading and hard shadows, then writes ../../out.bmp.
int main(int, char**){
    // Box corner vertices.
    Vec3 v0 = Vec3(-1.0f,-1.0f,-1.0f);
    Vec3 v1 = Vec3(-1.0f,-1.0f,-2.0f);
    Vec3 v2 = Vec3(-1.0f,1.0f,-1.0f);
    Vec3 v3 = Vec3(-1.0f,1.0f,-2.0f);
    Vec3 v4 = Vec3(1.0f,-1.0f,-1.0f);
    Vec3 v5 = Vec3(1.0f,-1.0f,-2.0f);
    Vec3 v6 = Vec3(1.0f,1.0f,-2.0f);
    Vec3 v7 = Vec3(1.0f,1.0f,-1.0f);
    Figure* figurelist[12];
    //sphere
    figurelist[0]=new Sphere(Vec3(-0.2f,0.3f,-1.5f),0.3f,Vec3(1.000f, 0.196f, 0.000f));
    figurelist[1]=new Sphere(Vec3(0.5f,-0.3f,-1.3f),0.4f,Vec3(0.054f, 0.172f, 0.847f));
    //floor
    figurelist[2]=new Triangle(v1,v0,v2,Vec3(0.752f, 0.713f, 0.823f));
    figurelist[3]=new Triangle(v2,v3,v1,Vec3(0.752f, 0.713f, 0.823f));
    //left
    figurelist[4]=new Triangle(v5,v1,v0,Vec3(0.749f, 0.105f, 0.101f));
    figurelist[5]=new Triangle(v0,v4,v5,Vec3(0.749f, 0.105f, 0.101f));
    //back
    figurelist[6]=new Triangle(v5,v1,v3,Vec3(0.925f, 0.639f, 0.454f));
    figurelist[7]=new Triangle(v3,v6,v5,Vec3(0.925f, 0.639f, 0.454f));
    //right
    figurelist[8]=new Triangle(v7,v6,v3,Vec3(0.415f, 0.733f, 0.164f));
    figurelist[9]=new Triangle(v3,v2,v7,Vec3(0.415f, 0.733f, 0.164f));
    //top
    figurelist[10]=new Triangle(v5,v6,v7,Vec3(0.925f, 0.639f, 0.454f));
    figurelist[11]=new Triangle(v7,v4,v5,Vec3(0.925f, 0.639f, 0.454f));
    const int figureCount = sizeof(figurelist)/sizeof(figurelist[0]);
    int wResolution = 640;
    int hResolution = 480;
    // #rows = hResolution, #cols = wResolution
    Image<Colour> image(hResolution, wResolution);
    Vec3 llc = Vec3(-1.0,-1.0,-1.0); // lower-left corner of the image plane
    Vec3 urc = Vec3(1.0,1.0,-1.0);   // upper-right corner
    Vec3 CameraPos = Vec3(0,0,0);
    for (int row = 0; row < image.rows(); ++row) {
        for (int col = 0; col < image.cols(); ++col) {
            float u = float(row+0.5)/float(image.rows());
            float v = float(col+0.5)/float(image.cols());
            Vec3 PointPos = Vec3(llc(0) + u * (urc.x() - llc.x()), llc.y() + v * (urc.y() - llc.y()), -1);
            Vec3 direction = (PointPos-CameraPos).normalized();
            // --- primary ray: find the closest object ---
            float minT = INFINITY;
            int figureHit = -1;
            float t0 = 0.0;
            for (int k = 0; k < figureCount; k++){
                bool hit = figurelist[k]->intersection(&t0,CameraPos,direction);
                if(hit && t0<minT){
                    minT = t0;
                    figureHit = k;
                }
            }
            // BUG FIX: shading previously ran *inside* the loop above, once
            // per candidate object with a half-finished closest hit, and the
            // shadow pass clobbered minT while the search was still running.
            if(figureHit != -1){
                Vec3 p0 = CameraPos + minT*direction;
                Vec3 lightSource = Vec3(2.0f,0.0f,-1.0f);
                float lightIntensity = 0.7f;
                Vec3 diffuseColour(0.0f, 0.392f, 0.0f);
                Vec3 specularColour(0.0,0.0,0.0);
                float intensity = 0;
                // Ambient colour for shadowed pixels.
                // BUG FIX: was cl.cross(Vec3(0.1,...)) -- a cross product of
                // a colour is meaningless; the intent is 10% of the colour.
                Vec3 AmbientColour = scalar_multiply(figurelist[figureHit]->cl, 0.1f);
                // Diffuse lighting.
                Vec3 light_direction = (lightSource-p0).normalized();
                Vec3 Normal = Vec3(figurelist[figureHit]->calculateNormal(p0,&intensity,&diffuseColour,&specularColour)).normalized();
                float diffuse_term = std::max(0.0f,light_direction.dot(Normal));
                Vec3 diffuse = (diffuseColour*lightIntensity*diffuse_term);
                // Specular highlights (half-vector).
                Vec3 e = (p0-CameraPos).normalized();
                Vec3 R = (e+light_direction).normalized();
                float dot2 = std::max(0.0f,R.dot(Normal));
                Vec3 specular = specularColour*lightIntensity*pow(dot2,intensity);
                // --- hard shadows: march from just above the surface toward the light ---
                float bias = 0.001f;
                // BUG FIX: bias was declared but never applied; the shadow
                // ray used to start at p0 + Normal (a full unit offset).
                Vec3 p_shadow = p0 + Normal*bias;
                // BUG FIX: shadow_direction was p0 - light_direction (a
                // position); the ray must point from the surface to the light.
                Vec3 shadow_direction = light_direction;
                float lightDist = (lightSource - p_shadow).dot(shadow_direction);
                int lightHit = -1;
                for (int i = 0; i < figureCount; i++){
                    float ts = 0.0f;
                    // Only occluders strictly between the surface and the light count.
                    if(figurelist[i]->intersection(&ts,p_shadow,shadow_direction) && ts > 0.0f && ts < lightDist){
                        lightHit = i;
                        break;
                    }
                }
                if(lightHit != -1){
                    image(row,col) = AmbientColour;
                }
                else{
                    // BUG FIX: a pasted markdown fragment
                    // ("[enter image description here][1]") had been fused
                    // into this statement.
                    image(row,col) = specular + diffuse;
                }
            }
            else {
                image(row,col) = white();
            }
        }
    }
    bmpwrite("../../out.bmp", image);
    imshow(image);
    return EXIT_SUCCESS;
}
Attached is the output image I am getting. Here is the image after applying shadow tracing:
And the original image without shadows:
This should be the problem:
Vec3 shadow_direction = p0-light_direction;
p0 is a position, light_direction is a direction, hence, the result is a position. But you are using it as a direction. Instead do:
Vec3 shadow_direction = -light_direction;
Also
float bias = 0.001f;
Vec3 p_shadow = p0+Normal;
was probably meant to be
Vec3 p_shadow = p0 + bias * Normal;

How to avoid extra calculations in fragment shader

I'm trying to fix this shader. The effect is a radial blur around a point position, passed from the CPU in an array. The calculation works fine for each point and generates the effect, but as you can see in this picture, on each loop iteration the shader keeps generating samples, and I don't know how to avoid that. I only want the blur around each point in the array.
#version 150
// Radial ("god-ray") blur accumulated around up to N light positions.
in vec2 varyingtexcoord;
uniform sampler2DRect tex0;
uniform int size; // NOTE(review): not referenced in this shader -- confirm before removing
float exposure = 0.79; // final brightness multiplier
float decay = 0.9;     // per-sample falloff of illuminationDecay
float density = .9;    // fraction of the fragment->light span that is sampled
float weight = .1;     // per-sample weight
int samples = 25;
out vec4 fragColor;
const int MAX_SAMPLES = 25;
const int N = 3; // number of light positions supplied by the CPU
uniform vec2 ligthPos [N]; // (sic) spelling kept -- the CPU side sets it by this name
int a = 1; // NOTE(review): unused
// Accumulate MAX_SAMPLES taps of tex0 along the line from this fragment
// toward the light position `pos`, with exponentially decaying weights --
// the classic radial/god-ray blur.
vec4 halo(vec2 pos){
    float illuminationDecay = 1.2;
    vec2 texCoord = varyingtexcoord;
    vec2 delta = texCoord - pos.xy;
    delta *= 1.0 / float(samples) * density;
    vec4 color = texture(tex0, texCoord);
    for (int i = 0; i < MAX_SAMPLES; i++){
        texCoord -= delta;
        // "sample" is reserved in later GLSL versions; use a different name.
        vec4 tap = texture(tex0, texCoord);
        tap *= illuminationDecay * weight;
        color += tap;
        illuminationDecay *= decay;
    }
    return color;
}
// Sum the halo contribution of every light position, then apply exposure.
void main(){
    vec4 accum = vec4(0.0);
    for (int e = 0; e < N; e++){
        accum += halo(ligthPos[e]);
    }
    fragColor = accum * exposure;
}
This is what happens:

Depth of field artefacts

I began to implement the depth of field in my application, but I ran into a problem. Artifacts appear in the form of a non-smooth transition between depths.
I'm doing the depth of field in the following way:
With the main scene rendering, I record the blur value in the alpha channel. I do this using this: fragColor.a = clamp(abs(focalDepth + fragPos.z) / focalRange, 0.0, 1.0), where focalDepth = 8, focalRange = 20.
After that I apply a two-step (horizontally and vertically) Gaussian blur with dynamic size and sigma, depending on the blur value (which I previously recorded in the alpha channel)(shader below)
But I have an artifact, where you see a clear transition between the depths.
The whole scene:
And with an increased scale:
My fragment blur shader:
#version 330
precision mediump float;
#define BLOOM_KERNEL_SIZE 8
#define DOF_KERNEL_SIZE 8
/* ^^^ definitions ^^^ */
// Separable (horizontal/vertical, chosen by isHorizontal) blur producing
// bloom and depth-of-field in one pass via two render targets.
layout (location = 0) out vec4 bloomFragColor;
layout (location = 1) out vec4 dofFragColor;
in vec2 texCoords;
uniform sampler2D image; // bloom
uniform sampler2D image2; // dof
uniform bool isHorizontal;
uniform float kernel[BLOOM_KERNEL_SIZE]; // fixed bloom Gaussian, set by the CPU
float dof_kernel[DOF_KERNEL_SIZE]; // per-fragment DOF Gaussian, built in makeDofKernel
vec4 tmp; // scene sample; .a carries the blur factor written in the main pass
vec3 bloom_result;
vec3 dof_result;
float fdof; // blur factor (0..1) read from image2's alpha
float dofSigma;
int dofSize; // per-fragment kernel size -- see NOTE in makeDofKernel
// Builds a Gaussian of the given size/sigma and keeps the centre + right
// half in dof_kernel.
// NOTE(review): per the author's own follow-up, varying `size` per fragment
// is what quantizes the blur into visible bands (the hard depth-transition
// artifact); only sigma should vary. See the corrected shader later in this
// post, which fixes the kernel size.
void makeDofKernel(int size, float sigma) {
size = size * 2 - 1; // full left-centre-right width
float tmpKernel[DOF_KERNEL_SIZE * 2 - 1];
int mean = size / 2;
float sum = 0; // For accumulating the kernel values
for (int x = 0; x < size; x++) {
tmpKernel[x] = exp(-0.5 * pow((x - mean) / sigma, 2.0));
// Accumulate the kernel values
sum += tmpKernel[x];
}
// Normalize the kernel
for (int x = 0; x < size; x++)
tmpKernel[x] /= sum;
// need center and right part
for (int i = 0; i < mean + 1; i++) dof_kernel[i] = tmpKernel[size / 2 + i];
}
// One blur pass: bloom uses the fixed `kernel`, DOF rebuilds dof_kernel per
// fragment from the blur factor stored in image2's alpha.
void main() {
vec2 texOffset = 1.0 / textureSize(image, 0); // gets size of single texel
tmp = texture(image2, texCoords);
fdof = tmp.a;
// NOTE(review): quantizing the blur amount into a discrete kernel size is
// the source of the banding artifact (see makeDofKernel above).
dofSize = clamp(int(tmp.a * DOF_KERNEL_SIZE), 1, DOF_KERNEL_SIZE);
if (dofSize % 2 == 0) dofSize++; // keep the kernel width odd (centred)
makeDofKernel(dofSize, 12.0 * fdof + 1);
bloom_result = texture(image, texCoords).rgb * kernel[0]; // current fragment’s contribution
dof_result = tmp.rgb * dof_kernel[0];
if(isHorizontal) {
for(int i = 1; i < kernel.length(); i++) {
bloom_result += texture(image, texCoords + vec2(texOffset.x * i, 0.0)).rgb * kernel[i];
bloom_result += texture(image, texCoords - vec2(texOffset.x * i, 0.0)).rgb * kernel[i];
}
for(int i = 1; i < dofSize; i++) {
dof_result += texture(image2, texCoords + vec2(texOffset.x * i, 0.0)).rgb * dof_kernel[i];
dof_result += texture(image2, texCoords - vec2(texOffset.x * i, 0.0)).rgb * dof_kernel[i];
}
} else {
for(int i = 1; i < kernel.length(); i++) {
bloom_result += texture(image, texCoords + vec2(0.0, texOffset.y * i)).rgb * kernel[i];
bloom_result += texture(image, texCoords - vec2(0.0, texOffset.y * i)).rgb * kernel[i];
}
for(int i = 1; i < dofSize; i++) {
dof_result += texture(image2, texCoords + vec2(0.0, texOffset.y * i)).rgb * dof_kernel[i];
dof_result += texture(image2, texCoords - vec2(0.0, texOffset.y * i)).rgb * dof_kernel[i];
}
}
bloomFragColor = vec4(bloom_result, 1.0);
dofFragColor = vec4(dof_result, fdof); // propagate the blur factor for the next pass
}
And the settings for the DOF texture: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, SCR_W, SCR_H, 0, GL_RGBA, GL_FLOAT, NULL)
Optimization of the shader I'll do later, now I'm very concerned about this artifact. How it can be eliminated? It is desirable not to change the way of realization of the depth of field. But if you know a more productive way - a big request to share it.
I will be grateful for help.
The problem is solved. My mistake was that I changed the size of DOF blur kernel, although I had to change only the sigma. Corrected shader code:
#version 330
precision mediump float;
#define BLOOM_KERNEL_SIZE 8
#define DOF_KERNEL_SIZE 8
/* ^^^ definitions ^^^ */
// Corrected version: the DOF kernel SIZE is now fixed; only sigma varies
// with the per-fragment blur factor, which removes the banding artifact.
layout (location = 0) out vec4 bloomFragColor;
layout (location = 1) out vec4 dofFragColor;
in vec2 texCoords;
uniform sampler2D image; // bloom
uniform sampler2D image2; // dof
uniform bool isHorizontal;
uniform float max_sigma = 12.0; // sigma at full blur (fdof = 1)
uniform float min_sigma = 0.0001; // sigma floor to avoid division by zero
uniform float kernel[BLOOM_KERNEL_SIZE]; // fixed bloom Gaussian, set by the CPU
float dof_kernel[DOF_KERNEL_SIZE]; // centre + right half of the DOF Gaussian
vec4 tmp; // scene sample; .a carries the blur factor
vec3 bloom_result;
vec3 dof_result;
float fdof; // blur factor (0..1) read from image2's alpha
const int DOF_LCR_SIZE = DOF_KERNEL_SIZE * 2 - 1; // left-center-right (lllcrrr)
const int DOF_MEAN = DOF_LCR_SIZE / 2;
// Builds the centre + right half of a normalized Gaussian of fixed width
// DOF_KERNEL_SIZE; only sigma varies with the per-fragment blur amount.
void makeDofKernel(float sigma) {
    float total = 0.0; // accumulated weight, for normalisation
    for (int i = 0; i < DOF_KERNEL_SIZE; i++) {
        dof_kernel[i] = exp(-0.5 * pow(float(i) / sigma, 2.0));
        total += dof_kernel[i];
    }
    // Account for the mirrored left taps (the centre tap counts once).
    total += total - dof_kernel[0];
    for (int i = 0; i < DOF_KERNEL_SIZE; i++) {
        dof_kernel[i] /= total;
    }
}
// One blur pass: bloom uses the fixed `kernel`; DOF rebuilds dof_kernel per
// fragment (fixed size, sigma scaled by the stored blur factor).
void main() {
vec2 texOffset = 1.0 / textureSize(image, 0); // gets size of single texel
tmp = texture(image2, texCoords);
fdof = tmp.a;
makeDofKernel(max_sigma * fdof + min_sigma);
bloom_result = texture(image, texCoords).rgb * kernel[0]; // current fragment’s contribution
dof_result = tmp.rgb * dof_kernel[0];
if(isHorizontal) {
for(int i = 1; i < BLOOM_KERNEL_SIZE; i++) {
bloom_result += texture(image, texCoords + vec2(texOffset.x * i, 0.0)).rgb * kernel[i];
bloom_result += texture(image, texCoords - vec2(texOffset.x * i, 0.0)).rgb * kernel[i];
}
for(int i = 1; i < DOF_KERNEL_SIZE; i++) {
dof_result += texture(image2, texCoords + vec2(texOffset.x * i, 0.0)).rgb * dof_kernel[i];
dof_result += texture(image2, texCoords - vec2(texOffset.x * i, 0.0)).rgb * dof_kernel[i];
}
} else {
for(int i = 1; i < BLOOM_KERNEL_SIZE; i++) {
bloom_result += texture(image, texCoords + vec2(0.0, texOffset.y * i)).rgb * kernel[i];
bloom_result += texture(image, texCoords - vec2(0.0, texOffset.y * i)).rgb * kernel[i];
}
for(int i = 1; i < DOF_KERNEL_SIZE; i++) {
dof_result += texture(image2, texCoords + vec2(0.0, texOffset.y * i)).rgb * dof_kernel[i];
dof_result += texture(image2, texCoords - vec2(0.0, texOffset.y * i)).rgb * dof_kernel[i];
}
}
bloomFragColor = vec4(bloom_result, 1.0);
dofFragColor = vec4(dof_result, fdof); // propagate the blur factor for the next pass
}
Result:

How to create billboard matrix in glm

How to create a billboard translation matrix from a point in space using glm?
Just set the upper left 3×3 submatrix of the transformation to identity.
Update: Fixed function OpenGL variant:
/* Turn a 4x4 modelview matrix (column-major, OpenGL layout) into its
 * billboard form: the upper-left 3x3 rotation/scale part is replaced with
 * the identity, while the translation elements and the last row/column are
 * copied from MV. Safe to call with BM == MV (in-place). */
void makebillboard_mat4x4(double *BM, double const * const MV)
{
    for(size_t row = 0; row < 3; row++) {
        BM[4*row + 0] = (row == 0) ? 1.0 : 0.0;
        BM[4*row + 1] = (row == 1) ? 1.0 : 0.0;
        BM[4*row + 2] = (row == 2) ? 1.0 : 0.0;
        BM[4*row + 3] = MV[4*row + 3];
    }
    for(size_t k = 12; k < 16; k++) {
        BM[k] = MV[k];
    }
}
void mygltoolMakeMVBillboard(void)
{
GLenum active_matrix;
double MV[16];
glGetIntegerv(GL_MATRIX_MODE, &active_matrix);
glMatrixMode(GL_MODELVIEW);
glGetDoublev(GL_MODELVIEW_MATRIX, MV);
makebillboard_mat4x4(MV, MV);
glLoadMatrixd(MV);
glMatrixMode(active_matrix);
}
// Build a billboard rotation that makes a quad at `position` face the
// camera, using cameraUp to derive the basis.
// BUG FIX: `mat4 transform;` is not guaranteed to be the identity -- with
// GLM's default configuration (no GLM_FORCE_CTOR_INIT) it is left
// uninitialized, so the untouched fourth column held garbage. Construct
// the identity explicitly.
mat4 billboard(vec3 position, vec3 cameraPos, vec3 cameraUp) {
    vec3 look = normalize(cameraPos - position);
    vec3 right = cross(cameraUp, look);
    vec3 up2 = cross(look, right);
    mat4 transform(1.0f);
    transform[0] = vec4(right, 0);
    transform[1] = vec4(up2, 0);
    transform[2] = vec4(look, 0);
    // Uncomment this line to translate the position as well
    // (without it, it's just a rotation).
    // BUG FIX in the suggestion: a translation column needs w == 1, not 0.
    //transform[3] = vec4(position, 1);
    return transform;
}