Not getting correct position of shadows in CPU-based raytracing (C++)
I am trying to calculate shadows in a CPU-based raytracer, but the shadows do not appear in the correct position.
I am trying to render the famous Cornell box.
The spheres in the figure are also being shaded incorrectly.
I have pasted the whole code below.
The scene uses 2 spheres and 10 triangles.
I suspect there is a problem with my shadow-tracing algorithm or my normal calculation.
// Abstract base class for all renderable primitives (spheres, planes,
// triangles).  Derived classes override intersection() and calculateNormal();
// the base implementations are inert stubs that never register a hit.
class Figure{
public:
Vec3 position;  // reference point of the primitive (e.g. centre for spheres)
Vec3 cl;        // base colour of the primitive
Vec3 normal;    // optional fixed normal (unused by most subclasses)
Figure(void);
Figure(Vec3 pos,Vec3 col,Vec3 Normal);
// Fix: a polymorphic base needs a virtual destructor so deleting a derived
// object through a Figure* is well defined.
virtual ~Figure() {}
virtual bool intersection(float* t,Vec3 origin,Vec3 direction);
virtual Vec3 calculateNormal(Vec3 p0,float *intensity,Vec3* Diffusecolor,Vec3* Specular);
virtual bool intersectionShadow(float* t,Vec3 origin,Vec3 direction);
};
// Default-construct a figure with position, colour and normal all zeroed.
Figure::Figure()
    : position(Vec3(0,0,0)), cl(Vec3(0,0,0)), normal(Vec3(0,0,0)) {
}
// Construct a figure from an explicit position, colour and normal.
Figure::Figure(Vec3 post, Vec3 coli, Vec3 Normal)
    : position(post), cl(coli), normal(Normal) {
}
// Base stub: a bare Figure never intersects a ray.
bool Figure::intersection(float *t, Vec3 origin, Vec3 direction){
    return false;
}
// Base stub: leaves the shading outputs untouched and hands back the
// stored normal.
Vec3 Figure::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
    return normal;
}
// Base stub: a bare Figure never occludes a shadow ray.
bool Figure::intersectionShadow(float *t, Vec3 origin, Vec3 direction){
    return false;
}
// Infinite plane through `position` with surface normal `planeNormal`.
class Plane:public Figure{
public:
Vec3 planeNormal;
Plane(void);
Plane(Vec3 pos,Vec3 norm,Vec3 c);
bool intersection(float *t, Vec3 origin, Vec3 direction);
// Fix: the original declared this as `Vec3 Plane::calculateNormal(...)`.
// A class-qualified name inside the class body is ill-formed standard C++
// (accepted only as an MSVC extension); the qualifier must be dropped.
Vec3 calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular);
};
// Default plane: degenerate zero normal (cannot produce a usable hit).
Plane::Plane(void)
    : planeNormal(Vec3(0,0,0)) {
}
// Plane through point `pos`, facing `norm`, with colour `c`.
Plane::Plane(Vec3 pos, Vec3 norm, Vec3 c)
    : planeNormal(norm) {
    position = pos;
    cl = c;
}
// Ray/plane intersection.  On a hit, writes the ray parameter into *t and
// returns true; returns false for parallel rays or hits behind the origin.
//
// Fixes two defects in the original:
//  * the non-parallel path never returned a value (flowing off the end of a
//    non-void function is undefined behaviour, so callers saw garbage);
//  * intersections behind the ray origin (t < 0) were reported as hits.
bool Plane::intersection(float *t, Vec3 origin, Vec3 direction){
float denom = planeNormal.dot(direction);
if(abs(denom)<0.0001f){
// Ray is (nearly) parallel to the plane: no usable intersection.
return false;
}
Vec3 p_or = position-origin;
float res = p_or.dot(planeNormal)/denom;
if(res < 0.0f){
// The plane lies behind the ray origin.
return false;
}
*t = res;
return true;
}
// Shading parameters for a plane: matte grey diffuse, the plane's own colour
// in the "specular" slot, shininess exponent 0.  Returns the plane normal.
Vec3 Plane::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
*Specular = cl;
*Diffusecolor = Vec3(0.7, 0.7, 0.7);
*intensity = 0;
return planeNormal;
}
// Sphere primitive: centre `position` (inherited), radius `radius`, colour `cl`.
class Sphere:public Figure{
public:
float radius;
Sphere(void);
Sphere(Vec3 pos,float rad,Vec3 col);
// Ray/sphere hit test; on success writes the hit parameter into *t.
bool intersection(float* t,Vec3 origin,Vec3 direction);
// Fills the shading outputs and returns the (unnormalised) normal at p0.
Vec3 calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular);
};
// Default: small red sphere sitting in front of the camera.
Sphere::Sphere()
    : radius(0.3f) {
    position = Vec3(0,0,-2);
    cl = Vec3(1.0,0,0);
}
// Sphere centred at `pos` with radius `rad` and colour `col`.
Sphere::Sphere(Vec3 pos, float rad, Vec3 col)
    : radius(rad) {
    position = pos;
    cl = col;
}
// Ray/sphere intersection via the quadratic formula: solves
// |origin + t*direction - position|^2 = radius^2 for t.
//
// Fix: the original returned std::max of the two roots, i.e. the FAR side of
// the sphere, so the hit point and normal were computed on the back surface
// (this is why the spheres shade incorrectly).  The correct hit is the
// smallest non-negative root; if both roots are negative the sphere is
// entirely behind the ray and there is no hit.
bool Sphere::intersection(float *t, Vec3 origin,Vec3 direction){
Vec3 oc = origin - position;
float a = direction.dot(direction);
float b = 2.0f * oc.dot(direction);
float c = oc.dot(oc) - radius*radius;
float discriminant = b*b - 4*a*c;
if (discriminant < 0) {
return false;
}
float sq = sqrt(discriminant);
// Since a > 0, tNear <= tFar always holds.
float tNear = (-b - sq) / (2.0f*a);
float tFar  = (-b + sq) / (2.0f*a);
float t0 = tNear;
if (t0 < 0) {
t0 = tFar;            // near point is behind the origin; try the far one
if (t0 < 0) {
return false;         // whole sphere is behind the ray
}
}
*t = t0;
return true;
}
// Shading parameters for a sphere: its own colour as diffuse, light grey
// specular, shininess exponent 50.  Returns the (unnormalised) outward
// normal at surface point p0; the caller normalises it.
Vec3 Sphere::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
*Diffusecolor = cl;
*Specular = Vec3(0.7f, 0.7f, 0.7);
*intensity = 50.0f;
return p0 - position;
}
// Triangle primitive defined by the three vertices v0, v1, v2.
class Triangle:public Figure{
public:
Vec3 v0;
Vec3 v1;
Vec3 v2;
Vec3 norm;      // never assigned by the visible code paths
Vec3 ed0,ed1;   // edge vectors v1-v0 and v2-v0, cached by intersection()
float u,v,w;    // barycentric coordinates from the last intersection test
Triangle(void);
Triangle(Vec3 a,Vec3 b,Vec3 c,Vec3 col);
// Moeller-Trumbore style ray/triangle test; writes the hit parameter to *t.
bool intersection(float* t,Vec3 origin,Vec3 direction);
// Stub: always reports no occlusion (shadow rays use intersection() instead).
bool intersectionShadow(float* t,Vec3 origin,Vec3 direction);
// Fills the shading outputs and returns the (unnormalised) face normal.
Vec3 calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular);
};
// Degenerate default triangle: all three vertices at the origin.
Triangle::Triangle()
    : v0(Vec3(0,0,0)), v1(Vec3(0,0,0)), v2(Vec3(0,0,0)) {
}
// Triangle over vertices a, b, c with flat colour col.
Triangle::Triangle(Vec3 a, Vec3 b, Vec3 c, Vec3 col)
    : v0(a), v1(b), v2(c) {
    cl = col;
}
bool Triangle::intersection(float *t, Vec3 origin,Vec3 direction){
ed0 = v1-v0;
ed1 = v2-v0;
Vec3 r_o = origin-v0;
Vec3 r_ed = direction.cross(ed1);
u = r_o.dot(r_ed)/ed0.dot(r_ed);
Vec3 r0_ed0 = r_o.cross(ed0);
float rd_r0_ed0 = direction.dot(r0_ed0);
v = rd_r0_ed0/ed0.dot(r_ed);
float ed_r0_ed0 = ed1.dot(r0_ed0);
float t0 = ed_r0_ed0/ed0.dot(r_ed);
w = 1-u-v;
if((u<0) || (u>1)){
return false;
}
else if((v<0) || (u+v>1)){
return false;
}
else {
*t = t0;
return true;
}
}
// Shading parameters for a triangle: matte grey diffuse, the triangle's own
// colour in the "specular" slot, shininess exponent 0.  Returns the
// unnormalised face normal (edgeA x edgeB); the caller normalises it.
// Reference: https://www.tjhsst.edu/~dhyatt/supercomp/n310.html
Vec3 Triangle::calculateNormal(Vec3 p0, float *intensity, Vec3 *Diffusecolor, Vec3 *Specular){
*Specular = cl;
*Diffusecolor = Vec3(0.7, 0.7, 0.7);
*intensity = 0;
Vec3 edgeA = v1 - v0;
Vec3 edgeB = v2 - v1;
return edgeA.cross(edgeB);
}
// Stub override: always reports no occlusion.  Shadow rays in main() call
// intersection() instead, so this path is effectively unused.
bool Triangle::intersectionShadow(float* t,Vec3 origin,Vec3 direction){
return false;
}
using Colour = Vec3; // RGB Value
// Handy named colours (channel values in [0, 1]).
Colour red() { return Colour(1.0f, 0.0f, 0.0f); }
Colour white() { return Colour(1.0f, 1.0f, 1.0f); }
Colour black() { return Colour(0.0f, 0.0f, 0.0f); }
// Clamp an integer shading value into the displayable byte range [0, 255].
uchar BoundPixelValue(int shading)
{
return (shading < 0) ? 0 : (shading >= 255 ? 255 : shading);
}
// Component-wise scale of a vector by a scalar.
Vec3 scalar_multiply(Vec3 b, float v){
float sx = b.x() * v;
float sy = b.y() * v;
float sz = b.z() * v;
return Vec3(sx, sy, sz);
}
int main(int, char**){
Vec3 v0 = Vec3(-1.0f,-1.0f,-1.0f);
Vec3 v1 = Vec3(-1.0f,-1.0f,-2.0f);
Vec3 v2 = Vec3(-1.0f,1.0f,-1.0f);
Vec3 v3 = Vec3(-1.0f,1.0f,-2.0f);
Vec3 v4 = Vec3(1.0f,-1.0f,-1.0f);
Vec3 v5 = Vec3(1.0f,-1.0f,-2.0f);
Vec3 v6 = Vec3(1.0f,1.0f,-2.0f);
Vec3 v7 = Vec3(1.0f,1.0f,-1.0f);
Vec3 point_0 = Vec3();
Figure* figurelist[12];
//sphere
figurelist[0]=new Sphere(Vec3(-0.2f,0.3f,-1.5f),0.3f,Vec3(1.000f, 0.196f, 0.000f));
figurelist[1]=new Sphere(Vec3(0.5f,-0.3f,-1.3f),0.4f,Vec3(0.054f, 0.172f, 0.847f));
//floor
figurelist[2]=new Triangle(v1,v0,v2,Vec3(0.752f, 0.713f, 0.823f));
figurelist[3]=new Triangle(v2,v3,v1,Vec3(0.752f, 0.713f, 0.823f));
//left
figurelist[4]=new Triangle(v5,v1,v0,Vec3(0.749f, 0.105f, 0.101f));
figurelist[5]=new Triangle(v0,v4,v5,Vec3(0.749f, 0.105f, 0.101f));
//back
figurelist[6]=new Triangle(v5,v1,v3,Vec3(0.925f, 0.639f, 0.454f));
figurelist[7]=new Triangle(v3,v6,v5,Vec3(0.925f, 0.639f, 0.454f));
//right
figurelist[8]=new Triangle(v7,v6,v3,Vec3(0.415f, 0.733f, 0.164f));
figurelist[9]=new Triangle(v3,v2,v7,Vec3(0.415f, 0.733f, 0.164f));
//top
figurelist[10]=new Triangle(v5,v6,v7,Vec3(0.925f, 0.639f, 0.454f));
figurelist[11]=new Triangle(v7,v4,v5,Vec3(0.925f, 0.639f, 0.454f));
int wResolution = 640;
int hResolution = 480;
// #rows = hResolution, #cols = wResolution
Image<Colour> image(hResolution, wResolution);
Vec3 llc= Vec3(-1.0,-1.0,-1.0);
Vec3 urc = Vec3(1.0,1.0,-1.0);
Vec3 CameraPos = Vec3(0,0,0);
Vec3 sphere_amient(0.960, 0.968, 0.811);
for (int row = 0; row < image.rows(); ++row) {
for (int col = 0; col < image.cols(); ++col) {
float u = float(row+0.5)/float(image.rows());
float v = float(col+0.5)/float(image.cols());
Vec3 PointPos = Vec3(llc(0) + u * (urc.x() - llc.x()), llc.y() + v * (urc.y() - llc.y()), -1);
Vec3 direction=(PointPos-CameraPos).normalized();
float minT = INFINITY;
int figureHit = -1;
float t0=0.0;
for (int k =0;k<sizeof (figurelist)/sizeof (figurelist[0]);k++){
bool hit = figurelist[k]->intersection(&t0,CameraPos,direction);
if(hit && t0<minT){
minT = t0;
figureHit = k;
}
if(figureHit != -1){
Vec3 p0 = CameraPos+minT*direction;
Vec3 lightSource=Vec3(2.0f,0.0f,-1.0f);
float lightIntensity=0.7f;
Vec3 diffuseColour(0.0f, 0.392f, 0.0f);
Vec3 specularColour(0.0,0.0,0.0);
float intensity = 0;
//ambient Colour for shadows
Vec3 AmbientColour = figurelist[figureHit]->cl.cross(Vec3(0.1f, 0.1f, 0.1f));
//Diffuse Lightning
Vec3 light_direction = (lightSource-p0).normalized();
Vec3 Normal = Vec3(figurelist[figureHit]->calculateNormal(p0,&intensity,&diffuseColour,&specularColour)).normalized();
float diffuse_term =std::max(0.0f,light_direction.dot(Normal));
Vec3 diffuse = (diffuseColour*lightIntensity*diffuse_term);
//Specular Highlights
Vec3 e = (p0-CameraPos).normalized();
Vec3 R = (e+light_direction).normalized();
float dot2 = std::max(0.0f,R.dot(Normal));
Vec3 specular = specularColour*lightIntensity*pow(dot2,intensity);
Vec3 shadow_direction = p0-light_direction;
float bias = 0.001f;
Vec3 p_shadow = p0+Normal;
//For hard shadows
int lightHit = -1;
for ( int i=0;i<sizeof (figurelist)/sizeof (figurelist[0]);i++){
bool lightRayHit = figurelist[i]->intersection(&t0,p_shadow,shadow_direction);
if(lightRayHit && t0<minT){
minT = t0;
lightHit = i;
}
}
if(lightHit != -1){
image(row,col) = AmbientColour;
}
else{
image(row,col) = [enter image description here][1]specular+diffuse;
}
}
else {
image(row,col)=white();
}
}
}
}
bmpwrite("../../out.bmp", image);
imshow(image);
return EXIT_SUCCESS;
}
Attached is the output image I am getting. First, the image after applying shadow tracing:
And here is the original image without shadows:
This should be the problem:
Vec3 shadow_direction = p0-light_direction;
p0 is a position, light_direction is a direction, hence, the result is a position. But you are using it as a direction. Instead do:
Vec3 shadow_direction = -light_direction;
Also
float bias = 0.001f;
Vec3 p_shadow = p0+Normal;
was probably meant to be
Vec3 p_shadow = p0 + bias * Normal;
Related
Slower read/write to shared memory in CUDA than in Compute Shader [closed]
Closed. This question needs debugging details. It is not currently accepting answers. Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question. Closed 4 months ago. Improve this question I am currently comparing the implementation of a n-body simulation in the GPU using CUDA and OpenGL (Compute Shaders) for a project, but I run into a problem using shared memory. First I implemented the version with no shared memory as follows: CUDA #include "helper_math.h" //... __device__ float dist2(float3 A, float3 B) { float3 C = A - B; return dot(C, C); } __global__ void n_body_vel_calc(float3* positions, float3* velocities, unsigned numParticles, float mass, float deltaTime) { unsigned i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= numParticles) return; const float G = 6.6743e-11f; float3 cur_position = positions[i]; float3 force = make_float3(0.0f, 0.0f, 0.0f); for (unsigned j = 0; j < numParticles; ++j) { if (i == j) continue; float3 neighbor_position = positions[j]; float inv_distance2 = 1.0f / dist2(cur_position, neighbor_position); float3 direction = normalize(neighbor_position - cur_position); force += G * mass * mass * inv_distance2 * direction; } float3 acceleration = force / mass; velocities[i] += acceleration * deltaTime; } OpenGL // glBufferStorage(GL_SHADER_STORAGE_BUFFER, ..., ..., ...); #version 460 layout(local_size_x=128) in; layout(location = 0) uniform int numParticles; layout(location = 1) uniform float mass; layout(location = 2) uniform float dt; layout(std430, binding=0) buffer pblock { vec3 positions[]; }; layout(std430, binding=1) buffer vblock { vec3 velocities[]; }; float dist2(vec3 A, vec3 B) { vec3 C = A - B; return dot( C, C ); } void main() { int i = int(gl_GlobalInvocationID); if (i >= numParticles) return; const float G = 6.6743e-11f; vec3 cur_position = positions[i]; vec3 force = vec3(0.0); for (uint j = 0; j < 
numParticles; ++j) { if (i == j) continue; vec3 neighbor_position = positions[j]; float inv_distance2 = 1.0 / dist2(cur_position, neighbor_position); vec3 direction = normalize(neighbor_position - cur_position); force += G * mass * mass * inv_distance2 * direction; } vec3 acceleration = force / mass; velocities[i] += acceleration * dt; } With the same number of threads per group, number of particles and the same number of times executing the kernel, the CUDA version takes 82 ms and OpengGL takes 70 ms. Weird thing that there speed is much different, but I can attribute that to GLSL having geometric operations optimized somehow. My problem comes next, when I write the versions with shared memory, which should increase the performance by not reading from global memory multiple times. CUDA __global__ void n_body_vel_calc(float3* positions, float3 * velocities, unsigned workgroupSize, unsigned numParticles, float mass, float deltaTime) { // size of array == workgroupSize extern __shared__ float3 temp_tile[]; unsigned i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= numParticles) return; const float G = 6.6743e-11f; float3 cur_position = positions[i]; float3 force = make_float3(0.0f, 0.0f, 0.0f); for (unsigned tile = 0; tile < numParticles; tile += workgroupSize) { temp_tile[threadIdx.x] = positions[tile + threadIdx.x]; __syncthreads(); for (unsigned j = 0; j < workgroupSize; ++j) { if (i == j || ((tile + j) >= numParticles)) continue; float3 neighbor_position = temp_tile[j]; float inv_distance2 = 1.0f / dist2(cur_position, neighbor_position); float3 direction = normalize(neighbor_position - cur_position); force += G * mass * mass * inv_distance2 * direction; } __syncthreads(); } float3 acceleration = force / mass; velocities[i] += acceleration * deltaTime; } OpenGL #version 460 layout(local_size_x=128) in; layout(location = 0) uniform int numParticles; layout(location = 1) uniform float mass; layout(location = 2) uniform float dt; layout(std430, binding=0) buffer 
pblock { vec3 positions[]; }; layout(std430, binding=1) buffer vblock { vec3 velocities[]; }; // Shared variables shared vec3 temp_tile[gl_WorkGroupSize.x]; void main() { int i = int(gl_GlobalInvocationID); if (i >= numParticles) return; const float G = 6.6743e-11f; vec3 cur_position = positions[i]; vec3 force = vec3(0.0); for (uint tile = 0; tile < numParticles; tile += gl_WorkGroupSize.x) { temp_tile[gl_LocalInvocationIndex] = positions[tile + gl_LocalInvocationIndex]; groupMemoryBarrier(); barrier(); for (uint j = 0; j < gl_WorkGroupSize.x; ++j) { if (i == j || (tile + j) >= numParticles) continue; vec3 neighbor_position = temp_tile[j]; float inv_distance2 = 1.0 / dist2(cur_position, neighbor_position); vec3 direction = normalize(neighbor_position - cur_position); force += G * mass * mass * inv_distance2 * direction; } groupMemoryBarrier(); barrier(); } vec3 acceleration = force / mass; velocities[i] += acceleration * dt; } My principal problem comes next. With the same parameters as above, the CUDA version increases its execution time to 128 ms (greatly diminishing its performance), and the OpenGL one took 68 (a small improvement over the other version). I have compiled the CUDA version with the toolkit version 11.7 and 10.0 with MSVC V143 and V142 and the results are more or less the same. Why the OpenGL implementation is faster with shared memory, but the CUDA one its not? Am I missing something?
Why are my ray march fragment shader reflection texture lookups slowing my frame rate?
I’ve written a Fragment shader in GLSL, using shader toy. Link : https://www.shadertoy.com/view/wtGSzy most of it works, but when I enable texture lookups in the reflection function, the performance drops from 60FPS to 5~FPS. The code in question is on lines 173 - 176 if(SDFObjectToDraw.texChannelID == 0) col = texture(iChannel0, uv); if(SDFObjectToDraw.texChannelID == 1) col = texture(iChannel1, uv); This same code can bee seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. It only causes issues in the reflection function. My question is, why are my texture lookups, in the reflection code, dropping my performance this much and what can I do to improve it? /** * Return the normalized direction to march in from the eye point for a single pixel. * * fieldOfView: vertical field of view in degrees * size: resolution of the output image * fragCoord: the x,y coordinate of the pixel in the output image */ vec3 rayDirection(float fieldOfView, vec2 size, vec2 fragCoord) { vec2 xy = fragCoord - size / 2.0; float z = size.y / tan(radians(fieldOfView) / 2.0); return normalize(vec3(xy, -z)); } float start = 0.0; vec3 eye = vec3(0,0,5); int MAX_MARCHING_STEPS = 255; float EPSILON = 0.00001; float end = 10.0; const uint Shpere = 1u; const uint Box = 2u; const uint Plane = 4u; vec3 lightPos = vec3(-10,0,5); #define M_PI 3.1415926535897932384626433832795 const int SDF_OBJECT_COUNT = 4; struct SDFObject { uint Shape; vec3 Position; float Radius; int texChannelID; float Ambiant; float Spec; float Diff; vec3 BoxSize; bool isMirror; //quick hack to get refletions working }; SDFObject SDFObjects[SDF_OBJECT_COUNT] = SDFObject[SDF_OBJECT_COUNT]( SDFObject(Shpere, vec3(2,0,-3),1.0,0,0.2,0.2,0.8, vec3(0,0,0),true) ,SDFObject(Shpere, vec3(-2,0,-3),1.0,0,0.1,1.0,1.0, vec3(0,0,0),false) ,SDFObject(Box, vec3(0,0,-6),0.2,1,0.2,0.2,0.8, vec3(1.0,0.5,0.5),false) ,SDFObject(Plane, vec3(0,0,0),1.0,1,0.2,0.2,0.8, vec3(0.0,1.0,0.0),false) ); float shereSDF(vec3 p, 
SDFObject o) { return length(p-o.Position)-o.Radius; } float boxSDF(vec3 pointToTest, vec3 boxBoundery, float radius, vec3 boxPos) { vec3 q = abs(pointToTest - boxPos) - boxBoundery; return length(max(q,0.0)) + min(max(q.x, max(q.y,q.z)) ,0.0) -radius; } float planeSDF(vec3 p, vec4 n, vec3 Pos) { return dot(p-Pos, n.xyz) + n.w; } bool IsShadow(vec3 LightPos, vec3 HitPos) { bool isShadow = false; vec3 viewRayDirection = normalize(lightPos- HitPos) ; float depth = start; vec3 hitpoint; for(int i=0; i<MAX_MARCHING_STEPS; i++) { hitpoint = (HitPos+ depth * viewRayDirection); float dist = end; for(int j =0; j<SDF_OBJECT_COUNT; j++) { float distToObjectBeingConsidered; if(SDFObjects[j].Shape == Shpere) distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]); if(SDFObjects[j].Shape == Box) distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position); if(SDFObjects[j].Shape == Plane) distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position); if( distToObjectBeingConsidered < dist) { dist = distToObjectBeingConsidered; } } if(dist < EPSILON) { isShadow = true; } depth += dist; if(depth >= end) { isShadow = false; } } return isShadow; } vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore) { vec3 returnCol; vec3 reflectedRay = reflect(inComingRay, surfNormal); vec3 RayDirection = normalize(reflectedRay) ; float depth = start; vec3 hitpoint; int i; for(i=0; i<MAX_MARCHING_STEPS; i++) { hitpoint = (HitPos+ depth * RayDirection); SDFObject SDFObjectToDraw; float dist = end; for(int j =0; j<SDF_OBJECT_COUNT; j++) { float distToObjectBeingConsidered; if(SDFObjects[j].Shape == Shpere) distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]); if(SDFObjects[j].Shape == Box) distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position); 
if(SDFObjects[j].Shape == Plane) distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position); if( distToObjectBeingConsidered < dist && j!= objectIndexToIgnore )// D > 0.0) { dist = distToObjectBeingConsidered; SDFObjectToDraw = SDFObjects[j]; } } if(dist < EPSILON) { vec3 normal =normalize(hitpoint-SDFObjectToDraw.Position); float u = 0.5+ (atan(normal.z, normal.x)/(2.0*M_PI)); float v = 0.5+ (asin(normal.y)/(M_PI)); vec2 uv =vec2(u,v); vec4 col = vec4(0,0.5,0.5,0); ///>>>>>>>>>>>> THESE LINES ARE broken, WHY? //if(SDFObjectToDraw.texChannelID == 0) //col = texture(iChannel0, uv); //if(SDFObjectToDraw.texChannelID == 1) //col = texture(iChannel1, uv); vec3 NormalizedDirToLight = normalize(lightPos-SDFObjectToDraw.Position); float theta = dot(normal,NormalizedDirToLight); vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal); vec3 viewDir = normalize(SDFObjectToDraw.Position); float Spec = dot(reflectionOfLight, viewDir); if(IsShadow(lightPos, hitpoint)) { returnCol= (col.xyz*SDFObjectToDraw.Ambiant); } else { returnCol= (col.xyz*SDFObjectToDraw.Ambiant) +(col.xyz * max(theta *SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant)); } break; } depth += dist; if(depth >= end) { //should look up bg texture here but cant be assed right now returnCol = vec3(1.0,0.0,0.0); break; } } return returnCol;//*= (vec3(i+1)/vec3(MAX_MARCHING_STEPS)); } vec3 rayMarch(vec2 fragCoord) { vec3 viewRayDirection = rayDirection(45.0, iResolution.xy, fragCoord); float depth = start; vec3 hitpoint; vec3 ReturnColour = vec3(0,0,0); for(int i=0; i<MAX_MARCHING_STEPS; i++) { hitpoint = (eye+ depth * viewRayDirection); float dist = end; SDFObject SDFObjectToDraw; int objectInDexToIgnore=-1; //find closest objecct to current point for(int j =0; j<SDF_OBJECT_COUNT; j++) { float distToObjectBeingConsidered; if(SDFObjects[j].Shape == Shpere) distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]); if(SDFObjects[j].Shape == 
Box) distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position); if(SDFObjects[j].Shape == Plane) distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position); if( distToObjectBeingConsidered < dist) { dist = distToObjectBeingConsidered; SDFObjectToDraw = SDFObjects[j]; objectInDexToIgnore = j; } } //if we are close enough to an objectoto hit it. if(dist < EPSILON) { vec3 normal =normalize(hitpoint-SDFObjectToDraw.Position); if(SDFObjectToDraw.isMirror) { ReturnColour = MirrorReflection( viewRayDirection, normal, hitpoint, objectInDexToIgnore); } else { float u = 0.5+ (atan(normal.z, normal.x)/(2.0*M_PI)); float v = 0.5+ (asin(normal.y)/(M_PI)); vec2 uv =vec2(u,v); vec4 col; if(SDFObjectToDraw.texChannelID == 0) col = texture(iChannel0, uv); if(SDFObjectToDraw.texChannelID == 1) col = texture(iChannel1, uv); vec3 NormalizedDirToLight = normalize(lightPos-SDFObjectToDraw.Position); float theta = dot(normal,NormalizedDirToLight); vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal); vec3 viewDir = normalize(SDFObjectToDraw.Position); float Spec = dot(reflectionOfLight, viewDir); if(IsShadow(lightPos, hitpoint)) { ReturnColour= (col.xyz*SDFObjectToDraw.Ambiant); } else { ReturnColour= (col.xyz*SDFObjectToDraw.Ambiant) +(col.xyz * max(theta *SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant)); //+(col.xyz* Spec * SDFObjectToDraw.Spec); } } return ReturnColour; } depth += dist; if(depth >= end) { float u = fragCoord.x/ iResolution.x; float v = fragCoord.y/ iResolution.y; vec4 col = texture(iChannel2, vec2(u,v)); ReturnColour =col.xyz; } } return ReturnColour; } void mainImage( out vec4 fragColor, in vec2 fragCoord ) { // Normalized pixel coordinates (from 0 to 1) //vec2 uv = fragCoord/iResolution.xy; // Time varying pixel color //vec3 col = 0.5 + 0.5*cos(iTime+uv.xyx+vec3(0,2,4)); // Output to screen lightPos *= cos(iTime+vec3(1.5,2,2)); 
//lightPos= vec3(cos(iTime)*2.0,0,0); vec3 SDFCol= rayMarch(fragCoord); vec3 col = vec3(0); //if(SDFVal <=1.0) // col = vec3(1,0,0); //col = vec3(SDFVal,0,0); col = vec3(0.5,0,0); col = SDFCol; fragColor = vec4(col,1.0); }
[...] This same code can bee seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. [...] The "working" texture lookup is executed in a loop in rayMarch. MAX_MARCHING_STEPS is 255, so the lookup is done at most 255 times. vec3 rayMarch(vec2 fragCoord) { // [...] for(int i=0; i<MAX_MARCHING_STEPS; i++) { // [...] if(SDFObjectToDraw.texChannelID == 0) col = texture(iChannel0, uv); if(SDFObjectToDraw.texChannelID == 1) col = texture(iChannel1, uv); // [...] } // [...] } When you do the lookup in MirrorReflection then the performance breaks down, because it is done in a loop in MirrorReflection and MirrorReflection is called in a loop in rayMarch. In this case the lookup is done up to 255*255 = 65025 times. ~65000 texture lookups for a fragment is far to much and cause the break down of performance. vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore) { // [...] for(i=0; i<MAX_MARCHING_STEPS; i++) { // [...] if(SDFObjectToDraw.texChannelID == 0) col = texture(iChannel0, uv); if(SDFObjectToDraw.texChannelID == 1) col = texture(iChannel1, uv); // [...] } // [...] } vec3 rayMarch(vec2 fragCoord) { // [...] for(int i=0; i<MAX_MARCHING_STEPS; i++) { // [...] ReturnColour = MirrorReflection(viewRayDirection, normal, hitpoint, objectInDexToIgnore); // [...] } // [...] }
How to avoid extra calculations in fragment shader
im trying to fix this shader. the effects is a radial blur around a point position, passing from the cpu in a array. The calculations works fine for each point and generates de effect, but as you can see in this picture, for each loop the shader keep generate samples, and i dont know how to avoid. i only want the blur for each point in the array #version 150 in vec2 varyingtexcoord; uniform sampler2DRect tex0; uniform int size; float exposure = 0.79; float decay = 0.9; float density = .9; float weight = .1; int samples = 25; out vec4 fragColor; const int MAX_SAMPLES = 25; const int N = 3; uniform vec2 ligthPos [N]; int a = 1; vec4 halo(vec2 pos){ float illuminationDecay = 1.2; vec2 texCoord = varyingtexcoord; vec2 current = pos.xy; vec2 deltaTextCoord = texCoord - current; deltaTextCoord *= 1.0 / float(samples) * density; vec4 color = texture(tex0, texCoord); for(int i=0; i < MAX_SAMPLES; i++){ texCoord -= deltaTextCoord; vec4 sample = texture(tex0, texCoord); sample *= illuminationDecay * weight; color += sample; illuminationDecay *= decay; } return color; } void main(){ vec4 accum = vec4(0.0); for(int e = 0; e < N;e++){ vec2 current =ligthPos[e]; accum += halo(current); } fragColor = (accum) * exposure; } this is what happen:
Omni-directional light in raytracing program gives wrong render c++
I am trying to implement an omni-directional light source (a.k.a., point light source) in my raytracing program in C++. I am not getting the expected results, but I can't figure out the problem. Maybe someone can see what I am doing wrong. I have included the two functions that are responsible for raytracing and the light. The ClosestIntersection function finds the closest intersection and a triangle. That is used later in the DirectLight function. I would really appreciate any help. #include <iostream> #include <glm/glm.hpp> #include <SDL.h> #include "SDLauxiliary.h" #include "TestModel.h" #include "math.h" using namespace std; using glm::vec3; using glm::mat3; // ---------------------------------------------------------------------------- // GLOBAL VARIABLES const int SCREEN_WIDTH = 500; const int SCREEN_HEIGHT = 500; SDL_Surface* screen; int t; vector<Triangle> triangles; float focalLength = 900; vec3 cameraPos(0, 0, -4.5); vec3 lightPos(0.5, 0.5, 0); vec3 lightColor = 14.f * vec3(1,1,1); // Translate camera float translation = 0.1; // use this to set translation increment // Rotate camera float yaw; vec3 trueCameraPos; const float PI = 3.1415927; // ---------------------------------------------------------------------------- // CLASSES class Intersection; // ---------------------------------------------------------------------------- // FUNCTIONS void Update(); void Draw(); bool ClosestIntersection(vec3 start, vec3 dir, const vector<Triangle>& triangles, Intersection& closestIntersection); vec3 DirectLight(const Intersection& i); // ---------------------------------------------------------------------------- // STRUCTURES struct Intersection { vec3 position; float distance; int triangleIndex; }; float m = std::numeric_limits<float>::max(); int main(int argc, char* argv[]) { LoadTestModel(triangles); screen = InitializeSDL(SCREEN_WIDTH, SCREEN_HEIGHT); t = SDL_GetTicks(); // Set start value for timer. 
while (NoQuitMessageSDL()) { Update(); Draw(); } SDL_SaveBMP(screen, "screenshot.bmp"); return 0; } void Update() { // Compute frame time: int t2 = SDL_GetTicks(); float dt = float(t2 - t); t = t2; cout << "Render time: " << dt << " ms." << endl; } } void Draw() { if (SDL_MUSTLOCK(screen)) SDL_LockSurface(screen); for (int y = 0; y<SCREEN_HEIGHT; ++y) { for (int x = 0; x < SCREEN_WIDTH; ++x) { vec3 start = cameraPos; vec3 dir(x - SCREEN_WIDTH / 2, y - SCREEN_HEIGHT / 2, focalLength); Intersection intersection; if (ClosestIntersection(start, dir, triangles, intersection)) { //vec3 theColor = triangles[intersection.triangleIndex].color; vec3 theColor = DirectLight(intersection); PutPixelSDL(screen, x, y, theColor); } else { vec3 color(0, 0, 0); PutPixelSDL(screen, x, y, color); } } } if (SDL_MUSTLOCK(screen)) SDL_UnlockSurface(screen); SDL_UpdateRect(screen, 0, 0, 0, 0); } bool ClosestIntersection(vec3 s, vec3 d, const vector<Triangle>& triangles, Intersection& closestIntersection) { closestIntersection.distance = m; for (size_t i = 0; i < triangles.size(); i++) { vec3 v0 = triangles[i].v0; vec3 v1 = triangles[i].v1; vec3 v2 = triangles[i].v2; vec3 u = v1 - v0; vec3 v = v2 - v0; vec3 b = s - v0; vec3 x; // Determinant of A = [-d u v] float det = -d.x * ((u.y * v.z) - (v.y * u.z)) - u.x * ((-d.y * v.z) - (v.y * -d.z)) + v.x * ((-d.y * u.z) - (u.y * -d.z)); // Cramer'r Rule for t = x.x x.x = (b.x * ((u.y * v.z) - (v.y * u.z)) - u.x * ((b.y * v.z) - (v.y * b.z)) + v.x * ((b.y * u.z) - (u.y * b.z))) / det; if (x.x >= 0) { // Cramer'r Rule for u = x.y x.y = (-d.x * ((b.y * v.z) - (v.y * b.z)) - b.x * ((-d.y * v.z) - (v.y * -d.z)) + v.x * ((-d.y * b.z) - (b.y * -d.z))) / det; // Cramer'r Rule for v = x.z x.z = (-d.x * ((u.y * b.z) - (b.y * u.z)) - u.x * ((-d.y * b.z) - (b.y * -d.z)) + b.x * ((-d.y * u.z) - (u.y * -d.z))) / det; if (x.y >= 0 && x.z >= 0 && x.y + x.z <= 1 && x.x < closestIntersection.distance) { closestIntersection.position = x; closestIntersection.distance 
= x.x; closestIntersection.triangleIndex = i; } } } //end of for loop if (closestIntersection.distance != m) { return true; } else { return false; } } vec3 DirectLight(const Intersection& i) { vec3 n = triangles[i.triangleIndex].normal; vec3 r = lightPos - i.position; float R2 = r.x * r.x + r.y * r.y + r.z * r.z; vec3 D = (lightColor * fmaxf((glm::dot(glm::normalize(r), n)), 0)) / (4 * PI * R2); return D; }
If I'm understanding the code in ClosestIntersection correctly, here's what it's doing for each triangle: Let u,v be the vectors from one vertex of the triangle to the other two vertices. Let d be (the reverse of) the direction of the ray we're considering. And let b be the vector from that vertex of the triangle to the camera. Find p,q,r so that b = pd+qu+rv (p,q,r are what your code calls x.x, x.y, x.z). Now the ray meets the triangle if p>0, q>=0, r>=0, q+r<=1 and the distance to the intersection point is p. So, the conditions on q,r make sense; the idea is that b-qu-rv is the vector from the camera to the relevant point in the triangle and it's in direction d. Your distances aren't really distances, but along a single ray they're the same multiple of the actual distance, which means that this works fine for determining which triangle you've hit, and that's all you use them for. So far, so good. But then you say closestIntersection.position = x; and surely that's all wrong, because this x isn't in the same coordinate system as your camera location, triangle vertices, etc. It's in this funny "how much of d, how much of u, how much of v" coordinate system which isn't even the same from one triangle to the next. (Which is why you are getting discontinuities at triangle boundaries even within a single face, I think.) Try setting it to v0+x.y*(v1-v0)+x.z*(v2-v0) instead (I think this is right; it's meant to be the actual point where the ray crosses the triangle, in the same coordinates as all your other points) and see what it does.
This isn't a super-great answer, but I managed to make your code work without the strange shading discontinuities. The problem happens in ClosestIntersection and maybe Gareth's answer covers it. I need to stop looking at this now, but I wanted to show you what I have before I leave, and I need an Answer to post some code. // This starts with some vec3 helper functions which make things // easier to look at float Dot(const vec3& a, const vec3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; } vec3 Cross(const vec3& a, const vec3& b) { return vec3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x); } float L2(const vec3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; } float Abs(const vec3& v) { return std::sqrt(L2(v)); } // Here is the replacement version of ClosestIntersection bool ClosestIntersection(vec3 cam, vec3 dir, const vector<Triangle>& triangles, Intersection& closestIntersection) { closestIntersection.distance = m; vec3 P0 = cam; vec3 P1 = cam + dir; for (size_t i = 0; i < triangles.size(); ++i) { vec3 v0 = triangles[i].v0; vec3 v1 = triangles[i].v1; vec3 v2 = triangles[i].v2; // Dan Sunday // http://geomalgorithms.com/a06-_intersect-2.html vec3 u = v1 - v0; vec3 v = v2 - v0; // w = P-v0, solve w = su +tv (s, t are parametric scalars) vec3 n = Cross(u, v); float ri = Dot(n, (v0 - P0)) / Dot(n, (P1 - P0)); vec3 Pi = P0 + ri * (P1- P0); vec3 w = Pi - v0; // s = w . (n x v) / (u . (n x v)) // t = w . (n x u) / (v . (n x u)) float s = Dot(w, Cross(n, v)) / Dot(u, Cross(n, v)); float t = Dot(w, Cross(n, u)) / Dot(v, Cross(n, u)); if(s >= 0 && t >= 0 && s+t <= 1) { float dist = Abs(cam - Pi); if(dist < closestIntersection.distance) { closestIntersection.position = Pi; closestIntersection.distance = dist; closestIntersection.triangleIndex = int(i); } } } return closestIntersection.distance != m; } Good luck.
Uniform optimization ( - 1 values)
Consider following fragment shader: uniform PolygonData { int count; float points[1024]; } polygon; out vec4 outColor; void main() { float j; for (int i = 0; i < polygon.count; ++i) { j = polygon.points[i++]; j = polygon.points[i++]; j = polygon.points[i++]; } outColor = vec4(1, 1, j, 1); } Why is polygon.count and polygon.points optimized out?
#GuyRT Yes. That was it. From there I got the ingormation that I was exceeding the maximum uniform array size. This is what i was doing: Vertex: in vec3 inPosition; void main(void) { gl_Position = vec4(inPosition, 1.0); } Geometry: layout(lines) in; layout(triangle_strip) out; layout(max_vertices = 4) out; out vec3 worldPos; uniform mat4 u_proj; void main() { vec4 pos0 = u_proj * gl_in[0].gl_Position; vec4 pos1 = u_proj * gl_in[1].gl_Position; //left up gl_Position.x = pos0.x; gl_Position.y = pos1.y; gl_Position.w = pos0.w; worldPos = gl_Position.xyz; EmitVertex(); //left down gl_Position = pos0; worldPos = gl_Position.xyz; EmitVertex(); //right up gl_Position = pos1; worldPos = gl_Position.xyz; EmitVertex(); //right down gl_Position.x = pos1.x; gl_Position.y = pos0.y; gl_Position.w = pos1.w; worldPos = gl_Position.xyz; EmitVertex(); EndPrimitive(); } Fragment: struct PolyData { int count; float points[256]; }; uniform PolyData p; uniform mat4 u_proj; in vec3 worldPos; out vec4 outColor; void main() { float testx = worldPos.x; float testy = worldPos.y; bool c = false; int i; int j; for (i = 0, j = p.count - 4; i < p.count; j = i, i = i + 3) { vec4 i_vec = u_proj * vec4(p.points[i], p.points[i + 1], 0, 1.0); vec4 j_vec = u_proj * vec4(p.points[j], p.points[j + 1], 0, 1.0); if ( (i_vec.y >= testy) != (j_vec.y >= testy) && (testx <= (j_vec.x - i_vec.x) * (testy - i_vec.y) / (j_vec.y - i_vec.y) + i_vec.x)) c = !c; } if (c == true) { outColor = vec4(1, 1, 0, 1); } else { outColor = vec4(0, 0, 0, 0); } } Thanks for all the help.