GPU ray casting (single pass) with 3D textures in spherical coordinates - OpenGL
I am implementing a volume-rendering algorithm, "GPU ray casting, single pass". For this I use a float array of intensity values as a 3D texture (this 3D texture describes a regular 3D grid in spherical coordinates).
Here are some example array values:
75.839354473071637,
64.083049468866022,
65.253933716444365,
79.992431196592577,
84.411485976957096,
0.0000000000000000,
82.020319431382831,
76.808403454586994,
79.974774618246158,
0.0000000000000000,
91.127273013466336,
84.009956557448433,
90.221356094672814,
87.567422484025627,
71.940263118478072,
0.0000000000000000,
0.0000000000000000,
74.487058398181944,
..................,
..................
(The complete data is here: [link](https://drive.google.com/file/d/1lbXzRucUseF-ITzFgxqeLTd0WglJJOoz/view?usp=sharing))
The dimensions of the spherical grid are (r,theta,phi)=(384,15,768), and this is the call that loads the texture:
glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, 384, 15, 768, 0, GL_RED, GL_FLOAT, dataArray);
And this is an image of my visualization:
The problem is that the visualization should be a disk, or at least a similar shape.
I think the problem is that I do not specify the texture coordinates correctly (in spherical coordinates).
This is the vertex shader code:
#version 330 core
layout(location = 0) in vec3 vVertex; //object space vertex position
//uniform
uniform mat4 MVP; //combined modelview projection matrix
smooth out vec3 vUV; //3D texture coordinates for texture lookup in the fragment shader
void main()
{
//get the clipspace position
gl_Position = MVP*vec4(vVertex.xyz,1);
//get the 3D texture coordinates by adding (0.5,0.5,0.5) to the object space
//vertex position. Since the unit cube is at origin (min: (-0.5,-0.5,-0.5) and max: (0.5,0.5,0.5))
//adding (0.5,0.5,0.5) to the unit cube object space position gives us values from (0,0,0) to
//(1,1,1)
vUV = vVertex + vec3(0.5);
}
And this is the fragment shader code:
#version 330 core
layout(location = 0) out vec4 vFragColor; //fragment shader output
smooth in vec3 vUV; //3D texture coordinates from vertex shader
//interpolated by rasterizer
//uniforms
uniform sampler3D volume; //volume dataset
uniform vec3 camPos; //camera position
uniform vec3 step_size; //ray step size
//constants
const int MAX_SAMPLES = 300; //total samples for each ray march step
const vec3 texMin = vec3(0); //minimum texture access coordinate
const vec3 texMax = vec3(1); //maximum texture access coordinate
vec4 colour_transfer(float intensity)
{
vec3 high = vec3(100.0, 20.0, 10.0);
// vec3 low = vec3(0.0, 0.0, 0.0);
float alpha = (exp(intensity) - 1.0) / (exp(1.0) - 1.0);
return vec4(intensity * high, alpha);
}
void main()
{
//get the 3D texture coordinates for lookup into the volume dataset
vec3 dataPos = vUV;
//Getting the ray marching direction:
//get the object space position by subtracting 0.5 from the
//3D texture coordinates. Then subtract it from the camera position
//and normalize to get the ray marching direction
vec3 geomDir = normalize((vUV-vec3(0.5)) - camPos);
//multiply the raymarching direction with the step size to get the
//sub-step size we need to take at each raymarching step
vec3 dirStep = geomDir * step_size;
//flag to indicate if the raymarch loop should terminate
bool stop = false;
//for all samples along the ray
for (int i = 0; i < MAX_SAMPLES; i++) {
// advance ray by dirstep
dataPos = dataPos + dirStep;
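//the dot product equals 3 only while every component of dataPos lies
//strictly inside (texMin,texMax); any component outside flips one of the
//sign() factors and drops the sum below 3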
stop = dot(sign(dataPos-texMin),sign(texMax-dataPos)) < 3.0;
//if the stopping condition is true we break out of the ray marching loop
if (stop)
break;
// data fetching from the red channel of volume texture
float sample = texture(volume, dataPos).r;
vec4 c = colour_transfer(sample);
vFragColor.rgb = c.a * c.rgb + (1 - c.a) * vFragColor.a * vFragColor.rgb;
vFragColor.a = c.a + (1 - c.a) * vFragColor.a;
//early ray termination
//if the currently composited colour alpha is already fully saturated
//we terminate the loop
if( vFragColor.a>0.99)
break;
}
}
How can I specify the texture coordinates so that the information in the 3D texture is visualized correctly in spherical coordinates?
UPDATE:
Vertex shader:
#version 330 core
layout(location = 0) in vec3 vVertex; //object space vertex position
//uniform
uniform mat4 MVP; //combined modelview projection matrix
smooth out vec3 vUV; //3D texture coordinates for texture lookup in the fragment shader
void main()
{
//get the clipspace position
gl_Position = MVP*vec4(vVertex.xyz,1);
//get the 3D texture coordinates by adding (0.5,0.5,0.5) to the object space
//vertex position. Since the unit cube is at origin (min: (-0.5,-0.5,-0.5) and max: (0.5,0.5,0.5))
//adding (0.5,0.5,0.5) to the unit cube object space position gives us values from (0,0,0) to
//(1,1,1)
vUV = vVertex + vec3(0.5);
}
And the fragment shader:
#version 330 core
#define Pi 3.1415926535897932384626433832795
layout(location = 0) out vec4 vFragColor; //fragment shader output
smooth in vec3 vUV; //3D texture coordinates from vertex shader
//interpolated by rasterizer
//uniforms
uniform sampler3D volume; //volume dataset
uniform vec3 camPos; //camera position
uniform vec3 step_size; //ray step size
//constants
const int MAX_SAMPLES = 200; //total samples for each ray march step
const vec3 texMin = vec3(0); //minimum texture access coordinate
const vec3 texMax = vec3(1); //maximum texture access coordinate
// transfer function that assigns a color and alpha from the sample intensity
vec4 colour_transfer(float intensity)
{
vec3 high = vec3(100.0, 20.0, 10.0);
// vec3 low = vec3(0.0, 0.0, 0.0);
float alpha = (exp(intensity) - 1.0) / (exp(1.0) - 1.0);
return vec4(intensity * high, alpha);
}
// this function transforms a vector from Cartesian to spherical coordinates
vec3 cart2Sphe(vec3 cart){
vec3 sphe;
sphe.x = sqrt(cart.x*cart.x+cart.y*cart.y+cart.z*cart.z);
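// note: single-argument atan only returns values in (-Pi/2,Pi/2) and loses
// the quadrant; the two-argument overload atan(cart.y, cart.x) would cover
// the full (-Pi,Pi] range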
sphe.z = atan(cart.y/cart.x);
sphe.y = atan(sqrt(cart.x*cart.x+cart.y*cart.y)/cart.z);
return sphe;
}
void main()
{
//get the 3D texture coordinates for lookup into the volume dataset
vec3 dataPos = vUV;
//Getting the ray marching direction:
//get the object space position by subtracting 0.5 from the
//3D texture coordinates. Then subtract it from the camera position
//and normalize to get the ray marching direction
vec3 vec=(vUV-vec3(0.5));
vec3 spheVec=cart2Sphe(vec); // transform position to spherical
vec3 sphePos=cart2Sphe(camPos); //transform camPos to spherical
vec3 geomDir= normalize(spheVec-sphePos); // ray direction
//multiply the raymarching direction with the step size to get the
//sub-step size we need to take at each raymarching step
vec3 dirStep = geomDir * step_size ;
//for all samples along the ray
for (int i = 0; i < MAX_SAMPLES; i++) {
// advance ray by dirstep
dataPos = dataPos + dirStep;
float sample;
// convert texture coordinates
vec3 spPos;
spPos.x=dataPos.x/384;
spPos.y=(dataPos.y+(Pi/2))/Pi;
spPos.z=dataPos.z/(2*Pi);
// get value from texture
sample = texture(volume,dataPos).r;
vec4 c = colour_transfer(sample);
// alpha blending function
vFragColor.rgb = c.a * c.rgb + (1 - c.a) * vFragColor.a * vFragColor.rgb;
vFragColor.a = c.a + (1 - c.a) * vFragColor.a;
if( vFragColor.a>1.0)
break;
}
// vFragColor.rgba = texture(volume,dataPos);
}
These are the points that generate the bounding cube:
glm::vec3 vertices[8] = {glm::vec3(-0.5f, -0.5f, -0.5f),
glm::vec3(0.5f, -0.5f, -0.5f),
glm::vec3(0.5f, 0.5f, -0.5f),
glm::vec3(-0.5f, 0.5f, -0.5f),
glm::vec3(-0.5f, -0.5f, 0.5f),
glm::vec3(0.5f, -0.5f, 0.5f),
glm::vec3(0.5f, 0.5f, 0.5f),
glm::vec3(-0.5f, 0.5f, 0.5f)};
//unit cube indices
GLushort cubeIndices[36] = {0, 5, 4,
5, 0, 1,
3, 7, 6,
3, 6, 2,
7, 4, 6,
6, 4, 5,
2, 1, 3,
3, 1, 0,
3, 0, 7,
7, 0, 4,
6, 5, 2,
2, 5, 1};
This is the visualization that is generated:
I do not know what you are rendering or how. There are many techniques and configurations which can achieve this. I usually use a single-pass, single-quad render covering the screen/view, while the geometry/scene is passed as a texture. As you have your object in a 3D texture, I think you should go this way too. This is how it is done (assuming perspective and a uniform spherical voxel grid as a 3D texture):
CPU side code
Simply render a single quad covering the scene/view. To make this simpler and more precise, I recommend using your sphere's local coordinate system for the camera matrix passed to the shaders (it will ease the ray/sphere intersection computations a lot).
Vertex
Here you should cast/compute the ray position and direction for each vertex and pass them to the fragment shader so they are interpolated for each pixel on the screen/view.
So the camera is described by its position (focal point) and view direction (usually the -Z axis in perspective OpenGL). The ray is cast from the focal point (0,0,0) in camera local coordinates into the znear plane (x,y,-znear), also in camera local coordinates, where x,y is the pixel screen position with aspect-ratio correction applied if the screen/view is not square.
So you just convert these two points into sphere local coordinates (still Cartesian).
The ray direction is then just the subtraction of the two points...
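A minimal GLSL sketch of that construction (hedged: pos is the fullscreen-quad vertex, and znear, zoom, aspect and the matrix tm_eye are assumed names, tm_eye transforming camera local into sphere local coordinates; the [Edit1] vertex shader below does the equivalent with the quad plane at z=0 and the focal point at -focal_length):
// camera local coordinates: focal point at the origin, pixel on the znear plane
vec4 p0 = tm_eye * vec4(0.0, 0.0, 0.0, 1.0);                           // focal point -> sphere local
vec4 p1 = tm_eye * vec4(pos.x/(zoom*aspect), pos.y/zoom, -znear, 1.0); // near plane point -> sphere local
ray_pos = p1.xyz;                     // ray starts on the near plane
ray_dir = normalize(p1.xyz - p0.xyz); // direction is the subtraction of the two points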
Fragment
First normalize the ray direction passed from the vertex shader (due to interpolation it will not be a unit vector). After that, simply test the ray/sphere intersection for each radius of the sphere voxel grid from outward to inward, so test spheres from rmax down to rmax/n, where rmax is the max radius your 3D texture can have and n is its resolution on the axis corresponding to radius r.
On each hit, convert the Cartesian intersection position to spherical coordinates. Convert them to texture coordinates s,t,p, fetch the voxel intensity, and apply it to the color (how depends on what and how you are rendering); see the sketch after this list.
So if your texture coordinates are (r,theta,phi), assuming phi is longitude, the angles are normalized to <-Pi/2,+Pi/2> and <0,2*Pi>, and rmax is the max radius of the 3D texture, then:
s = r/rmax
t = (theta+(Pi/2))/Pi
p = phi/(2*Pi)
If your sphere is not transparent, then stop on the first hit with a non-empty voxel intensity. Otherwise update the ray start position and do this whole bullet again until the ray leaves the scene BBOX or no intersection occurs.
You can also add Snell's law (reflection and refraction) by splitting the ray on object boundary hits...
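Put together, the per-hit mapping might look like this minimal GLSL sketch (the helper name sphere_texcoord is just for illustration; it assumes the hit position p is already in sphere local Cartesian coordinates and rmax is the outer sphere radius):
const float pi = 3.1415926535897932384626433832795;
vec3 sphere_texcoord(vec3 p, float rmax)
{
    float r     = length(p);          // radius
    float theta = asin(p.z / r);      // latitude in <-Pi/2,+Pi/2>
    float phi   = atan(p.y, p.x);     // longitude in <-Pi,+Pi> (two-argument atan keeps the quadrant)
    if (phi < 0.0) phi += 2.0*pi;     // remap longitude to <0,2*Pi>
    float s = r/rmax;                 // s = r/rmax
    float t = (theta+(0.5*pi))/pi;    // t = (theta+(Pi/2))/Pi
    float q = phi/(2.0*pi);           // p = phi/(2*Pi)
    return vec3(s,t,q);               // reorder here if your texture axes differ
}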
Here are some related QAs using this technique or having valid info that will help you achieve this:
GLSL atmospheric scattering - this is almost the same as what you should do
ray and ellipsoid intersection accuracy improvement - math for the intersections
Curved Frosted Glass Shader? - subsurface scattering
GLSL back raytrace through 3D mesh - reflections and refractions in geometry inside a 2D texture
GLSL back raytrace through 3D volume - 3D Cartesian volume inside a 3D texture
[Edit1] example (after the input 3D texture was finally posted)
So when I put all the stuff above (and in the comments) together, I came up with this.
CPU side code:
//---------------------------------------------------------------------------
//--- GLSL Raytrace system ver: 1.000 ---------------------------------------
//---------------------------------------------------------------------------
#ifndef _raytrace_spherical_volume_h
#define _raytrace_spherical_volume_h
//---------------------------------------------------------------------------
class SphericalVolume3D
{
public:
bool _init; // has been initiated ?
GLuint txrvol; // SphericalVolume3D texture at GPU side
int xs,ys,zs;
float eye[16]; // direct camera matrix
float aspect,focal_length;
SphericalVolume3D() { _init=false; txrvol=-1; xs=0; ys=0; zs=0; aspect=1.0; focal_length=1.0; }
SphericalVolume3D(SphericalVolume3D& a) { *this=a; }
~SphericalVolume3D() { gl_exit(); }
SphericalVolume3D* operator = (const SphericalVolume3D *a) { *this=*a; return this; }
//SphericalVolume3D* operator = (const SphericalVolume3D &a) { ...copy... return this; }
// init/exit
void gl_init();
void gl_exit();
// render
void glsl_draw(GLint prog_id);
};
//---------------------------------------------------------------------------
void SphericalVolume3D::gl_init()
{
if (_init) return; _init=true;
// load 3D texture from file into CPU side memory
int hnd,siz; BYTE *dat;
hnd=FileOpen("Texture3D_F32.dat",fmOpenRead);
siz=FileSeek(hnd,0,2);
FileSeek(hnd,0,0);
dat=new BYTE[siz];
FileRead(hnd,dat,siz);
FileClose(hnd);
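// optional debug block: change if (0) to if (1) to overwrite the whole
// volume with a constant value (handy for testing the raytracer without data)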
if (0)
{
int i,n=siz/sizeof(GLfloat);
GLfloat *p=(GLfloat*)dat;
for (i=0;i<n;i++) p[i]=100.5;
}
// copy it to GPU as 3D texture
// glClampColorARB(GL_CLAMP_VERTEX_COLOR_ARB, GL_FALSE);
// glClampColorARB(GL_CLAMP_READ_COLOR_ARB, GL_FALSE);
// glClampColorARB(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_FALSE);
glGenTextures(1,&txrvol);
glEnable(GL_TEXTURE_3D);
glBindTexture(GL_TEXTURE_3D,txrvol);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE,GL_MODULATE);
xs=384;
ys= 15;
zs=768;
glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, xs,ys,zs, 0, GL_RED, GL_FLOAT, dat);
glBindTexture(GL_TEXTURE_3D,0);
glDisable(GL_TEXTURE_3D);
delete[] dat;
}
//---------------------------------------------------------------------------
void SphericalVolume3D::gl_exit()
{
if (!_init) return; _init=false;
glDeleteTextures(1,&txrvol);
}
//---------------------------------------------------------------------------
void SphericalVolume3D::glsl_draw(GLint prog_id)
{
GLint ix;
const int txru_vol=0;
glUseProgram(prog_id);
// uniforms
ix=glGetUniformLocation(prog_id,"zoom" ); glUniform1f(ix,1.0);
ix=glGetUniformLocation(prog_id,"aspect" ); glUniform1f(ix,aspect);
ix=glGetUniformLocation(prog_id,"focal_length"); glUniform1f(ix,focal_length);
ix=glGetUniformLocation(prog_id,"vol_xs" ); glUniform1i(ix,xs);
ix=glGetUniformLocation(prog_id,"vol_ys" ); glUniform1i(ix,ys);
ix=glGetUniformLocation(prog_id,"vol_zs" ); glUniform1i(ix,zs);
ix=glGetUniformLocation(prog_id,"vol_txr" ); glUniform1i(ix,txru_vol);
ix=glGetUniformLocation(prog_id,"tm_eye" ); glUniformMatrix4fv(ix,1,false,eye);
glActiveTexture(GL_TEXTURE0+txru_vol);
glEnable(GL_TEXTURE_3D);
glBindTexture(GL_TEXTURE_3D,txrvol);
// this should be a VAO/VBO
glColor4f(1.0,1.0,1.0,1.0);
glBegin(GL_QUADS);
glVertex2f(-1.0,-1.0);
glVertex2f(-1.0,+1.0);
glVertex2f(+1.0,+1.0);
glVertex2f(+1.0,-1.0);
glEnd();
glActiveTexture(GL_TEXTURE0+txru_vol);
glBindTexture(GL_TEXTURE_3D,0);
glDisable(GL_TEXTURE_3D);
glUseProgram(0);
}
//---------------------------------------------------------------------------
#endif
//---------------------------------------------------------------------------
Call init on app start when GL is already initialized, exit before app exit while GL still works, and draw when needed... The code is C++/VCL based, so port it to your environment (file access, strings, etc.). I also use the 3D texture in binary form, as loading an 85 MByte ASCII file is a bit too much for my taste.
Vertex:
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
uniform float aspect;
uniform float focal_length;
uniform float zoom;
uniform mat4x4 tm_eye;
layout(location=0) in vec2 pos;
out smooth vec3 ray_pos; // ray start position
out smooth vec3 ray_dir; // ray start direction
//------------------------------------------------------------------
void main(void)
{
vec4 p;
// perspective projection
p=tm_eye*vec4(pos.x/(zoom*aspect),pos.y/zoom,0.0,1.0);
ray_pos=p.xyz;
p-=tm_eye*vec4(0.0,0.0,-focal_length,1.0);
ray_dir=normalize(p.xyz);
gl_Position=vec4(pos,0.0,1.0);
}
//------------------------------------------------------------------
It's more or less a copy from the volumetric ray tracer link.
Fragment:
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
// Ray tracer ver: 1.000
//------------------------------------------------------------------
in smooth vec3 ray_pos; // ray start position
in smooth vec3 ray_dir; // ray start direction
uniform int vol_xs, // texture resolution
vol_ys,
vol_zs;
uniform sampler3D vol_txr; // scene mesh data texture
out layout(location=0) vec4 frag_col;
//---------------------------------------------------------------------------
// compute length of ray(p0,dp) to intersection with ellipsoid((0,0,0),r) -> view_depth_l0,1
// where r.x is ellipsoid rx^-2, r.y = ry^-2 and r.z = rz^-2
float view_depth_l0=-1.0,view_depth_l1=-1.0;
bool _view_depth(vec3 _p0,vec3 _dp,vec3 _r)
{
double a,b,c,d,l0,l1;
dvec3 p0,dp,r;
p0=dvec3(_p0);
dp=dvec3(_dp);
r =dvec3(_r );
view_depth_l0=-1.0;
view_depth_l1=-1.0;
a=(dp.x*dp.x*r.x)
+(dp.y*dp.y*r.y)
+(dp.z*dp.z*r.z); a*=2.0;
b=(p0.x*dp.x*r.x)
+(p0.y*dp.y*r.y)
+(p0.z*dp.z*r.z); b*=2.0;
c=(p0.x*p0.x*r.x)
+(p0.y*p0.y*r.y)
+(p0.z*p0.z*r.z)-1.0;
d=((b*b)-(2.0*a*c));
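// note: a and b above carry an extra factor of 2 (a = 2*A, b = 2*B for the
// textbook coefficients A,B of A*l*l + 2*B*l + c = 0), so the discriminant
// b*b - 2*a*c equals 4*(B*B - A*c) and the roots (-b +- sqrt(d))/a below
// reduce to the usual (-B +- sqrt(B*B - A*c))/A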
if (d<0.0) return false;
d=sqrt(d);
l0=(-b+d)/a;
l1=(-b-d)/a;
if (abs(l0)>abs(l1)) { a=l0; l0=l1; l1=a; }
if (l0<0.0) { a=l0; l0=l1; l1=a; }
if (l0<0.0) return false;
view_depth_l0=float(l0);
view_depth_l1=float(l1);
return true;
}
//---------------------------------------------------------------------------
const float pi =3.1415926535897932384626433832795;
const float pi2=6.2831853071795864769252867665590;
float atanxy(float x,float y) // atan2 return < 0 , 2.0*M_PI >
{
int sx,sy;
float a;
const float _zero=1.0e-30;
sx=0; if (x<-_zero) sx=-1; if (x>+_zero) sx=+1;
sy=0; if (y<-_zero) sy=-1; if (y>+_zero) sy=+1;
if ((sy==0)&&(sx==0)) return 0;
if ((sx==0)&&(sy> 0)) return 0.5*pi;
if ((sx==0)&&(sy< 0)) return 1.5*pi;
if ((sy==0)&&(sx> 0)) return 0;
if ((sy==0)&&(sx< 0)) return pi;
a=y/x; if (a<0) a=-a;
a=atan(a);
if ((x>0)&&(y>0)) a=a;
if ((x<0)&&(y>0)) a=pi-a;
if ((x<0)&&(y<0)) a=pi+a;
if ((x>0)&&(y<0)) a=pi2-a;
return a;
}
//---------------------------------------------------------------------------
void main(void)
{
float a,b,r,_rr,c;
const float dr=1.0/float(vol_ys); // r step
const float saturation=1000.0; // color saturation voxel value
vec3 rr,p=ray_pos,dp=normalize(ray_dir);
for (c=0.0,r=1.0;r>1e-10;r-=dr) // check all radiuses inwards
{
_rr=1.0/(r*r); rr=vec3(_rr,_rr,_rr);
if (_view_depth(p,dp,rr)) // if ray hits sphere
{
p+=view_depth_l0*dp; // shift ray start position to the hit
a=atanxy(p.x,p.y); // convert to spherical a,b,r
b=asin(p.z/r);
if (a<0.0) a+=pi2; // correct ranges...
b+=0.5*pi;
a/=pi2;
b/=pi;
// here do your stuff
c=texture(vol_txr,vec3(b,r,a)).r;// fetch voxel
if (c>saturation){ c=saturation; break; }
break;
}
}
c/=saturation;
frag_col=vec4(c,c,c,1.0);
}
//---------------------------------------------------------------------------
It's a slight modification of the volumetric ray tracer link.
Beware that I assume that the axes inside the texture are:
latitude, r, longitude
implied by the resolutions (longitude should be double the resolution of latitude). I have no clue what the voxel cell values mean, so I sum them like intensity/density for the final color, and once the saturation sum is reached I stop the raytrace; instead you should do whatever computation you intend. If that axis order does not match your data, just reorder the axes in the fragment shader fetch, as sketched below.
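For example (a hedged sketch using the same a,b,r variables as the fragment shader above; which line is right depends on how your data was exported):
// axes (latitude, r, longitude) - what the code above assumes:
c=texture(vol_txr,vec3(b,r,a)).r;
// axes (r, latitude, longitude) - matching the (384,15,768)=(r,theta,phi) dims from the question:
// c=texture(vol_txr,vec3(r,b,a)).r;
// axes (longitude, latitude, r):
// c=texture(vol_txr,vec3(a,b,r)).r;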
Here is a preview:
I used this camera matrix eye for it:
// globals
SphericalVolume3D vol;
// init (GL must be already working)
vol.gl_init();
// render
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDisable(GL_CULL_FACE);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0,0.0,-2.5);
glGetFloatv(GL_MODELVIEW_MATRIX,vol.eye);
vol.glsl_draw(prog_id);
glFlush();
SwapBuffers(hdc);
// exit (GL must be still working)
vol.gl_exit();
The ray/sphere hit is working properly, and the hit position in spherical coordinates is also working as it should, so the only things left are the axis order and the color arithmetic...