I have been playing around with OpenGL and shaders and got myself into shadow mapping.
Trying to follow tutorials on the Internet (ogldev and learnopengl), I got some unexpected results.
The issue is best described with a few screenshots (I have added a static quad displaying the depth framebuffer for debugging):
Somehow I managed to get shadows rendered on a ground quad once, with a static light (this commit). But the shadow pattern is, again, incorrect. I strongly suspect the model transformation matrix calculations here:
The way I render the scene is quite straightforward:
create the pipelines:
for mapping the shadows (filling the depth frame buffer)
for rendering the scene using the depth frame buffer
(extra) debugging one, rendering depth frame buffer to a static quad on a screen
fill the depth frame buffer: using the shadow mapping pipeline, render the scene from the light's point of view, using an orthographic projection
render the shaded scene: using the rendering pipeline with the depth frame buffer bound as the first texture, render the scene from the camera's point of view, using a perspective projection
This seems to be the algorithm described in all those shadow mapping tutorials out there. Yet, instead of a moiré effect (like in all of the tutorials), I get no shadow on the bottom plane whatsoever and weird artifacts (incorrect shadow mapping) on the 3D (chicken) model.
Interestingly enough, if I do not render (for both the shadow mapping and final rendering pass) the chicken model, the plane is lit with the same weird pattern:
I also had to remove any normal transformations from the fragment shader and disable face culling to make the ground plane lit. With front-face culling the plane does not appear in the shadow map (depth buffer) at all, which makes sense: seen from the light, a single quad has only front faces to offer, so front-face culling discards it entirely.
I assume the following might be causing this issue:
wrong depth frame buffer setup (data format or texture parameters)
flipped depth frame buffer texture
wrong shadow calculations in rendering shaders
wrong light matrices (view & projection) setup
wrong matrix calculations in the rendering shaders (given the model transformation matrices for both chicken model and the quad contain both rotation and scaling)
Unfortunately, I have run out of ideas on how to even assess the above assumptions.
Looking for any help on the matter (also feel free to criticize any of my approaches, including C++, CMake, OpenGL and computer graphics).
The full solution source code is available on GitHub, but for convenience I have placed the heavily cut source code below.
shadow-mapping.vert:
#version 410
layout (location = 0) in vec3 vertexPosition;
out gl_PerVertex
{
vec4 gl_Position;
};
uniform mat4 lightSpaceMatrix;
uniform mat4 modelTransformation;
void main()
{
gl_Position = lightSpaceMatrix * modelTransformation * vec4(vertexPosition, 1.0);
}
shadow-mapping.frag:
#version 410
layout (location = 0) out float fragmentDepth;
void main()
{
fragmentDepth = gl_FragCoord.z;
}
shadow-rendering.vert:
#version 410
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec3 vertexNormal;
layout (location = 2) in vec2 vertexTextureCoord;
out VS_OUT
{
vec3 fragmentPosition;
vec3 normal;
vec2 textureCoord;
vec4 fragmentPositionInLightSpace;
} vsOut;
out gl_PerVertex {
vec4 gl_Position;
};
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
uniform mat4 lightSpaceMatrix;
void main()
{
vsOut.fragmentPosition = vec3(model * vec4(vertexPosition, 1.0));
vsOut.normal = transpose(inverse(mat3(model))) * vertexNormal;
vsOut.textureCoord = vertexTextureCoord;
vsOut.fragmentPositionInLightSpace = lightSpaceMatrix * model * vec4(vertexPosition, 1.0);
gl_Position = projection * view * model * vec4(vertexPosition, 1.0);
}
shadow-rendering.frag:
#version 410
layout (location = 0) out vec4 fragmentColor;
in VS_OUT {
vec3 fragmentPosition;
vec3 normal;
vec2 textureCoord;
vec4 fragmentPositionInLightSpace;
} fsIn;
uniform sampler2D shadowMap;
uniform sampler2D diffuseTexture;
uniform vec3 lightPosition;
uniform vec3 lightColor;
uniform vec3 cameraPosition;
float shadowCalculation()
{
vec2 shadowMapCoord = fsIn.fragmentPositionInLightSpace.xy * 0.5 + 0.5;
float occluderDepth = texture(shadowMap, shadowMapCoord).r;
float thisDepth = fsIn.fragmentPositionInLightSpace.z * 0.5 + 0.5;
return occluderDepth < thisDepth ? 1.0 : 0.0;
}
void main()
{
vec3 color = texture(diffuseTexture, fsIn.textureCoord).rgb;
vec3 normal = normalize(fsIn.normal);
// ambient
vec3 ambient = 0.3 * color;
// diffuse
vec3 lightDirection = normalize(lightPosition - fsIn.fragmentPosition);
float diff = max(dot(lightDirection, normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDirection = normalize(cameraPosition - fsIn.fragmentPosition);
vec3 halfwayDirection = normalize(lightDirection + viewDirection);
float spec = pow(max(dot(normal, halfwayDirection), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = shadowCalculation();
vec3 lighting = ((shadow * (diffuse + specular)) + ambient) * color;
fragmentColor = vec4(lighting, 1.0);
}
main.cpp, setting up shaders and frame buffer:
// loading the shadow mapping shaders
auto shadowMappingVertexProgram = ...;
auto shadowMappingFragmentProgram = ...;
auto shadowMappingLightSpaceUniform = shadowMappingVertexProgram->getUniform<glm::mat4>("lightSpaceMatrix");
auto shadowMappingModelTransformationUniform = shadowMappingVertexProgram->getUniform<glm::mat4>("modelTransformation");
auto shadowMappingPipeline = std::make_unique<globjects::ProgramPipeline>();
shadowMappingPipeline->useStages(shadowMappingVertexProgram.get(), gl::GL_VERTEX_SHADER_BIT);
shadowMappingPipeline->useStages(shadowMappingFragmentProgram.get(), gl::GL_FRAGMENT_SHADER_BIT);
// (omitted) loading the depth frame buffer debugging shaders and creating a pipeline here
// loading the rendering shaders
auto shadowRenderingVertexProgram = ...;
auto shadowRenderingFragmentProgram = ...;
auto shadowRenderingModelTransformationUniform = shadowRenderingVertexProgram->getUniform<glm::mat4>("model");
auto shadowRenderingViewTransformationUniform = shadowRenderingVertexProgram->getUniform<glm::mat4>("view");
auto shadowRenderingProjectionTransformationUniform = shadowRenderingVertexProgram->getUniform<glm::mat4>("projection");
auto shadowRenderingLightSpaceMatrixUniform = shadowRenderingVertexProgram->getUniform<glm::mat4>("lightSpaceMatrix");
auto shadowRenderingLightPositionUniform = shadowRenderingFragmentProgram->getUniform<glm::vec3>("lightPosition");
auto shadowRenderingLightColorUniform = shadowRenderingFragmentProgram->getUniform<glm::vec3>("lightColor");
auto shadowRenderingCameraPositionUniform = shadowRenderingFragmentProgram->getUniform<glm::vec3>("cameraPosition");
auto shadowRenderingPipeline = std::make_unique<globjects::ProgramPipeline>();
shadowRenderingPipeline->useStages(shadowRenderingVertexProgram.get(), gl::GL_VERTEX_SHADER_BIT);
shadowRenderingPipeline->useStages(shadowRenderingFragmentProgram.get(), gl::GL_FRAGMENT_SHADER_BIT);
// loading the chicken model
auto chickenModel = Model::fromAiNode(chickenScene, chickenScene->mRootNode, { "media" });
// INFO: this transformation is hard-coded specifically for Chicken.3ds model
chickenModel->setTransformation(glm::rotate(glm::scale(glm::mat4(1.0f), glm::vec3(0.01f)), glm::radians(-90.0f), glm::vec3(1.0f, 0, 0)));
// loading the quad model
auto quadModel = Model::fromAiNode(quadScene, quadScene->mRootNode);
// INFO: this transformation is hard-coded specifically for quad.obj model
quadModel->setTransformation(glm::rotate(glm::scale(glm::translate(glm::mat4(1.0f), glm::vec3(-5, 0, 5)), glm::vec3(10.0f, 0, 10.0f)), glm::radians(-90.0f), glm::vec3(1.0f, 0, 0)));
// loading the floor texture
sf::Image textureImage = ...;
auto defaultTexture = std::make_unique<globjects::Texture>(static_cast<gl::GLenum>(GL_TEXTURE_2D));
defaultTexture->setParameter(static_cast<gl::GLenum>(GL_TEXTURE_MIN_FILTER), static_cast<GLint>(GL_LINEAR));
defaultTexture->setParameter(static_cast<gl::GLenum>(GL_TEXTURE_MAG_FILTER), static_cast<GLint>(GL_LINEAR));
defaultTexture->image2D(0, static_cast<gl::GLenum>(GL_RGBA8), glm::vec2(textureImage.getSize().x, textureImage.getSize().y), 0, static_cast<gl::GLenum>(GL_RGBA), static_cast<gl::GLenum>(GL_UNSIGNED_BYTE), reinterpret_cast<const gl::GLvoid*>(textureImage.getPixelsPtr()));
// initializing the depth frame buffer
auto shadowMapTexture = std::make_unique<globjects::Texture>(static_cast<gl::GLenum>(GL_TEXTURE_2D));
shadowMapTexture->setParameter(static_cast<gl::GLenum>(GL_TEXTURE_MIN_FILTER), static_cast<gl::GLenum>(GL_LINEAR));
shadowMapTexture->setParameter(static_cast<gl::GLenum>(GL_TEXTURE_MAG_FILTER), static_cast<gl::GLenum>(GL_LINEAR));
shadowMapTexture->setParameter(static_cast<gl::GLenum>(GL_TEXTURE_WRAP_S), static_cast<gl::GLenum>(GL_CLAMP_TO_BORDER));
shadowMapTexture->setParameter(static_cast<gl::GLenum>(GL_TEXTURE_WRAP_T), static_cast<gl::GLenum>(GL_CLAMP_TO_BORDER));
shadowMapTexture->setParameter(static_cast<gl::GLenum>(GL_TEXTURE_BORDER_COLOR), glm::vec4(1.0f, 1.0f, 1.0f, 1.0f));
shadowMapTexture->image2D(0, static_cast<gl::GLenum>(GL_DEPTH_COMPONENT), glm::vec2(window.getSize().x, window.getSize().y), 0, static_cast<gl::GLenum>(GL_DEPTH_COMPONENT), static_cast<gl::GLenum>(GL_FLOAT), nullptr);
auto framebuffer = std::make_unique<globjects::Framebuffer>();
framebuffer->attachTexture(static_cast<gl::GLenum>(GL_DEPTH_ATTACHMENT), shadowMapTexture.get());
main.cpp, rendering (main loop):
// (omitted) event handling, camera updates go here
glm::mat4 cameraProjection = glm::perspective(glm::radians(fov), (float) window.getSize().x / (float) window.getSize().y, 0.1f, 100.0f);
glm::mat4 cameraView = glm::lookAt(cameraPos, cameraPos + cameraForward, cameraUp);
// moving light together with the camera, for debugging purposes
glm::vec3 lightPosition = cameraPos;
// light settings
const float nearPlane = 1.0f;
const float farPlane = 10.0f;
glm::mat4 lightProjection = glm::ortho(-5.0f, 5.0f, -5.0f, 5.0f, nearPlane, farPlane);
glm::mat4 lightView = glm::lookAt(lightPosition, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
glm::mat4 lightSpaceMatrix = lightProjection * lightView;
::glViewport(0, 0, static_cast<GLsizei>(window.getSize().x), static_cast<GLsizei>(window.getSize().y));
// first render pass - shadow mapping
framebuffer->bind();
::glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
::glClear(GL_DEPTH_BUFFER_BIT);
framebuffer->clearBuffer(static_cast<gl::GLenum>(GL_DEPTH), 0, glm::vec4(1.0f));
glEnable(GL_DEPTH_TEST);
// cull front faces to prevent peter panning the generated shadow map
glCullFace(GL_FRONT);
shadowMappingPipeline->use();
shadowMappingLightSpaceUniform->set(lightSpaceMatrix);
shadowMappingModelTransformationUniform->set(chickenModel->getTransformation());
chickenModel->draw();
shadowMappingModelTransformationUniform->set(quadModel->getTransformation());
quadModel->draw();
framebuffer->unbind();
shadowMappingPipeline->release();
glCullFace(GL_BACK);
// second pass - switch to normal shader and render picture with depth information to the viewport
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
shadowRenderingPipeline->use();
shadowRenderingLightPositionUniform->set(lightPosition);
shadowRenderingLightColorUniform->set(glm::vec3(1.0, 1.0, 1.0));
shadowRenderingCameraPositionUniform->set(cameraPos);
shadowRenderingProjectionTransformationUniform->set(cameraProjection);
shadowRenderingViewTransformationUniform->set(cameraView);
shadowRenderingLightSpaceMatrixUniform->set(lightSpaceMatrix);
// draw chicken
shadowMapTexture->bind();
shadowRenderingModelTransformationUniform->set(chickenModel->getTransformation());
chickenModel->draw();
shadowRenderingModelTransformationUniform->set(quadModel->getTransformation());
defaultTexture->bind();
quadModel->draw();
defaultTexture->unbind();
shadowMapTexture->unbind();
shadowRenderingPipeline->release();
// (omitted) render the debugging quad with depth (shadow) map
window.display();
As shameful as it might be, the issue was with the wrong texture being bound.
The globjects library, which I use to get a few nicer abstractions over OpenGL, actually does not provide any smart logic around texture binding (as I blindly assumed). So using just Texture::bind() and Texture::unbind() won't automagically keep track of how many textures have been bound and increment the texture unit index.
E.g. it does not behave (roughly) like this:
static int boundTextureIndex = -1;
void Texture::bind() {
glActiveTexture(GL_TEXTURE0 + (++boundTextureIndex));
glBindTexture(this->textureType, this->textureId);
}
void Texture::unbind() {
--boundTextureIndex;
}
So after changing texture->bind() to texture->bindActive(0) followed by shaderProgram->setUniform("texture", 0), I finally got the moiré effect and correct shadow mapping:
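In essence, the fix looks like this (a minimal sketch based on the change described above; the unit numbers are illustrative):
// bind each texture to an explicit texture unit...
shadowMapTexture->bindActive(0);
defaultTexture->bindActive(1);
// ...and point the sampler uniforms at those units
shadowRenderingFragmentProgram->setUniform("shadowMap", 0);
shadowRenderingFragmentProgram->setUniform("diffuseTexture", 1);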
Full change is in this commit.
I am new to shaders, and I want to animate an object with the vertex shader.
Right now I just want to move it by a constant offset. For some reason, instead of moving in the x-direction of the world, it moves in the x-direction of the camera. (So whenever I turn the camera, the object rotates with me.)
The project is in Processing, but I don't think that affects the shader.
THE PROCESSING CODE:
PShader sdr;
void setup() {
size(1000, 1000, P3D);
noStroke();
sdr = loadShader("shdFrag.glsl", "shdVert.glsl");
}
void draw() {
background(200);
// Set camera
camera(0, -300, 700, mouseX-500, 0, 200, 0, 1, 0);
// Ground
resetShader();
beginShape();
fill(100);
vertex(-500, 0, 500);
vertex( 500, 0, 500);
vertex( 500, 0, -500);
vertex(-500, 0, -500);
endShape();
// Red Sphere
shader(sdr);
fill(255, 0, 0);
sphere(100);
}
VERTEX SHADER:
uniform mat4 transform;
attribute vec4 position;
attribute vec4 color;
out vec4 vertColor;
void main() {
vec4 pos = position;
pos.x += 300;
vertColor = color;
gl_Position = transform * pos;
}
FRAGMENT SHADER:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
in vec4 vertColor;
void main() {
vec4 color = vertColor;
gl_FragColor = vec4(color.xyz, 1);
}
A GIF of what is happening:
the scene with a sphere
I am implementing a volume rendering algorithm, "GPU ray casting single pass". For this, I use a float array of intensity values as a 3D texture (this 3D texture describes a regular 3D grid in spherical coordinates).
Here are some example values from the array:
75.839354473071637,
64.083049468866022,
65.253933716444365,
79.992431196592577,
84.411485976957096,
0.0000000000000000,
82.020319431382831,
76.808403454586994,
79.974774618246158,
0.0000000000000000,
91.127273013466336,
84.009956557448433,
90.221356094672814,
87.567422484025627,
71.940263118478072,
0.0000000000000000,
0.0000000000000000,
74.487058398181944,
..................,
..................
(The complete data is here: https://drive.google.com/file/d/1lbXzRucUseF-ITzFgxqeLTd0WglJJOoz/view?usp=sharing)
The dimensions of the spherical grid are (r,theta,phi)=(384,15,768), and this is how the texture is loaded:
glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, 384, 15, 768, 0, GL_RED, GL_FLOAT, dataArray)
And this is an image of my visualization:
The problem is that the visualization should be a disk, or at least a similar form.
I think the problem is that I do not specify the texture coordinates correctly (in spherical coordinates).
This is the vertex shader code:
#version 330 core
layout(location = 0) in vec3 vVertex; //object space vertex position
//uniform
uniform mat4 MVP; //combined modelview projection matrix
smooth out vec3 vUV; //3D texture coordinates for texture lookup in the fragment shader
void main()
{
//get the clipspace position
gl_Position = MVP*vec4(vVertex.xyz,1);
//get the 3D texture coordinates by adding (0.5,0.5,0.5) to the object space
//vertex position. Since the unit cube is at origin (min: (-0.5,-0.5,-0.5) and max: (0.5,0.5,0.5))
//adding (0.5,0.5,0.5) to the unit cube object space position gives us values from (0,0,0) to
//(1,1,1)
vUV = vVertex + vec3(0.5);
}
And this is the fragment shader code:
#version 330 core
layout(location = 0) out vec4 vFragColor; //fragment shader output
smooth in vec3 vUV; //3D texture coordinates from vertex shader
//interpolated by rasterizer
//uniforms
uniform sampler3D volume; //volume dataset
uniform vec3 camPos; //camera position
uniform vec3 step_size; //ray step size
//constants
const int MAX_SAMPLES = 300; //total samples for each ray march step
const vec3 texMin = vec3(0); //minimum texture access coordinate
const vec3 texMax = vec3(1); //maximum texture access coordinate
vec4 colour_transfer(float intensity)
{
vec3 high = vec3(100.0, 20.0, 10.0);
// vec3 low = vec3(0.0, 0.0, 0.0);
float alpha = (exp(intensity) - 1.0) / (exp(1.0) - 1.0);
return vec4(intensity * high, alpha);
}
void main()
{
//get the 3D texture coordinates for lookup into the volume dataset
vec3 dataPos = vUV;
//Getting the ray marching direction:
//get the object space position by subtracting 0.5 from the
//3D texture coordinates. Then subtract it from camera position
//and normalize to get the ray marching direction
vec3 geomDir = normalize((vUV-vec3(0.5)) - camPos);
//multiply the raymarching direction with the step size to get the
//sub-step size we need to take at each raymarching step
vec3 dirStep = geomDir * step_size;
//flag to indicate if the raymarch loop should terminate
bool stop = false;
//for all samples along the ray
for (int i = 0; i < MAX_SAMPLES; i++) {
// advance ray by dirstep
dataPos = dataPos + dirStep;
stop = dot(sign(dataPos-texMin),sign(texMax-dataPos)) < 3.0;
//if the stopping condition is true we break out of the ray marching loop
if (stop)
break;
// data fetching from the red channel of volume texture
float sample = texture(volume, dataPos).r;
vec4 c = colour_transfer(sample);
vFragColor.rgb = c.a * c.rgb + (1 - c.a) * vFragColor.a * vFragColor.rgb;
vFragColor.a = c.a + (1 - c.a) * vFragColor.a;
//early ray termination
//if the currently composited colour alpha is already fully saturated
//we terminated the loop
if( vFragColor.a>0.99)
break;
}
}
How can I specify the texture coordinates so that the information in the 3D texture, which is in spherical coordinates, is visualized correctly?
UPDATE:
vertex shader :
#version 330 core
layout(location = 0) in vec3 vVertex; //object space vertex position
//uniform
uniform mat4 MVP; //combined modelview projection matrix
smooth out vec3 vUV; //3D texture coordinates for texture lookup in the fragment shader
void main()
{
//get the clipspace position
gl_Position = MVP*vec4(vVertex.xyz,1);
//get the 3D texture coordinates by adding (0.5,0.5,0.5) to the object space
//vertex position. Since the unit cube is at origin (min: (-0.5,- 0.5,-0.5) and max: (0.5,0.5,0.5))
//adding (0.5,0.5,0.5) to the unit cube object space position gives us values from (0,0,0) to
//(1,1,1)
vUV = vVertex + vec3(0.5);
}
And fragment shader:
#version 330 core
#define Pi 3.1415926535897932384626433832795
layout(location = 0) out vec4 vFragColor; //fragment shader output
smooth in vec3 vUV; //3D texture coordinates from vertex shader
//interpolated by rasterizer
//uniforms
uniform sampler3D volume; //volume dataset
uniform vec3 camPos; //camera position
uniform vec3 step_size; //ray step size
//constants
const int MAX_SAMPLES = 200; //total samples for each ray march step
const vec3 texMin = vec3(0); //minimum texture access coordinate
const vec3 texMax = vec3(1); //maximum texture access coordinate
// transfer function that assigns a color and alpha from the sample intensity
vec4 colour_transfer(float intensity)
{
vec3 high = vec3(100.0, 20.0, 10.0);
// vec3 low = vec3(0.0, 0.0, 0.0);
float alpha = (exp(intensity) - 1.0) / (exp(1.0) - 1.0);
return vec4(intensity * high, alpha);
}
// this function transforms a vector from Cartesian to spherical coordinates
vec3 cart2Sphe(vec3 cart){
vec3 sphe;
sphe.x = sqrt(cart.x*cart.x+cart.y*cart.y+cart.z*cart.z);
sphe.z = atan(cart.y/cart.x);
sphe.y = atan(sqrt(cart.x*cart.x+cart.y*cart.y)/cart.z);
return sphe;
}
void main()
{
//get the 3D texture coordinates for lookup into the volume dataset
vec3 dataPos = vUV;
//Getting the ray marching direction:
//get the object space position by subtracting 0.5 from the
//3D texture coordinates. Then subtract it from camera position
//and normalize to get the ray marching direction
vec3 vec=(vUV-vec3(0.5));
vec3 spheVec=cart2Sphe(vec); // transform position to spherical
vec3 sphePos=cart2Sphe(camPos); //transform camPos to spherical
vec3 geomDir= normalize(spheVec-sphePos); // ray direction
//multiply the raymarching direction with the step size to get the
//sub-step size we need to take at each raymarching step
vec3 dirStep = geomDir * step_size ;
//flag to indicate if the raymarch loop should terminate
//for all samples along the ray
for (int i = 0; i < MAX_SAMPLES; i++) {
// advance ray by dirstep
dataPos = dataPos + dirStep;
float sample;
// convert texture coordinates
vec3 spPos;
spPos.x=dataPos.x/384;
spPos.y=(dataPos.y+(Pi/2))/Pi;
spPos.z=dataPos.z/(2*Pi);
// get value from texture
sample = texture(volume,dataPos).r;
vec4 c = colour_transfer(sample);
// alpha blending function
vFragColor.rgb = c.a * c.rgb + (1 - c.a) * vFragColor.a * vFragColor.rgb;
vFragColor.a = c.a + (1 - c.a) * vFragColor.a;
if( vFragColor.a>1.0)
break;
}
// vFragColor.rgba = texture(volume,dataPos);
}
These are the points that generate the bounding cube:
glm::vec3 vertices[8] = {glm::vec3(-0.5f, -0.5f, -0.5f),
glm::vec3(0.5f, -0.5f, -0.5f),
glm::vec3(0.5f, 0.5f, -0.5f),
glm::vec3(-0.5f, 0.5f, -0.5f),
glm::vec3(-0.5f, -0.5f, 0.5f),
glm::vec3(0.5f, -0.5f, 0.5f),
glm::vec3(0.5f, 0.5f, 0.5f),
glm::vec3(-0.5f, 0.5f, 0.5f)};
//unit cube indices
GLushort cubeIndices[36] = {0, 5, 4,
5, 0, 1,
3, 7, 6,
3, 6, 2,
7, 4, 6,
6, 4, 5,
2, 1, 3,
3, 1, 0,
3, 0, 7,
7, 0, 4,
6, 5, 2,
2, 5, 1};
This is the visualization that is generated:
I do not know what you are rendering or how. There are many techniques and configurations which can achieve this. I usually use a single-pass, single-quad render covering the screen/view while the geometry/scene is passed as a texture. As you have your object in a 3D texture, I think you should go this way too. This is how it's done (assuming a perspective projection and a uniform spherical voxel grid as a 3D texture):
CPU side code
Simply render a single QUAD covering the screen/view. To make this simpler and more precise, I recommend using your sphere's local coordinate system for the camera matrix which is passed to the shaders (it will ease the ray/sphere intersection computations a lot).
Vertex
Here you should cast/compute the ray position and direction for each vertex and pass them to the fragment shader, so they are interpolated for each pixel on the screen/view.
So the camera is described by its position (focal point) and view direction (usually the -Z axis in perspective OpenGL). The ray is cast from the focal point (0,0,0) in camera local coordinates into the znear plane (x,y,-znear), also in camera local coordinates, where x,y is the pixel screen position with aspect ratio corrections applied if the screen/view is not a square.
So you just convert these two points into sphere local coordinates (still Cartesian).
The ray direction is just the subtraction of the two points...
Fragment
First normalize the ray direction passed from the vertex shader (due to interpolation it will not be a unit vector). After that, simply test the ray/sphere intersection for each radius of the spherical voxel grid from outward to inward, i.e. test spheres from rmax down to rmax/n, where rmax is the max radius your 3D texture can have and n is the resolution of the axis corresponding to radius r.
On each hit convert the Cartesian intersection position to spherical coordinates. Convert them to texture coordinates s,t,p, fetch the voxel intensity and apply it to the color (how depends on what and how you are rendering).
So if your texture coordinates are (r,theta,phi), assuming phi is longitude, the angles are normalized to <-Pi/2,+Pi/2> and <0,2*Pi>, and rmax is the max radius of the 3D texture, then:
s = r/rmax
t = (theta+(Pi/2))/Pi
p = phi/(2*Pi)
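In GLSL, that mapping might look like this (a sketch under the normalization assumptions above; the z-up convention is an assumption too):
const float pi = 3.1415926535897932384626433832795;
// Cartesian hit position -> normalized 3D texture coordinates (s,t,p)
vec3 sphericalTexCoord(vec3 hit, float rmax)
{
    float r     = length(hit);
    float theta = asin(hit.z / r);    // latitude in <-Pi/2,+Pi/2>
    float phi   = atan(hit.y, hit.x); // longitude in <-Pi,+Pi>
    if (phi < 0.0) phi += 2.0 * pi;   // shift longitude to <0,2*Pi>
    return vec3(r / rmax, (theta + 0.5 * pi) / pi, phi / (2.0 * pi));
}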
If your sphere is not transparent then stop on the first hit with a non-empty voxel intensity. Otherwise update the ray start position and do this whole bullet again until the ray goes out of the scene BBOX or no intersection occurs.
You can also add Snell's law (reflection/refraction) by splitting the ray on object boundary hits...
Here are some related QAs using this technique or having valid info that will help you achieve this:
GLSL atmospheric scattering this is almost the same as you should do.
ray and ellipsoid intersection accuracy improvement math for the intersections
Curved Frosted Glass Shader? sub surface scattering
GLSL back raytrace through 3D mesh reflections and refractions in geometry inside 2D texture
GLSL back raytrace through 3D volume 3D Cartesian volume inside 3D texture
[Edit1] example (after the input 3D texture was finally posted)
So when I put all the stuff above (and in the comments) together, I came up with this.
CPU side code:
//---------------------------------------------------------------------------
//--- GLSL Raytrace system ver: 1.000 ---------------------------------------
//---------------------------------------------------------------------------
#ifndef _raytrace_spherical_volume_h
#define _raytrace_spherical_volume_h
//---------------------------------------------------------------------------
class SphericalVolume3D
{
public:
bool _init; // has been initiated ?
GLuint txrvol; // SphericalVolume3D texture at GPU side
int xs,ys,zs;
float eye[16]; // direct camera matrix
float aspect,focal_length;
SphericalVolume3D() { _init=false; txrvol=-1; xs=0; ys=0; zs=0; aspect=1.0; focal_length=1.0; }
SphericalVolume3D(SphericalVolume3D& a) { *this=a; }
~SphericalVolume3D() { gl_exit(); }
SphericalVolume3D* operator = (const SphericalVolume3D *a) { *this=*a; return this; }
//SphericalVolume3D* operator = (const SphericalVolume3D &a) { ...copy... return this; }
// init/exit
void gl_init();
void gl_exit();
// render
void glsl_draw(GLint prog_id);
};
//---------------------------------------------------------------------------
void SphericalVolume3D::gl_init()
{
if (_init) return; _init=true;
// load 3D texture from file into CPU side memory
int hnd,siz; BYTE *dat;
hnd=FileOpen("Texture3D_F32.dat",fmOpenRead);
siz=FileSeek(hnd,0,2);
FileSeek(hnd,0,0);
dat=new BYTE[siz];
FileRead(hnd,dat,siz);
FileClose(hnd);
if (0)
{
int i,n=siz/sizeof(GLfloat);
GLfloat *p=(GLfloat*)dat;
for (i=0;i<n;i++) p[i]=100.5;
}
// copy it to GPU as 3D texture
// glClampColorARB(GL_CLAMP_VERTEX_COLOR_ARB, GL_FALSE);
// glClampColorARB(GL_CLAMP_READ_COLOR_ARB, GL_FALSE);
// glClampColorARB(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_FALSE);
glGenTextures(1,&txrvol);
glEnable(GL_TEXTURE_3D);
glBindTexture(GL_TEXTURE_3D,txrvol);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE,GL_MODULATE);
xs=384;
ys= 15;
zs=768;
glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, xs,ys,zs, 0, GL_RED, GL_FLOAT, dat);
glBindTexture(GL_TEXTURE_3D,0);
glDisable(GL_TEXTURE_3D);
delete[] dat;
}
//---------------------------------------------------------------------------
void SphericalVolume3D::gl_exit()
{
if (!_init) return; _init=false;
glDeleteTextures(1,&txrvol);
}
//---------------------------------------------------------------------------
void SphericalVolume3D::glsl_draw(GLint prog_id)
{
GLint ix;
const int txru_vol=0;
glUseProgram(prog_id);
// uniforms
ix=glGetUniformLocation(prog_id,"zoom" ); glUniform1f(ix,1.0);
ix=glGetUniformLocation(prog_id,"aspect" ); glUniform1f(ix,aspect);
ix=glGetUniformLocation(prog_id,"focal_length"); glUniform1f(ix,focal_length);
ix=glGetUniformLocation(prog_id,"vol_xs" ); glUniform1i(ix,xs);
ix=glGetUniformLocation(prog_id,"vol_ys" ); glUniform1i(ix,ys);
ix=glGetUniformLocation(prog_id,"vol_zs" ); glUniform1i(ix,zs);
ix=glGetUniformLocation(prog_id,"vol_txr" ); glUniform1i(ix,txru_vol);
ix=glGetUniformLocation(prog_id,"tm_eye" ); glUniformMatrix4fv(ix,1,false,eye);
glActiveTexture(GL_TEXTURE0+txru_vol);
glEnable(GL_TEXTURE_3D);
glBindTexture(GL_TEXTURE_3D,txrvol);
// this should be a VAO/VBO
glColor4f(1.0,1.0,1.0,1.0);
glBegin(GL_QUADS);
glVertex2f(-1.0,-1.0);
glVertex2f(-1.0,+1.0);
glVertex2f(+1.0,+1.0);
glVertex2f(+1.0,-1.0);
glEnd();
glActiveTexture(GL_TEXTURE0+txru_vol);
glBindTexture(GL_TEXTURE_3D,0);
glDisable(GL_TEXTURE_3D);
glUseProgram(0);
}
//---------------------------------------------------------------------------
#endif
//---------------------------------------------------------------------------
Call gl_init on app start when GL is already initialized, gl_exit before app exit while GL still works, and glsl_draw when needed... The code is C++/VCL based, so port it to your environment (file access, strings, etc.). I also use the 3D texture in binary form, as loading an 85 MByte ASCII file is a bit too much for my taste.
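If you need the same ASCII-to-binary conversion, a minimal standalone sketch (assuming the file is just comma/whitespace separated floats; the file names are illustrative):
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <algorithm>
#include <iterator>
int main()
{
    // slurp the ASCII file and turn commas into whitespace
    std::ifstream in("Texture3D.txt");
    std::string txt((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());
    std::replace(txt.begin(), txt.end(), ',', ' ');
    // parse the floats
    std::istringstream ss(txt);
    std::vector<float> data((std::istream_iterator<float>(ss)), std::istream_iterator<float>());
    // dump them as raw 32-bit floats, ready for glTexImage3D(..., GL_FLOAT, ...)
    std::ofstream out("Texture3D_F32.dat", std::ios::binary);
    out.write(reinterpret_cast<const char*>(data.data()), data.size() * sizeof(float));
    return 0;
}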
Vertex:
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
uniform float aspect;
uniform float focal_length;
uniform float zoom;
uniform mat4x4 tm_eye;
layout(location=0) in vec2 pos;
out smooth vec3 ray_pos; // ray start position
out smooth vec3 ray_dir; // ray start direction
//------------------------------------------------------------------
void main(void)
{
vec4 p;
// perspective projection
p=tm_eye*vec4(pos.x/(zoom*aspect),pos.y/zoom,0.0,1.0);
ray_pos=p.xyz;
p-=tm_eye*vec4(0.0,0.0,-focal_length,1.0);
ray_dir=normalize(p.xyz);
gl_Position=vec4(pos,0.0,1.0);
}
//------------------------------------------------------------------
It's more or less a copy from the volumetric ray tracer link.
Fragment:
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
// Ray tracer ver: 1.000
//------------------------------------------------------------------
in smooth vec3 ray_pos; // ray start position
in smooth vec3 ray_dir; // ray start direction
uniform int vol_xs, // texture resolution
vol_ys,
vol_zs;
uniform sampler3D vol_txr; // scene mesh data texture
out layout(location=0) vec4 frag_col;
//---------------------------------------------------------------------------
// compute length of ray(p0,dp) to intersection with ellipsoid((0,0,0),r) -> view_depth_l0,1
// where r.x is ellipsoid rx^-2, r.y = ry^-2 and r.z = rz^-2
float view_depth_l0=-1.0,view_depth_l1=-1.0;
bool _view_depth(vec3 _p0,vec3 _dp,vec3 _r)
{
double a,b,c,d,l0,l1;
dvec3 p0,dp,r;
p0=dvec3(_p0);
dp=dvec3(_dp);
r =dvec3(_r );
view_depth_l0=-1.0;
view_depth_l1=-1.0;
a=(dp.x*dp.x*r.x)
+(dp.y*dp.y*r.y)
+(dp.z*dp.z*r.z); a*=2.0;
b=(p0.x*dp.x*r.x)
+(p0.y*dp.y*r.y)
+(p0.z*dp.z*r.z); b*=2.0;
c=(p0.x*p0.x*r.x)
+(p0.y*p0.y*r.y)
+(p0.z*p0.z*r.z)-1.0;
d=((b*b)-(2.0*a*c));
if (d<0.0) return false;
d=sqrt(d);
l0=(-b+d)/a;
l1=(-b-d)/a;
if (abs(l0)>abs(l1)) { a=l0; l0=l1; l1=a; }
if (l0<0.0) { a=l0; l0=l1; l1=a; }
if (l0<0.0) return false;
view_depth_l0=float(l0);
view_depth_l1=float(l1);
return true;
}
//---------------------------------------------------------------------------
const float pi =3.1415926535897932384626433832795;
const float pi2=6.2831853071795864769252867665590;
float atanxy(float x,float y) // atan2 return < 0 , 2.0*M_PI >
{
int sx,sy;
float a;
const float _zero=1.0e-30;
sx=0; if (x<-_zero) sx=-1; if (x>+_zero) sx=+1;
sy=0; if (y<-_zero) sy=-1; if (y>+_zero) sy=+1;
if ((sy==0)&&(sx==0)) return 0;
if ((sx==0)&&(sy> 0)) return 0.5*pi;
if ((sx==0)&&(sy< 0)) return 1.5*pi;
if ((sy==0)&&(sx> 0)) return 0;
if ((sy==0)&&(sx< 0)) return pi;
a=y/x; if (a<0) a=-a;
a=atan(a);
if ((x>0)&&(y>0)) a=a;
if ((x<0)&&(y>0)) a=pi-a;
if ((x<0)&&(y<0)) a=pi+a;
if ((x>0)&&(y<0)) a=pi2-a;
return a;
}
//---------------------------------------------------------------------------
void main(void)
{
float a,b,r,_rr,c;
const float dr=1.0/float(vol_ys); // r step
const float saturation=1000.0; // color saturation voxel value
vec3 rr,p=ray_pos,dp=normalize(ray_dir);
for (c=0.0,r=1.0;r>1e-10;r-=dr) // check all radiuses inwards
{
_rr=1.0/(r*r); rr=vec3(_rr,_rr,_rr);
if (_view_depth(p,dp,rr)) // if ray hits sphere
{
p+=view_depth_l0*dp; // shift ray start position to the hit
a=atanxy(p.x,p.y); // convert to spherical a,b,r
b=asin(p.z/r);
if (a<0.0) a+=pi2; // correct ranges...
b+=0.5*pi;
a/=pi2;
b/=pi;
// here do your stuff
c=texture(vol_txr,vec3(b,r,a)).r;// fetch voxel
if (c>saturation){ c=saturation; break; }
break;
}
}
c/=saturation;
frag_col=vec4(c,c,c,1.0);
}
//---------------------------------------------------------------------------
It's a slight modification of the volumetric ray tracer link.
Beware that I assume that the axes inside the texture are:
latitude, r, longitude
as implied by the resolutions (longitude should have double the resolution of latitude), so if that does not match your data, just reorder the axes in the fragment shader... I have no clue what the voxel cell values mean, so I sum them like an intensity/density for the final color, and once the saturation sum is reached I stop the raytrace, but instead you should do whatever computation you intend.
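For example, if the data really is laid out as (r,theta,phi)=(384,15,768) as the question states, the fetch in the fragment shader would presumably become:
// (s,t,p) = (radius, latitude, longitude) instead of (latitude, radius, longitude)
c=texture(vol_txr,vec3(r,b,a)).r; // fetch voxel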
Here is a preview:
I used this camera matrix eye for it:
// globals
SphericalVolume3D vol;
// init (GL must be already working)
vol.gl_init();
// render
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDisable(GL_CULL_FACE);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0,0.0,-2.5);
glGetFloatv(GL_MODELVIEW_MATRIX,vol.eye);
vol.glsl_draw(prog_id);
glFlush();
SwapBuffers(hdc);
// exit (GL must be still working)
vol.gl_exit();
The ray/sphere hit is working properly, and the hit position in spherical coordinates is working as it should, so the only things left are the axis order and the color arithmetic...
I have a 3-dimensional dataset where each value is normalized to [0, 1]. I want to visualize this dataset using textures and blending.
However, it seems that I can't make it work.
Here is what I have done so far:
int main(){
...
//building an image for each rectangular slice of data
vector<Texture> myTextures;
for (GLint rowIndex = 0; rowIndex < ROW_NUM; rowIndex++)
{
auto pathToImage = "images/row" + to_string(rowIndex) + FILE_EXT;
FIBITMAP *img = FreeImage_Allocate(SLICE_DIMENSION, SLICE_NUM, 32); //32 = RGBA
for (GLint depthIndex = 0; depthIndex < DEPTH_NUM; depthIndex++)
{
for (GLint colIndex = 0; colIndex < COL_NUM; colIndex++)
{
auto value = my3DData[depthIndex][rowIndex][colIndex];
//transform tValue to a source color
glm::vec4 source = transformValueToColor(value);
//note that here I am also setting the opacity.
RGBQUAD linRgb = { source.b, source.g, source.r, source.a };
FreeImage_SetPixelColor(img, colIndex, depthIndex, &linRgb);
}
}
//Saving images. The saved images show transparency.
FreeImage_Save(FIF_PNG, img, pathToImage.c_str());
myTextures.push_back(Texture(pathToImage.c_str(), GL_TEXTURE0));
}
//create VAO, VBO, EBO for a unit quad.
glEnable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
//game loop
while (!glfwWindowShouldClose(window))
{
...
for (int i = 0; i < myTextures.size(); i++)
{
GLint index = myTextures.size() - i - 1;
myTextures[index].bind(); //does glActiveTexture(...), and glBindTexture(...);
glm::mat4 model;
//translate
model = glm::translate(model, glm::vec3(0.0f, 0.0f, -index*0.003f));
//scale
model = glm::scale(model, glm::vec3(1.2f));
glUniformMatrix4fv(glGetUniformLocation(ourShader.Program, "model"), 1, GL_FALSE, glm::value_ptr(model));
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
}
}
transformValueToColor for transforming a data value in [0,1] to a color value:
//All inputs >=0.6 are transformed to a highly transparent white color.
glm::vec4 transformValueToColor(GLclampf tValue)
{
if (tValue >= 0.6f) {
return glm::vec4(255, 255, 255, 10);
}
else {
auto val = round(255 * tValue);
auto valOp = round(255 * (1 - tValue));
return glm::vec4(val, val, val, valOp);
}
}
My vertex shader:
#version 330 core
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texCoord;
out vec2 TexCoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(position, 1.0f);
TexCoord = vec2(texCoord.s, 1-texCoord.t);
}
My fragment shader:
#version 330 core
in vec2 TexCoord;
out vec4 color;
uniform sampler2D sliceTexture;
void main()
{
vec4 texColor = texture(sliceTexture, TexCoord);
color = texColor;
}
I think this is the code needed for the blending to work. The images are generated correctly, and they are also applied as textures on the quads correctly. However, the quads in front appear completely opaque, even though the generated images (including the one appearing in front) show transparent areas.
I am not sure where I am going wrong. Requesting your suggestions.
Thank you.
Edit: details of Texture class (only the parts relevant to loading RGBA image and creating RGBA texture from that):
int width, height, channelCount;
unsigned char* image = SOIL_load_image(pathToImage, &width, &height, &channelCount, SOIL_LOAD_RGBA);
...
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
Edit2: Added details of camera class. Camera::getViewMatrix() provides view matrix.
Camera::Camera(GLFWwindow* window, glm::vec3 position, glm::vec3 worldUpDirection, GLfloat yaw, GLfloat pitch)
:mouseSensitivity(0.25f), fov(45.0f), cameraSpeed(1.0f)
{
this->position = this->originalPosition = position;
this->worldUpDirection = worldUpDirection;
this->yaw = this->originalYaw = yaw;
this->pitch = this->originalPitch = pitch;
updateCameraVectors();
}
void Camera::updateCameraVectors()
{
glm::mat4 yawPitchRotMat;
yawPitchRotMat = glm::rotate(yawPitchRotMat, glm::radians(yaw), glm::vec3(0.0f, 1.0f, 0.0f)); //y-axis as yaw axis
yawPitchRotMat = glm::rotate(yawPitchRotMat, glm::radians(pitch), glm::vec3(1.0f, 0.0f, 0.0f)); //x-axis as pitch axis
frontDirection = glm::normalize(-glm::vec3(yawPitchRotMat[2].x, yawPitchRotMat[2].y, yawPitchRotMat[2].z));
rightDirection = glm::normalize(glm::cross(frontDirection, worldUpDirection));
upDirection = glm::normalize(glm::cross(rightDirection, frontDirection));
}
glm::mat4 Camera::getViewMatrix()
{
return glm::lookAt(position, position + frontDirection, upDirection);
}
myTextures.push_back(Texture(pathToImage.c_str(), GL_TEXTURE0));
This doesn't include any information about your Texture class and how it's parsing the image files. Even if the files themselves show transparency, it's possible that your texture loading code is discarding the alpha channel when you load the image.
model = glm::translate(model, glm::vec3(0.0f, 0.0f, -index*0.003f));
This makes it look as if you're rendering from front to back. If you want to render transparent objects on top of one another, you either need to use an algorithm for order-independent transparency, or you need to render from back to front.
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
You may also want / need to use glBlendFuncSeparate so that the mixing of the alpha channel is done differently. I'm not sure about this one though.
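For reference, a commonly used separate-blend setup looks like this (the exact factors depend on the compositing you want, so treat this as a starting point):
// blend RGB with source alpha, but accumulate destination alpha differently
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);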
You may also want to consider simply populating a single GL_TEXTURE_3D object and rendering it as a cube, doing all the mixing in the fragment shader, rather than rendering a series of quads for each layer.
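Creating such a 3D texture might look roughly like this (a sketch; rgbaData is a hypothetical COL_NUM x ROW_NUM x DEPTH_NUM RGBA buffer built with transformValueToColor):
GLuint volumeTex;
glGenTextures(1, &volumeTex);
glBindTexture(GL_TEXTURE_3D, volumeTex);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
// one upload for the whole volume instead of one 2D image per slice
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA8, COL_NUM, ROW_NUM, DEPTH_NUM, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgbaData);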
I implemented a new rendering pipeline in my engine and rendering is broken now. When I directly draw a texture of the G-Buffer to the screen, it shows up correctly, so the G-Buffer is fine. But somehow the lighting pass causes trouble: even if I don't use its resulting texture but try to display the albedo from the G-Buffer after the lighting pass, it shows a solid gray color.
I can't explain this behavior and the strange thing is that there are no OpenGL errors at any point.
Vertex Shader to draw a fullscreen quad.
#version 330
in vec4 vertex;
out vec2 coord;
void main()
{
coord = vertex.xy;
gl_Position = vertex * 2.0 - 1.0;
}
Fragment Shader for lighting.
#version 330
in vec2 coord;
out vec3 image;
uniform int type = 0;
uniform sampler2D positions;
uniform sampler2D normals;
uniform vec3 light;
uniform vec3 color;
uniform float radius;
uniform float intensity = 1.0;
void main()
{
if(type == 0) // directional light
{
vec3 normal = texture2D(normals, coord).xyz;
float fraction = max(dot(normalize(light), normal) / 2.0 + 0.5, 0);
image = intensity * color * fraction;
}
else if(type == 1) // point light
{
vec3 pixel = texture2D(positions, coord).xyz;
vec3 normal = texture2D(normals, coord).xyz;
float dist = max(distance(pixel, light), 1);
float magnitude = 1 / pow(dist / radius + 1, 2);
float cutoff = 0.4;
float attenuation = clamp((magnitude - cutoff) / (1 - cutoff), 0, 1);
float fraction = clamp(dot(normalize(light - pixel), normal), -1, 1);
image = intensity * color * attenuation * max(fraction, 0.2);
}
}
Targets and samplers for the lighting pass. Texture ids are mapped to attachments and shader locations, respectively.
unordered_map<GLenum, GLuint> targets;
targets.insert(make_pair(GL_COLOR_ATTACHMENT2, ...)); // light
targets.insert(make_pair(GL_DEPTH_STENCIL_ATTACHMENT, ...)); // depth and stencil
unordered_map<string, GLuint> samplers;
samplers.insert(make_pair("positions", ...)); // positions from G-Buffer
samplers.insert(make_pair("normals", ...)); // normals from G-Buffer
Draw function for lighting pass.
void DrawLights(unordered_map<string, GLuint> Samplers, GLuint Program)
{
auto lis = Entity->Get<Light>();
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glUseProgram(Program);
int n = 0; for(auto i : Samplers)
{
glActiveTexture(GL_TEXTURE0 + n);
glBindTexture(GL_TEXTURE_2D, i.second);
glUniform1i(glGetUniformLocation(Program, i.first.c_str()), n);
n++;
}
mat4 view = Entity->Get<Camera>(*Global->Get<unsigned int>("camera"))->View;
for(auto i : lis)
{
int type = i.second->Type == Light::DIRECTIONAL ? 0 : 1;
vec3 pos = vec3(view * vec4(Entity->Get<Form>(i.first)->Position(), !type ? 0 : 1));
glUniform1i(glGetUniformLocation(Program, "type"), type);
glUniform3f(glGetUniformLocation(Program, "light"), pos.x, pos.y, pos.z);
glUniform3f(glGetUniformLocation(Program, "color"), i.second->Color.x, i.second->Color.y, i.second->Color.z);
glUniform1f(glGetUniformLocation(Program, "radius"), i.second->Radius);
glUniform1f(glGetUniformLocation(Program, "intensity"), i.second->Intensity);
glBegin(GL_QUADS);
glVertex2i(0, 0);
glVertex2i(1, 0);
glVertex2i(1, 1);
glVertex2i(0, 1);
glEnd();
}
glDisable(GL_BLEND);
glActiveTexture(GL_TEXTURE0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
I found the error, and it was such a stupid one. The old rendering pipeline bound the correct framebuffer before calling the draw function of each pass. But the new one didn't, so each draw function had to do that itself. I therefore wanted to update all draw functions, but I missed the draw function of the lighting pass.
As a result, the framebuffer of the G-Buffer was still bound, and the lighting pass overwrote its targets.
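The fix is essentially a one-liner at the top of the lighting pass draw function (a sketch; lightingFramebuffer is a placeholder for whatever target this pass should render to, or 0 for the default framebuffer):
// in DrawLights(), before clearing and drawing:
glBindFramebuffer(GL_FRAMEBUFFER, lightingFramebuffer);
glClear(GL_COLOR_BUFFER_BIT);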
Thanks to you guys anyway; you had no chance to find that error, since I hadn't posted my complete pipeline system.