Sampling a 3D texture to get a 2D image - C++

I have a scalar field of values which I have mapped to a 3D texture (image_texture). Then, given a plane gPlaneParams, I have to render the texture of the scalar field along that plane.
What I'm doing:
I send 4 points which span the window dimensions, as two triangles, to the shaders. I bind the texture using a sampler in the fragment shader. Below is the fragment shader code.
#version 330 core
uniform sampler3D text_sampler;
uniform vec4 gPlaneParams;
in vec4 inPos;
void main()
{
    vec4 Pos = inPos;
    // position input is a square [-1,1]^2
    // and needs to be mapped to the plane ax+by+cz=d, where a,b,c,d are the plane parameters,
    // and x,y,z belong to [0,1]^3
    if (gPlaneParams.z!=0){
        Pos.z = (gPlaneParams.w - gPlaneParams.x*Pos.x - gPlaneParams.y*Pos.y)/gPlaneParams.z;
    }
    else{
        if (gPlaneParams.x!=0){
            Pos.z=Pos.x;
            Pos.x = (gPlaneParams.w - gPlaneParams.y*Pos.y - gPlaneParams.z*Pos.z)/gPlaneParams.x;
        }
        else if (gPlaneParams.y!=0){
            Pos.z=Pos.y;
            Pos.y = (gPlaneParams.w - gPlaneParams.x*Pos.x - gPlaneParams.z*Pos.z)/gPlaneParams.y;
        }
    }
    gl_FragColor=vec4(1.0,0,0,0)*texture3D(text_sampler,(Pos.xyz+1)/2);
}
In my C++ code, I bind the texture as follows:
glGenTextures(1,textureID);
glBindTexture(GL_TEXTURE_3D,textureID[0]);
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGB,object_size[0],object_size[1],object_size[2], 0, GL_RGB, GL_UNSIGNED_INT, image_texture1);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_3D,GL_TEXTURE_WRAP_R,GL_CLAMP_TO_EDGE);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D,textureID[0]);
bool err=glIsTexture(textureID[0]);
cout<<"Texture bound?"<<err<<endl;
Unfortunately, this does not render any output. Can someone help me figure out what I'm doing wrong?
I have done everything else correctly:
The 4 vertices and 2 triangles are properly bound (I can render them by giving them constant colours)
The image texture is contiguous in memory: image_texture = (unsigned int*) malloc(object_size[0] *object_size[1] *object_size[2]*3* sizeof(unsigned int));
All my inputs to the shader are successfully bound:
gSamplerLocation = glGetUniformLocation(ShaderProgram, "text_sampler");
gPLaneLoc = glGetUniformLocation(ShaderProgram, "gPlaneParams");
glUniform1i(gSamplerLocation, 0);
glUniform4f(gPLaneLoc,plane_params[0],plane_params[1],plane_params[2],plane_params[3]);

Related

Deferred Rendering not Displaying the GBuffer Textures

I'm trying to implement deferred rendering within an engine I'm developing as a personal learning project, and I cannot understand what I'm doing wrong when it comes to rendering all the textures in the GBuffer to check if the implementation is okay.
The thing is that I currently have a framebuffer with 3 color attachments for the different textures of the GBuffer (color, normal and position), which I initialize as follows:
glCreateFramebuffers(1, &id);
glBindFramebuffer(GL_FRAMEBUFFER, id);
std::vector<uint> textures;
textures.resize(3);
glCreateTextures(GL_TEXTURE_2D, 3, textures.data());
for(size_t i = 0; i < 3; ++i)
{
    glBindTexture(GL_TEXTURE_2D, textures[i]);
    if(i == 0) // For Color Buffer
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
    else
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, width, height, 0, GL_RGBA, GL_FLOAT, nullptr);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, textures[i], 0);
}
GLenum color_buffers[3] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2 };
glDrawBuffers((GLsizei)textures.size(), color_buffers);
uint depth_texture;
glCreateTextures(GL_TEXTURE_2D, 1, &depth_texture);
glBindTexture(GL_TEXTURE_2D, depth_texture);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_DEPTH24_STENCIL8, width, height);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, depth_texture, 0);
bool fbo_status = glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE;
ASSERT(fbo_status, "Framebuffer Incompleted!");
glBindFramebuffer(GL_FRAMEBUFFER, 0);
This is not reporting any errors, and it seems to work since the framebuffer of the forward renderer renders properly. Then, when rendering, I run the following code after binding the framebuffer and clearing the color and depth buffers:
camera_buffer->Bind();
camera_buffer->SetData("ViewProjection", glm::value_ptr(viewproj_mat));
camera_buffer->SetData("CamPosition", glm::value_ptr(glm::vec4(view_position, 0.0f)));
camera_buffer->Unbind();
for(Entity& entity : scene_entities)
{
    shader->Bind();
    Texture* texture = entity.GetTexture();
    BindTexture(0, texture);
    shader->SetUniformMat4("u_Model", entity.transform);
    shader->SetUniformInt("u_Albedo", 0);
    shader->SetUniformVec4("u_Material.AlbedoColor", entity.AlbedoColor);
    shader->SetUniformFloat("u_Material.Smoothness", entity.Smoothness);
    glBindVertexArray(entity.VertexArray);
    glDrawElements(GL_TRIANGLES, entity.VertexArray.index_buffer.count, GL_UNSIGNED_INT, nullptr);
    // Shader, VArray and Textures Unbindings
}
So with this code I manage to display the 3 created textures by using the ImGui::Image function, switching the texture index between 0, 1 and 2 like this:
ImGui::Image((ImTextureID)(fbo->textures[0]), viewport_size, ImVec2(0, 1), ImVec2(1, 0));
Now, the color texture (at index 0) works perfectly, as the next image shows:
But when rendering the normals and position textures (indices 1 and 2), I have no result:
Does anybody see what I'm doing wrong? I've been at this for hours and hours and I cannot see it. I ran this through RenderDoc and I couldn't see anything wrong; the textures displayed in RenderDoc are the same as in the engine.
The vertex shader I use when rendering the entities is the following:
layout(location = 0) in vec3 a_Position;
layout(location = 1) in vec2 a_TexCoord;
layout(location = 2) in vec3 a_Normal;
out IBlock
{
    vec2 TexCoord;
    vec3 FragPos;
    vec3 Normal;
} v_VertexData;
layout(std140, binding = 0) uniform ub_CameraData
{
    mat4 ViewProjection;
    vec3 CamPosition;
};
uniform mat4 u_ViewProjection = mat4(1.0);
uniform mat4 u_Model = mat4(1.0);
void main()
{
    vec4 world_pos = u_Model * vec4(a_Position, 1.0);
    v_VertexData.TexCoord = a_TexCoord;
    v_VertexData.FragPos = world_pos.xyz;
    v_VertexData.Normal = transpose(inverse(mat3(u_Model))) * a_Normal;
    gl_Position = ViewProjection * u_Model * vec4(a_Position, 1.0);
}
And the fragment shader is the following; they are both pretty simple:
layout(location = 0) out vec4 gBuff_Color;
layout(location = 1) out vec3 gBuff_Normal;
layout(location = 2) out vec3 gBuff_Position;
in IBlock
{
    vec2 TexCoord;
    vec3 FragPos;
    vec3 Normal;
} v_VertexData;
struct Material
{
    float Smoothness;
    vec4 AlbedoColor;
};
uniform Material u_Material = Material(1.0, vec4(1.0));
uniform sampler2D u_Albedo, u_Normal;
void main()
{
    gBuff_Color = texture(u_Albedo, v_VertexData.TexCoord) * u_Material.AlbedoColor;
    gBuff_Normal = normalize(v_VertexData.Normal);
    gBuff_Position = v_VertexData.FragPos;
}
It is not clear from the question what exactly might be happening here, as a lot of GL state - both at the time of rendering to the G-buffer, and at the time the G-buffer texture is rendered for visualization - is simply unknown. However, from the images given in the question, one cannot conclude that the actual color output for attachments 1 and 2 is not working.
One issue which comes to mind is alpha blending. The per-fragment operations after the fragment shader always work with RGBA values - although the value of the A channel only matters if you have enabled blending and use a blend function which somehow depends on the source alpha.
If you declare a custom fragment shader output as float, vec2 or vec3, the remaining components stay undefined (an undefined value, not undefined behavior). This is not a problem unless some other operation you do depends on those values.
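If you want that alpha to be well-defined, one option (just a sketch based on the question's shader, not required for correctness) is to declare the outputs as vec4 and write an explicit alpha:
layout(location = 1) out vec4 gBuff_Normal;
layout(location = 2) out vec4 gBuff_Position;
// inside main(): write 1.0 into the otherwise unused alpha channel
gBuff_Normal = vec4(normalize(v_VertexData.Normal), 1.0);
gBuff_Position = vec4(v_VertexData.FragPos, 1.0);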
What we also have here is a GL_RGBA16F output format (which is the right choice, because none of the 3-component RGB formats are required to be color-renderable by the spec).
What might happen here is one of two things:
Alpha blending is already turned on while rendering into the G-buffer. The fragment shader's alpha output happens to be zero, so everything appears 100% transparent and the contents of the texture are not changed.
Alpha blending is not used while rendering into the G-buffer, so the correct contents end up in the texture; the alpha channel just happens to end up all zeros. The texture might then be visualized with alpha blending enabled, resulting in a 100% transparent view.
If it is the first option, turn off blending when rendering into the G-buffer. It would not work with deferred shading anyway. You might still run into the second option then.
If it is the second option, there is no issue at all - the lighting passes which follow will read the data they need (and ultimately, you will want to put useful information into the alpha channel so as not to waste it and to be able to reduce the number of attachments). It is just that your visualization (which I assume is for debug purposes only) is wrong. You can try to fix the visualization.
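One way to fix the debug view (a sketch, assuming GL 3.3+ texture swizzling and the fbo->textures naming used in the question) is to force the sampled alpha to 1.0, so that ImGui's blending cannot hide the image:
// Make sampling these attachments always return 1.0 for alpha.
// Note: this also affects any later sampling of these textures.
glBindTexture(GL_TEXTURE_2D, fbo->textures[1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_ONE);
glBindTexture(GL_TEXTURE_2D, fbo->textures[2]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_ONE);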
As a side note: storing the world space position in the G-buffer is a huge waste of bandwidth. All you need to reconstruct the world space position is the depth value and the inverses of your view and projection matrices. Also, storing the world space position in GL_RGBA16F will very easily run into precision issues if you move your camera away from the world space origin.
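For reference, a minimal GLSL sketch of that reconstruction (names like u_Depth and u_InvViewProjection are assumptions, not part of the question's code), assuming the default [0,1] depth range:
uniform sampler2D u_Depth;          // depth attachment sampled in the lighting pass
uniform mat4 u_InvViewProjection;   // inverse(projection * view)
in vec2 v_TexCoord;

vec3 ReconstructWorldPos()
{
    float depth = texture(u_Depth, v_TexCoord).r;
    // back to normalized device coordinates in [-1, 1]
    vec4 ndc = vec4(v_TexCoord * 2.0 - 1.0, depth * 2.0 - 1.0, 1.0);
    vec4 world = u_InvViewProjection * ndc;
    return world.xyz / world.w;      // undo the perspective divide
}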

OpenGL/GLSL: Pass float array to shader by texture

I am trying to pass a float* that contains 128*128 floats to a shader. Since this array is too big to pass directly, I am trying to pass it via a texture to get my float* into my shader. The problem is that I don't know how to use this array inside my shader.
I print my float* before sending it to my shader, and it contains numbers between -1 and 1.
My goal is to simulate waves on water with Perlin noise, so my array of floats contains Perlin noise values.
So I instantiate my noise texture like this:
_perlin_noise = new float[_dimension * _dimension];
//... I put float inside my _perlin_noise variable
//Then I instantiate my texture :
glGenTextures(1, &_perlin_noise_text);
glBindTexture(GL_TEXTURE_2D, _perlin_noise_text);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, _dimension, _dimension, 0,
GL_RED, GL_FLOAT, _perlin_noise);
glActiveTexture(0);
glBindTexture(GL_TEXTURE_2D, 0);
My water uses two textures: one texture is fully blue, the other is a foam texture.
So now I have a third texture, which is my Perlin noise, and I want to adjust the height of my vertices with it.
So before anything, I assign the uniform variables of my shader like this:
main_water_shader->use();
glUniform1i(glGetUniformLocation(main_water_shader->getProgram(), "main_water_texture"), 0);
glUniform1i(glGetUniformLocation(main_water_shader->getProgram(), "foam_texture"), 1);
glUniform1i(glGetUniformLocation(main_water_shader->getProgram(), "perlin_noise"), 2);
Then I want to adjust the vertices' height in my vertex shader, but I don't know how to do it. I tried this in my vertex shader:
uniform sampler2D perlin_noise;
//some other uniform...
void main()
{
    //calculating vertex height
    float height = float( texture2D(perlin_noise, vec2(x, y)) );
    vec3 new_vertex_position = vertex_position;
    new_vertex_position.y = height;
    //Standard stuff
    vs_position = vec4(ModelMatrix * vec4(new_vertex_position, 1.f)).xyz;
    vs_texcoord = vec2(vertex_texcoord.x, vertex_texcoord.y );
    vs_normal = mat3(ModelMatrix) * vertex_normal;
    gl_Position = ProjectionMatrix * ViewMatrix * ModelMatrix * vec4(new_vertex_position, 1.f);
}
Nothing changes; the height of my vertices is still the same. And if I try to display my perlin_noise texture in my fragment shader on a plane, I get this:
You can see my plane (which is very large) with a black and white texture on it that is repeated. So I guess my perlin_noise texture does contain something (even if it is a bit weird), but I can't figure out how to use it in my vertex shader.
EDIT: In my screenshot, I also use a vertex shader with the height adjusted by the Perlin noise, but as you can see, my plane stays flat, so it does not seem to work.
EDIT 2: If I'm not clear enough, tell me.

Passing a float array as a 3D texture to a GLSL fragment shader

I'm trying to implement ray-casting-based volume rendering, and therefore I need to pass a float array to the fragment shader as a texture (sampler3D).
I've got a volume data structure containing all the voxels. Each voxel contains a density value, so for processing I stored the values in a float array.
//initialize glew, initialize glfw, create window, etc.
float* density;
density = new float[volume->size()];
for (int i = 0; i < volume->size(); i++){
    density[i] = volume->voxel(i).getValue();
}
Then I tried creating and binding the textures.
glGenTextures(1, &textureHandle);
glBindTexture(GL_TEXTURE_3D, textureHandle);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_REPEAT);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE, volume->width(),
volume->height(), volume->depth(), 0, GL_LUMINANCE, GL_FLOAT, density);
In my render loop I try to load the texture into the uniform sampler3D.
glClearColor(0.4f, 0.2f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0);
GLint gSampler = glGetUniformLocation(shader->shaderProgram, "volume");
glUniform1i(gSampler, 0);
cube->draw();
So the basic idea is to calculate the current position and direction for ray casting in the Vertex Shader.
in vec3 position;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform vec4 cameraPos;
out vec3 pos;
out vec3 dir;
void main(){
    gl_Position = projection * view * model * vec4(position, 1.0);
    pos = position;
    dir = pos - (inverse(model) * cameraPos).xyz;
}
That seems to work well; so far so good. The fragment shader looks like this: I take some samples along the ray, and the one with the largest density value is used as the color for red, green and blue.
#version 330 core
in vec3 pos;
in vec3 dir;
uniform sampler3D volume;
out vec4 color;
const float stepSize = 0.008;
const float iterations = 1000;
void main(){
    vec3 rayDir = normalize(dir);
    vec3 rayPos = pos;
    float src;
    float dst = 0;
    float density = 0;
    for(int i = 0; i < iterations; i++){
        src = texture(volume, rayPos).r;
        if(src > density){
            density = src;
        }
        rayPos += rayDir * stepSize;
        //check whether rays are within bounds. if not -> break.
    }
    color = vec4(density, density, density, 1.0f);
}
Now I've tried inserting some small debug assertions.
if(src != 0){
    rayPos = vec3(1.0f);
    break;
}
But src seems to be 0 at every iteration for every pixel, which leads me to the conclusion that the sampler isn't set up correctly. Debugging the C++ code, I get the correct values for the density array right before I pass it to the shader, so I guess there must be some OpenGL call missing. Thanks in advance!
glTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE, volume->width(), volume->height(), volume->depth(), 0, GL_LUMINANCE, GL_FLOAT, density);
Unless this density is in the range [0, 1], this is almost certainly not doing what you intend.
GL_LUMINANCE, when used as an internal format (the third parameter to glTexImage3D), means that each pixel in OpenGL's texture data will contain a single normalized integer value. So if you want a floating-point value, you're kinda out of luck.
The proper way to do this is to explicitly declare the type and pixel size of the data. Luminance was removed from the core OpenGL profile back in 3.1, so the way to do that today is to use GL_R32F as your internal format. That declares that each pixel contains one value, and that value is a 32-bit float.
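A minimal sketch of the corrected upload, using the same variables as in the question (one 32-bit float per voxel, read from the red channel in the shader):
// R32F: one 32-bit float per texel, no normalization
glTexImage3D(GL_TEXTURE_3D, 0, GL_R32F, volume->width(), volume->height(),
             volume->depth(), 0, GL_RED, GL_FLOAT, density);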
If you really need to broadcast the value across the RGB channels, you can use texture swizzling to accomplish that. You can set a swizzle mask to broadcast the red component to any other channel you like.
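For example, a swizzle mask like the following (requires GL 3.3 / ARB_texture_swizzle, and assumes the texture is currently bound to GL_TEXTURE_3D) makes the green and blue channels return the red value as well:
GLint swizzle[4] = { GL_RED, GL_RED, GL_RED, GL_ONE };
glTexParameteriv(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_RGBA, swizzle);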
glActiveTexture(GL_TEXTURE0);
GLint gSampler = glGetUniformLocation(shader->shaderProgram, "volume");
glUniform1i(gSampler, 0);
I've heard that binding the texture is also a good idea. You know, if you actually want to read from it ;)
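In other words, something along these lines in the render loop (a sketch reusing the question's variable names) would make the data actually visible to the sampler:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, textureHandle);   // this bind is what was missing
GLint gSampler = glGetUniformLocation(shader->shaderProgram, "volume");
glUniform1i(gSampler, 0);
cube->draw();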

OpenGL, blending transparent textures with object color

I can set my fragment shader to show the colors of the object or the texture color of the object. My question is: how can I combine those two, so that I can have a transparent picture with lines for bricks, and then show the different colors underneath, so that by changing the color of the object you change the color of the bricks?
I tried using mix() for that in the fragment shader, but it only shows me the glClearColor where it is transparent, instead of the red color I have assigned to it!
My fragment shader:
#version 120
uniform sampler2D diffuse;
varying vec3 shared_colors;
varying vec2 shared_texCoords;
void main() {
    vec4 color = vec4(shared_colors, 1);
    vec4 texture = texture2D(diffuse, shared_texCoords);
    vec4 finalColor = vec4(mix(color.rgb, texture.rgb, 1), 1);
    gl_FragColor = finalColor;
}
EDIT: Added texture loader func:
void Texture::createTexture(SDL_Surface *rawImage, GLenum format) {
    //Convert to a texture of pure color pixels for OpenGL
    SDL_Surface *image = SDL_CreateRGBSurface(NULL, rawImage->w, rawImage->h, 32, 0, 0, 0, 0);
    SDL_BlitSurface(rawImage, NULL, image, NULL);
    //Generate texture
    glGenTextures(1, &m_texture);
    //Tell OpenGL to use this texture
    glBindTexture(GL_TEXTURE_2D, m_texture);
    //Set texture parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    //Generate texture from image
    glTexImage2D(GL_TEXTURE_2D, 0, 4, image->w, image->h, 0, format, GL_UNSIGNED_BYTE, image->pixels);
    m_dimension = new PixelSize(image->w, image->h);
    //Free loaded images
    SDL_FreeSurface(rawImage);
    SDL_FreeSurface(image);
}
You should take a closer look at mix (...) to understand why using 1 for a effectively does nothing meaningful (it returns y).
Let us start by considering the usual alpha blending function: GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA.
This means: take the alpha channel of the source, multiply it by the source color (the fragment color), and add that to the inverse of the alpha channel multiplied by the destination color (the framebuffer).
  AlphaBlend = SrcRGBA * SrcA + DstRGBA * (1.0 - SrcA)
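(For reference, that blend function is set up on the C++ side like this - not something shown in the question's code, just the standard calls:)
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);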
Now, if you look at the implementation of mix (...) it should look quite familiar:
  x * (1.0 - a) + y * a
Clearly, this is order-dependent for any value of a != 0.5 (since 1.0 - 0.5 = 0.5), and more importantly in this case it completely throws out one of your colors if you use a value of 1.0 for a (as this multiplies x by 0.0).
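A minimal sketch of what the fragment shader's main() could look like instead (assuming the brick-line texture actually carries its opacity in the alpha channel; variable names match the question's shader):
void main() {
    vec4 color = vec4(shared_colors, 1.0);
    vec4 tex = texture2D(diffuse, shared_texCoords);
    // Use the texture's alpha as the mix factor: where the texture is
    // transparent the object color shows through, where it is opaque
    // (the brick lines) the texture color wins.
    gl_FragColor = vec4(mix(color.rgb, tex.rgb, tex.a), 1.0);
}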

Alpha channel value always returning 1.0 after rendering-to-texture in OpenGL

This problem is driving me crazy since the code was working perfectly before. I have a fragment shader which combines two textures based on the value set in the alpha channel. The output is rendered to a third texture using an FBO.
Since I need to perform a post-processing step on the combined texture, I check the value of the alpha channel to determine whether that texel will need post-processing or not (i.e., I'm using the alpha channel value as a mask). The problem is, the post-processing shader is reading a value of 1.0 for all the texels in the input texture!
Here is the fragment shader that combines the two textures:
uniform samplerRect tex1;
uniform samplerRect tex2;
in vec2 vTexCoord;
out vec4 fColor;
void main(void) {
    vec4 color1, color2;
    color1 = texture(tex1, vTexCoord.st);
    color2 = texture(tex2, vTexCoord.st);
    if (color1.a == 1.0) {
        fColor = color2;
    } else if (color2.a == 1.0) {
        fColor = color1;
    } else {
        fColor = (color1 + color2) / 2.0;
    }
}
The texture object that I attach to the FBO is set up as follows:
glGenTextures(1, &glBufferTex);
glBindTexture(GL_TEXTURE_RECTANGLE, glBufferTex);
glTexParameteri(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_RECTANGLE, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
Code that attaches the texture to the FBO is:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_RECTANGLE, glBufferTex, 0);
I even added a call to glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE) before attaching the FBO! What could possibly be going wrong that is making the next stage fragment shader read 1.0 for all texels?!
NOTE: I did check that not all the values of the alpha channel for texels in the two textures that I combine are 1.0. Most of them actually are not.