How to load multiple textures into the fragment shader - C++

So I'm trying to texture an object in OpenGL and I have two textures that I want to mix together in the fragment shader (rock.tga and grass.tga). My fragment shader:
out vec3 color;
in vec2 uv;
uniform sampler2D grasstexture;
uniform sampler2D rocktexture;
To load a sampler2D texture, here's what I did in my header file in C++:
OpenGP::EigenImage<vec3> image;
OpenGP::imread("grass.tga", image);
glGenTextures(0, &_tex);
glBindTexture(GL_TEXTURE_2D, _tex);
check_error_gl();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
check_error_gl();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F,
image.cols(), image.rows(), 0,
GL_RGB, GL_FLOAT, image.data());
check_error_gl();
tex_id = glGetUniformLocation(_pid, "grasstexture");
glUniform1i(tex_id, 0);
This loads the grass texture into my fragment shader. Now what I want to know is how to load multiple textures. I can't just duplicate the code above, because one texture always overwrites the other. So how can I load multiple textures into the fragment shader?
EDIT: Here's where my _tex is declared:
class Mesh{
protected:
GLuint _tex;
GLuint _vpoint; ///< memory buffer
GLuint _vnormal; ///< memory buffer
public:
GLuint getProgramID(){ return _pid; }
void init(){
//code for the textures goes in here
Here's my updated code:
OpenGP::EigenImage<vec3> image;
glActiveTexture(GL_TEXTURE0);
OpenGP::imread("grass.tga", image);
//glGenTextures(1, &_tex);
glBindTexture(GL_TEXTURE_2D, _tex);
check_error_gl();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
check_error_gl();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F,
image.cols(), image.rows(), 0,
GL_RGB, GL_FLOAT, image.data());
check_error_gl();
tex_id = glGetUniformLocation(_pid, "grasstexture");
glUniform1i(tex_id, 0);
glActiveTexture(GL_TEXTURE1);
OpenGP::imread("rock.tga", image);
//glGenTextures(0, &_tex);
glBindTexture(GL_TEXTURE_2D, _tex);
check_error_gl();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
check_error_gl();
glTexImage2D(GL_TEXTURE_2D, 1, GL_RGB32F,
image.cols(), image.rows(), 0,
GL_RGB, GL_FLOAT, image.data());
check_error_gl();
tex_id = glGetUniformLocation(_pid, "rocktexture");
glUniform1i(tex_id, 1);

OpenGL hides a lot of state in its context, so you need to keep that in mind. A texture has three parts:
Texture name - just an integer; you pass it around.
Active texture unit - part of the context, selected with GL_TEXTURE0, GL_TEXTURE1, and so on.
Texture target - for starters, GL_TEXTURE_2D is all you need.
Here is what you need to do in order to get a 2d texture working:
Create an OpenGL texture name with glGenTextures
Set up the texture with the following sequence of actions:
2.1 Set active texture unit with glActiveTexture
2.2 Bind the named texture to the target of the active texture unit with glBindTexture. At this point all three texture parts - name, unit, and target - are under control.
2.3 Specify a two-dimensional texture image for the active texture unit with glTexImage2D
2.4 Set parameters for the active texture unit with glTexParameteri
To use multiple textures simultaneously, make sure each one has a unique name and texture unit:
3.1 Set active texture unit to 0
3.2 Bind one named texture to active texture unit
3.3 Pass texture unit (0) to shader
3.4 Set active texture unit to 1
3.5 Bind another named texture to active texture unit
3.6 Pass texture unit (1) to shader
...
And when you're done, don't forget to delete all the named textures with glDeleteTextures.
The order of commands is not the only one possible, and the set of commands is not minimal - it is just typical; a sketch of the whole sequence follows.
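A minimal sketch of that sequence for the two textures in the question, assuming a current GL context, a linked program _pid, and the OpenGP image loader from the question:
GLuint tex[2];
glGenTextures(2, tex);                                  // step 1: create two texture names
const char* files[2]    = { "grass.tga", "rock.tga" };
const char* samplers[2] = { "grasstexture", "rocktexture" };
glUseProgram(_pid);                                     // glUniform1i affects the program currently in use
for (int i = 0; i < 2; ++i) {
    OpenGP::EigenImage<vec3> image;
    OpenGP::imread(files[i], image);
    glActiveTexture(GL_TEXTURE0 + i);                   // step 2.1: select unit i
    glBindTexture(GL_TEXTURE_2D, tex[i]);               // step 2.2: bind name to unit i
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F,           // step 2.3: upload the image
                 image.cols(), image.rows(), 0,
                 GL_RGB, GL_FLOAT, image.data());
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);   // step 2.4: parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glUniform1i(glGetUniformLocation(_pid, samplers[i]), i);        // step 3: unit i -> sampler
}
// ... draw ...
// glDeleteTextures(2, tex); // step 4, once the textures are no longer needed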
Original answer:
You need to select the next active texture unit like this:
glActiveTexture(GL_TEXTURE1);
before you bind the next texture.
It will also work like this:
std::vector<std::string> textures = {"grass.tga", "rock.tga"};
for(int i = 0; i < textures.size(); i++) {
glActiveTexture(GL_TEXTURE0 + i);
// create the texture
}

Related

OpenGL texture function always return 0 on integer data

I'm working on a deferred shading pipeline, and I store some information in a texture. This is the texture attached to my G-buffer:
// objectID, drawID, primitiveID
glGenTextures(1, &_gPixelIDsTex);
glBindTexture(GL_TEXTURE_2D, _gPixelIDsTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32UI, _width, _height, 0, GL_RGB_INTEGER, GL_UNSIGNED_INT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
And this is how I write IDs into it:
// some other gbuffer textures...
layout (location = 4) out uvec3 gPixelIDs;
gPixelIDs = uvec3(objectID, drawID, gl_PrimitiveID + 1);
After the geometry pass, I can read from it using the following code:
struct PixelIDs {
GLuint ObjectID, DrawID, PrimitiveID;
}pixel;
glBindFramebuffer(GL_READ_FRAMEBUFFER, _gBufferFBO);
glReadBuffer(GL_COLOR_ATTACHMENT4);
glReadPixels(x, y, 1, 1, GL_RGB_INTEGER, GL_UNSIGNED_INT, &pixel);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
So far, so good. The output is what I need.
But when I try to use this shader to display the object ID on the screen (just for debugging purposes):
uniform sampler2D gPixelIDsTex;
uint objID = uint(texture(gPixelIDsTex, fragData.TexCoords).r);
FragColor = vec4(objID, objID, objID, 1);
the result is 0 (I used Snipaste to read the pixel color), which means I can't use the data in the passes that follow.
The other G-buffer textures with floating-point formats (e.g. vec4) are all fine, so I don't know why texture() always returns 0 on this one.
uniform sampler2D gPixelIDsTex;
Your texture is not a floating-point texture. It's an unsigned integer texture. So your sampler declaration needs to express that. Just as you write to a uvec3, so too must you read from a usampler2D.
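On the C++ side nothing else has to change once the sampler type is fixed; a sketch of the debug pass, where debugProg is a hypothetical name for the debug shader program:
glUseProgram(debugProg);                               // hypothetical debug program
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _gPixelIDsTex);           // the GL_RGB32UI texture
glUniform1i(glGetUniformLocation(debugProg, "gPixelIDsTex"), 0);
// In the shader, declare `uniform usampler2D gPixelIDsTex;` and convert the
// unsigned result to a float before writing it, for example:
//   uint objID = texture(gPixelIDsTex, fragData.TexCoords).r;
//   FragColor = vec4(vec3(float(objID)) / 255.0, 1.0);
// (the /255.0 scale is only there to make small IDs visible on screen)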

Binding an empty texture to an image unit produces a segfault

I want to draw to the texture via a compute shader. For this I am trying to bind the texture to an image unit:
glBindImageTexture(0, texture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
But when I run the code it produces a segfault.
This is how I create the empty texture:
unsigned int texture;
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, nullptr);
glBindTexture(GL_TEXTURE_2D, 0);
I used the following tutorial for compute shaders: https://antongerdelan.net/opengl/compute.html
According to https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glBindImageTexture.xhtml, glBindImageTexture is only available in GL versions greater than or equal to 4.2. After updating the GL version of my project, everything works fine now.
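For reference, a guard like the following would turn the crash into a readable error (a sketch; it assumes a loader such as GLEW or GLAD that leaves unsupported entry points as null function pointers, which is what makes the call segfault):
GLint major = 0, minor = 0;
glGetIntegerv(GL_MAJOR_VERSION, &major);
glGetIntegerv(GL_MINOR_VERSION, &minor);
if (major > 4 || (major == 4 && minor >= 2)) {
    // glBindImageTexture is core only since GL 4.2
    glBindImageTexture(0, texture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
} else {
    std::cerr << "Image load/store requires GL 4.2+ or ARB_shader_image_load_store\n";
}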

Passing array of single precision floats to GLSL shader using glBindImageTexture

I want to pass a 2D array of single precision floats to a GLSL shader. I am trying to use glBindImageTexture for this purpose.
Before the main OpenGL render loop I define the texture and fill it with random values using
glGenTextures(1,@imagetexture);
glBindTexture(GL_TEXTURE_2D,imagetexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
setlength(arrayvalues,imwidth*imheight);
for loop:=0 to imwidth*imheight-1 do arrayvalues[loop]:=random;
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, imwidth, imheight, 0, GL_R32F, GL_FLOAT, arrayvalues);
setlength(arrayvalues,0);
glBindImageTexture( 0, //unit
imagetexture, //texture
0, //level
false, //layered
0, //layer
GL_READ_WRITE, //access
GL_R32F); //format
The shader code is the bare minimum for testing:
#version 430
layout (binding=0,r32f) uniform image2D my_image;
void main()
{
float f = imageLoad( my_image, ivec2(gl_FragCoord.xy) ).r;
//f = 0.5;
gl_FragColor=vec4(f,f,f,1.0);
}
The code all runs, but the output is always black. Any ideas what I have missed here?
The same surrounding code works fine for other shaders that do not rely on glBindImageTexture, so I am assuming that part is all OK.
If I uncomment the f = 0.5 line then I get a grey image, so everything else seems fine.
Working code below. The issue was that I was not originally calling glBindTexture before glBindImageTexture, and that I was using GL_R32F rather than GL_RED as the pixel-transfer format.
glGenTextures(1,@imagetexture);
glBindTexture(GL_TEXTURE_2D,imagetexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
setlength(arrayvalues,imwidth*imheight);
for loop:=0 to imwidth*imheight-1 do arrayvalues[loop]:=random;
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, imwidth, imheight, 0, GL_RED, GL_FLOAT, arrayvalues);
setlength(arrayvalues,0);
glBindImageTexture( 0, //unit
imagetexture, //texture
0, //level
false, //layered
0, //layer
GL_READ_WRITE, //access
GL_R32F); //format
If you use glGenTextures to create a texture name, that texture is considered unused until you first bind it. And I don't mean glBindImageTexture; I mean glBindTexture.
glBindImageTexture does not bind the texture to a texture unit; it binds the texture to an image unit for use as an image. It does not bind it to the context for manipulation purposes. So calls to functions like glTexParameter don't care about what you've bound to image units.
You have to bind the texture with glBindTexture, then establish its information like the size and so forth, and after that, bind it for use as an image.
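In other words, the required order looks like this (a sketch in C++, reusing the question's variable names; the Pascal version is the same once the glBindTexture call and the GL_RED pixel-transfer format are in place):
GLuint imagetexture = 0;
glGenTextures(1, &imagetexture);
glBindTexture(GL_TEXTURE_2D, imagetexture);   // makes the name an actual 2D texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F,       // internal format: 1-channel float
             imwidth, imheight, 0,
             GL_RED, GL_FLOAT, arrayvalues);  // pixel-transfer format is GL_RED, not GL_R32F
// Only after the storage exists, bind it to image unit 0 for image load/store;
// this binding is independent of the texture-unit binding above.
glBindImageTexture(0, imagetexture, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32F);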

Is a texture required to be bound to be sampled?

I was under the impression that if you set your sampler uniforms to the correct texture unit, it doesn't matter whether the currently bound texture target is 0 or not. For example:
glActiveTexture(GL_TEXTURE1);
glGenTextures(1, &mytexture);
glBindTexture(GL_TEXTURE_2D, mytexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, my_data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0); // This is the line I'm wondering about
Sometime later when drawing ...
glUniform1i(glGetUniformLocation(program, "mysampler"), 1);
//draw_stuff
Unfortunately, the screen is all black unless I keep GL_TEXTURE_2D bound to mytexture. Is it illegal to sample when GL_TEXTURE_2D is bound to 0?
Exactly. Think of GL_TEXTUREn as a slot that holds one binding per texture target type (GL_TEXTURE_2D, GL_TEXTURE_3D, etc.). By activating GL_TEXTURE1 and binding a texture to GL_TEXTURE_2D, you are telling the driver that the 2D texture in slot 1 is mytexture.
Then you need to pass this information to your shader as well:
glUniform1i(glGetUniformLocation(program, "mysampler"), 1);
This simply tells the sampler2D in your shader to look at the GL_TEXTURE_2D binding in slot 1. If you unbind the texture, the sampler has nothing to sample from, so the texture must still be bound when you draw.
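So at draw time the texture still has to be bound to that slot; a minimal sketch, reusing the question's names:
glUseProgram(program);
glActiveTexture(GL_TEXTURE1);                 // select slot 1 again
glBindTexture(GL_TEXTURE_2D, mytexture);      // rebind if it was unbound earlier
glUniform1i(glGetUniformLocation(program, "mysampler"), 1);
// draw_stuff: the sampler reads whatever is bound to GL_TEXTURE_2D on unit 1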

Using the depth buffer for floating point rendertargets in OpenGL 2.1

I would like to use the depth buffer to store the depth values of particles in eye space in a 2D texture by using OpenGL 2.1 / GLSL 1.2.
So far I have found a way to do it using the color buffer:
// create texture
glGenTextures(1, &g_hDepthTexture);
glBindTexture(GL_TEXTURE_2D, g_hDepthTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB, g_windowWidth, g_windowHeight, 0, GL_RGBA, GL_FLOAT, 0);
// create framebuffer
glGenFramebuffersEXT(1, &g_hFBO);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, g_hFBO);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, g_hDepthTexture, 0);
However, I don't need the GBA components. Hence, I have tried to use the depth buffer instead, but the following code clamps each value in the texture to 0...1:
// create texture
glGenTextures(1, &g_hDepthTexture);
glBindTexture(GL_TEXTURE_2D, g_hDepthTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, g_windowWidth, g_windowHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
// create framebuffer
glGenFramebuffersEXT(1, &g_hFBO);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, g_hFBO);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, g_hDepthTexture, 0);
I would like to know how to use the depth buffer (probably how to choose the correct internal format / format) so that the texture values are not clamped.
Normalized integer image formats are always clamped. That's why they're normalized integers. If you want an unclamped format, then you need floating point values.
I would suggest using an actual 1-channel floating-point image format, such as GL_R32F, or maybe GL_R16F, depending on how much precision you need. If you don't have GL 3.x hardware, you may be able to use GL_LUMINANCE32F_EXT, depending on what extensions are available.
BTW, if you're doing this for deferred rendering, don't bother. You can actually calculate the eye-space point directly from the regular depth buffer. Yes, really.
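A sketch of the single-channel route, reusing the question's FBO code (GL_R32F requires GL 3.0 or ARB_texture_rg; on plain 2.1 hardware a luminance float format from ARB_texture_float would take its place):
// create a 1-channel floating-point texture; values written to it are not clamped
glGenTextures(1, &g_hDepthTexture);
glBindTexture(GL_TEXTURE_2D, g_hDepthTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, g_windowWidth, g_windowHeight, 0,
             GL_RED, GL_FLOAT, 0);
// attach it as a color attachment and write eye-space depth to it in the shader
glGenFramebuffersEXT(1, &g_hFBO);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, g_hFBO);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, g_hDepthTexture, 0);
// a separate depth renderbuffer can still be attached if depth testing is needed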