Here is an example OpenGL command sequence:
glGenTextures(1, &texId);
std::cout << (int)glIsTexture(texId) << std::endl; //0
glBindTexture(GL_TEXTURE_2D, texId);
std::cout << (int)glIsTexture(texId) << std::endl; //1
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.getWidth(), img.getHeight(),
             0, GL_BGR, GL_UNSIGNED_BYTE, img.accessPixels()); // passing data == 0 gives the same glIsTexture results
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
std::cout << (int)glIsTexture(texId) << std::endl; //1
glDeleteTextures(1, &texId);
std::cout << (int)glIsTexture(texId) << std::endl; //0
I wonder when the glIsTexture function is useful. It looks like the main usage is checking whether a texture has been deleted. glIsTexture also returns false when a texture name has been generated but not yet bound and initialized. Do you know any other scenarios?
If the renderer is properly architected, then there is no need to check whether a texture has been deleted (the code would already know).
I assume the designers felt that exposing the state of a texture ID could be useful either for debugging or for implementing some kind of texture pool.
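For instance, it can back a cheap debug-time assertion. A minimal sketch, assuming a GL header is already included (the checkTextureAlive helper is hypothetical, not from the original post):

#include <cassert>

// Debug helper: fails if the name was deleted, or was generated but never
// bound (glIsTexture only returns GL_TRUE once the name has been bound).
void checkTextureAlive(GLuint texId)
{
    assert(glIsTexture(texId) == GL_TRUE && "stale or unbound texture name");
}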
Related
I created a class for OpenGL textures, and for some applications a static object would be useful.
The problem is that when I create a texture as a static or global object, my program crashes, while as a local object everything works fine. I have absolutely no idea what is going on.
This is my constructor:
Texture::Texture(std::string file, bool bitmap):
    textureName("tex"), transparent(false) {
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    int texWidth, texHeight;
    unsigned char *data = SOIL_load_image(file.c_str(), &texWidth, &texHeight, 0, SOIL_LOAD_RGBA);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texWidth, texHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    if(bitmap) {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
        // Mipmap modes are invalid for the mag filter; GL_LINEAR is used instead
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glGenerateMipmap(GL_TEXTURE_2D);
    }
    else {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    for(int i = 3; i < texWidth * texHeight * 4; i += 4) {
        if(data[i] != 0xff) {
            transparent = true;
            break;
        }
    }
    textureSize = glm::vec2(texWidth, texHeight);
    SOIL_free_image_data(data);
}
OpenGL must be initialized before you can call any OpenGL functions. (More accurately: you need to have "made an OpenGL context current")
The way you do that depends on which libraries you are using.
One thing all the libraries have in common is that there are some functions you need to call first, usually from main (for example, glfwCreateWindow and glfwMakeContextCurrent in GLFW).
Since global objects are created before main is called, you are trying to call OpenGL functions before you have an OpenGL context. This doesn't work.
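A common workaround, sketched here under the assumption of GLFW and the Texture class above (the file name is a placeholder), is to keep the static object behind a pointer and construct it only after the context exists:

#include <memory>

std::unique_ptr<Texture> g_texture; // empty during static initialization, so no GL calls yet

int main()
{
    glfwInit();
    GLFWwindow* window = glfwCreateWindow(800, 600, "demo", nullptr, nullptr);
    glfwMakeContextCurrent(window); // from here on, GL calls are legal
    g_texture = std::make_unique<Texture>("image.png", true);
    // ... main loop ...
    g_texture.reset(); // destroy while the context is still current
    glfwTerminate();
}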
I have a C++ console application that creates a window and initializes an OpenGL context.
IDE: Code::Blocks
Compiler: MinGW (32-bit)
OS: Windows 8.1 64-bit
So when I compile and run, I get a console and the window with the context. When I close the window first and then the console, everything is okay.
However, if I close the console first, I get a segmentation fault from glDeleteTextures in the virtual destructor.
Here is how I initialize the texture:
texture::texture(const string& fileName)
{
    int width, height, numComponents;
    unsigned char* imageData = stbi_load(fileName.c_str(), &width, &height, &numComponents, 4);
    if(imageData == NULL)
        cout << "Could not open " << fileName << "." << endl;
    glGenTextures(1, &m_texture);
    glBindTexture(GL_TEXTURE_2D, m_texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    // glTexParameteri is the right variant for enum-valued parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
    glBindTexture(GL_TEXTURE_2D, 0); // was GL_TEXTURE, which is not a valid bind target
    stbi_image_free(imageData);
}
void texture::useTexture(unsigned int textureUnit)
{
    // textureUnit is unsigned, so the original < 0 check was always false;
    // only the upper bound needs testing
    if(textureUnit >= 32)
    {
        cout << "Texture unit not between 0 and 31. Setting it to 0..." << endl;
        textureUnit = 0;
    }
    glActiveTexture(GL_TEXTURE0 + textureUnit);
    glBindTexture(GL_TEXTURE_2D, m_texture);
}
texture::~texture()
{
    glDeleteTextures(1, &m_texture);
}
Keep in mind that I wrote the same code but compiled it with 64-bit MinGW and used 64-bit GLFW, and it worked just fine... if that helps in any way.
I'm having trouble with the render-to-texture output from this OpenGL example: http://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_Text_Rendering_01
My framebuffer is setup as follows:
glGenTextures(1, &back_text);
glBindTexture(GL_TEXTURE_2D, back_text);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, WINDOW_WIDTH, WINDOW_HEIGHT, 0, GL_BGRA, GL_UNSIGNED_BYTE, 0);
glGenFramebuffersEXT(1, &font_buffer);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, font_buffer);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, back_text, 0);
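As an aside (not in the original snippet), it is worth verifying completeness right after attaching the texture; a minimal check in the same EXT style:

// Ask the driver whether the FBO is usable before rendering to it
GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
if (status != GL_FRAMEBUFFER_COMPLETE_EXT)
    std::cout << "FBO incomplete, status: 0x" << std::hex << status << std::endl;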
In the best case I get a filled rectangle of color where the letter should appear (it looks as if alpha is always 1.0).
I've tried the original example and it works correctly.
My question is: do I need to enable something to make this work, or do I need to take a different approach?
Edit:
It turns out that I need to use glutInitContextVersion(2, 0); instead of glutInitContextVersion(3, 2); and the world is a happy place! :)
I am generating a bitmap (1 byte per pixel) and attempting to use it for alpha blending. I can use the bitmap successfully, but the texture does not wrap lines as I expect.
When I dump the bitmap with the following code, it wraps where I would expect given the input image, and I get the set of Xs that I expect.
std::ofstream file{ R"(FileName.txt)" };
file << "width: " << gs.width() << "\theight: " << gs.height() << "\n";
for (int i = 0; i < gs.height(); ++i)
{
    for (int j = 0; j < gs.width(); ++j)
    {
        file << ((gs.alpha()[j + i * gs.width()]) ? 'X' : ' ');
    }
    file << "\n";
}
When I load the texture it appears that the width of the texture does not match gs.width(), since it wraps oddly.
This is the code that I use to create the texture and load it with the bitmap.
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, gs.width(), gs.height(), 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, gs.alpha());
Can anyone suggest what I might be doing wrong?
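One guess, offered as an assumption rather than something from the original thread: with 1 byte per pixel, OpenGL's default unpack alignment of 4 pads each row to a 4-byte boundary, which skews the image in exactly this way whenever gs.width() is not a multiple of 4. The usual fix is to declare the rows tightly packed before uploading:

// Rows of the 1-byte-per-pixel bitmap are tightly packed
// (the default GL_UNPACK_ALIGNMENT is 4)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, gs.width(), gs.height(),
             0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, gs.alpha());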
I've been messing around with framebuffers and render-to-texture, and I came across the need to blit them. On some machines I get a GL_INVALID_OPERATION right after the glBlitFramebuffer call. Each texture bound to the framebuffers is set up the exact same way, all with the same size and parameters. Also, when I try to blit one entire texture (previously rendered to successfully) to another framebuffer where only the destination rectangle to write to is smaller than the rectangle to read from (e.g. when I want to blit it to a quarter of the screen), it throws GL_INVALID_OPERATION too.
EDIT: Actually, it always throws the error whenever the rectangles to read from and draw to have different sizes, so I can't blit to a texture of a different size, or to a same-sized texture with a different-sized 'render to' area...?
Every time I blit to a manually generated framebuffer, its status is checked through glCheckFramebufferStatus, and it always returns GL_FRAMEBUFFER_COMPLETE.
-BIGGEST SNIP EVER- (see below for shorter source code; it obviously has a couple of C++ errors and is not complete, but it's only there for the GL calls)
The OpenGL error occurs when I call the last method of the viewport (Viewport::blit) with the screen framebuffer as the target (by passing NULL). It first sets the read buffer of its own framebuffer (the draw buffers were already set), and then it calls RenderTarget::blit, which calls glBlitFramebuffer. In the blit method it binds both buffers, and as you can see it calls glCheckFramebufferStatus there, which does not report an error.
I've been reading this over and over, but I can't seem to find the cause. When I blit the color buffer I use GL_LINEAR; otherwise I use GL_NEAREST. All color buffers use GL_RGB32F as the internal format, and the depth buffer (which I never blit) uses GL_DEPTH_COMPONENT32F.
EDIT: a shorter example; I just took all the GL calls and filled in the parameters I used.
glBindFramebuffer(GL_READ_FRAMEBUFFER, _GL_Framebuffer);
glReadBuffer(GL_COLOR_ATTACHMENT0 + index);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
// OpenGL error check, does not return an error
glBindFramebuffer(GL_READ_FRAMEBUFFER, _GL_Framebuffer);
GLenum status = glCheckFramebufferStatus(GL_READ_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
{
    // Some error checking, fortunately status always turns out to be complete
}
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, screenWidth, screenHeight, 0, 0, screenWidth, screenHeight, target, (target == GL_COLOR_BUFFER_BIT) ? GL_LINEAR : GL_NEAREST);
// If the source/destination read/draw rectangles differ in size, GL_INVALID_OPERATION is caught here
And the Framebuffer/Texture creation:
glGenFramebuffers(1, &_GL_Framebuffer); // note the plural; glGenFramebuffer does not exist
glGenTextures(1, &_GL_ZBuffer);
glBindTexture(GL_TEXTURE_2D, _GL_ZBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, screenWidth, screenHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
glBindFramebuffer(GL_FRAMEBUFFER, _GL_Framebuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, _GL_ZBuffer, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
int writeIndices[BUFFER_COUNT];
for(unsigned int i = 0; i < BUFFER_COUNT; ++i)
{
    writeIndices[i] = i;
    glGenTextures(1, &_GL_Texture);
    glBindTexture(GL_TEXTURE_2D, _GL_Texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, screenWidth, screenHeight, 0, GL_RGB, GL_FLOAT, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glBindTexture(GL_TEXTURE_2D, 0);
    glBindFramebuffer(GL_FRAMEBUFFER, _GL_Framebuffer);
    glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, _GL_Texture, 0);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    // In the actual code each texture is obviously saved in an object
}
GLenum *enums = new GLenum[BUFFER_COUNT];
for(unsigned int i = 0; i < BUFFER_COUNT; ++i)
{
    // Get index and validate
    int index = *(indices + i); // indices = writeIndices
    if(index < 0 || index >= maxAttachments)
    {
        delete[] enums;
        return false;
    }
    // Set index
    enums[i] = GL_COLOR_ATTACHMENT0 + index;
}
// Set indices
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _GL_Framebuffer);
glDrawBuffers(BUFFER_COUNT, enums);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
delete[] enums;
// OpenGL error check, no errors
After some careful reading I found out that a difference in multisampling was the problem. The 'main' FBO was set up by SFML, so simply setting the anti-aliasing level to 0 on startup partially solved the problem.
It now blits even if the draw/read rectangles are unequal in size, but it keeps crashing on some machines where it is SUPPOSED to work.
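That behavior matches the spec: glBlitFramebuffer raises GL_INVALID_OPERATION if the read framebuffer is multisampled and the source and destination rectangles differ in size, since a multisample resolve must be 1:1. A quick runtime check, sketched as an assumption about this setup rather than taken from the original code:

// GL_SAMPLE_BUFFERS describes the currently bound draw framebuffer;
// bind the suspect FBO (here the SFML-created default, 0) and query it.
GLint sampleBuffers = 0;
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glGetIntegerv(GL_SAMPLE_BUFFERS, &sampleBuffers);
if (sampleBuffers > 0)
{
    // Non-1:1 blits from/to this FBO will fail: either resolve with an
    // equal-sized blit first, or request 0 antialiasing samples on startup.
}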