Drawing multiple 2D textures in OpenGL - c++

I am writing my own game library in C++ using Visual Studio 2013 Ultimate. I have managed to get some basic drawing working for 1 texture, but when I add another it appears to 'overwrite' the previous.
For example, if I have texture A and texture B, B will get drawn where A should have been. I have checked that I am not using the same texture ID for each texture.
I have taken a look at OpenGL trying to Draw Multiple 2d Textures, only 1st one appears, but this did not solve my problem.
I am quite new to OpenGL and lower level graphics programming in general, could someone please explain to me what is wrong with my code?
Here is my texture loading code:
Texture::Texture(const std::string& filePath)
{
    // Decode the image file with stb_image; PixelData takes ownership of the
    // returned buffer. Width/height/component count are written into members.
    m_pixelData = PixelData(stbi_load(filePath.c_str(),
                                      &m_size.x,
                                      &m_size.y,
                                      &m_numberOfImageComponents,
                                      0 /* 0 = keep the file's own component count */));
    // Create one texture object and make it current for the setup below.
    glGenTextures(1, &m_textureID);
    glBindTexture(GL_TEXTURE_2D, m_textureID);
    // Rows in the stb_image buffer are tightly packed (1-byte alignment).
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    // Clamp texture coordinates outside [0,1] and use bilinear filtering.
    // NOTE(review): GL_CLAMP is deprecated; GL_CLAMP_TO_EDGE is the usual
    // replacement — confirm before switching, as edge sampling differs.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    // MODULATE multiplies the texel colour with the current vertex colour.
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    // Map the number of image components to a GL pixel format.
    GLenum imageFormat;
    switch (m_numberOfImageComponents)
    {
    case 4:
        imageFormat = GL_RGBA;
        break;
    case 3:
        imageFormat = GL_RGB;
        break;
    default:
        DDGL_LOG(ERROR, "No image formats with " + std::to_string(m_numberOfImageComponents) + " components known");
        // BUGFIX: a bare `throw;` with no exception in flight calls
        // std::terminate. Throw a real exception object so callers can catch.
        throw std::runtime_error("Unsupported number of image components");
    }
    // Upload the pixel data to level 0 of the currently bound texture.
    glTexImage2D(GL_TEXTURE_2D,
                 0,
                 imageFormat,
                 m_size.x,
                 m_size.y,
                 0,
                 imageFormat,
                 GL_UNSIGNED_BYTE,
                 m_pixelData.get());
}
Here is my 'start draw' code
void GraphicsAPIWrapper::startDraw()
{
// Begin a new frame: reset the projection matrix and clear colour + depth.
// TODO does this need to be called every frame? (Resetting an already-identity
// projection each frame is redundant but harmless.)
glMatrixMode(GL_PROJECTION); // select the matrix
glLoadIdentity(); //reset the matrix
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
Here is my drawing code
void GraphicsAPIWrapper::draw(std::shared_ptr<IDrawableGameObject> drawableObject)
{
    glPushMatrix();
    // Convert pixel coordinates to normalised window-relative values.
    const Vector2F floatWindowSize ((float) getWindowSize().x, (float) getWindowSize().y);
    const Vector2F floatObjectSize ((float) drawableObject->getSize().x, (float) drawableObject->getSize().y);
    const Vector2F relativeObjectSize (floatObjectSize.x / floatWindowSize.x, floatObjectSize.y / floatWindowSize.y);
    const Vector2F relativeObjectPosition (drawableObject->getPosition().x / floatWindowSize.x, drawableObject->getPosition().y / floatWindowSize.y);
    // Position and rotate the object in the modelview matrix.
    glTranslatef(relativeObjectPosition.x, relativeObjectPosition.y, 0.0f);
    glRotatef(drawableObject->getRotation(), 0.0f, 0.0f, 1.0f);
    // BUGFIX: glBindTexture must be called OUTSIDE a glBegin/glEnd pair.
    // Inside the pair the call is invalid (it only records a GL error), so the
    // texture bound last elsewhere — the most recently loaded one — was used
    // for every object. Binding here makes each object use its own texture.
    glBindTexture(GL_TEXTURE_2D, drawableObject->getTextureID());
    // TODO should this be triangles or quads? I've been told triangles are
    // generally higher performance; right now QUADS are simpler though.
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 1.0f); glVertex3f(-relativeObjectSize.x, -relativeObjectSize.y, 1.0f);
    glTexCoord2f(1.0f, 1.0f); glVertex3f(relativeObjectSize.x, -relativeObjectSize.y, 1.0f);
    glTexCoord2f(1.0f, 0.0f); glVertex3f(relativeObjectSize.x, relativeObjectSize.y, 1.0f);
    glTexCoord2f(0.0f, 0.0f); glVertex3f(-relativeObjectSize.x, relativeObjectSize.y, 1.0f);
    glEnd();
    glPopMatrix();
}
Here is my 'end draw' code
void GraphicsAPIWrapper::endDraw()
{
    // Present the finished frame by swapping the GLFW back/front buffers.
    auto* const windowHandle = m_window->getWindow();
    glfwSwapBuffers(windowHandle);
}
So, just to be extremely clear, the behavior I am getting is that one texture is getting drawn everywhere, rather than different textures as desired.

You are calling glBindTexture() inside a glBegin/glEnd-Block, which is invalid and will have no effect besides generating an error. Hence, the one really bound is the one last bound before that - very likely to be the bind operation when you load your texture, so the last one loaded is the one shown for all objects...

Related

How can i map textures to objects in legacy OpenGL 1.x?

I want to draw some indicators for use in an older software using SDL2 and OpenGL 1.6 (therefore i can't switch to modern OpenGL for now) and have decided to build them from basic shapes. I've gotten the drawing logic working properly, but can't get the texturing to work. It actually completely broke the program. I want to keep an OOP approach and be able to draw any object/shape separately, by just calling a drawing method.
At first i create the window:
void SDLWindow::createWindow(const std::string windowTitle)
{
if (SDL_Init(SDL_INIT_VIDEO) < 0)
{
puts("Could not init SDL");
return;
}
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 6);
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, 1);
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, 4);
SDL_GL_SetAttribute(SDL_GL_ACCELERATED_VISUAL, 1);
Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_ALWAYS_ON_TOP
| SDL_WINDOW_RESIZABLE;
this->mainWindow = SDL_CreateWindow(windowTitle.c_str(),
SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, this->width, this->height,
flags);
if (nullptr == mainWindow)
{
puts("Window could not be created");
return;
}
this->context = SDL_GL_CreateContext(this->mainWindow);
if (nullptr == this->context)
{
puts("Could not create context");
return;
}
SDL_GL_SetSwapInterval(1);
SDL_RaiseWindow(this->mainWindow);
}
Then i initialise OpenGL specifics:
void SDLWindow::initGL()
{
// Fixed-function GL setup: depth defaults, viewport, and a 2D pixel-space
// orthographic projection.
glClearDepth(1.0f);
glDepthFunc(GL_LEQUAL); // Type Of Depth Testing
// glEnable(GL_DEPTH_TEST);
glViewport(0, 0, this->width, this->height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// Map x to [0,width] and y to [0,height] with y flipped so the origin is the
// top-left corner (typical 2D/UI convention).
glOrtho(0.0f, this->width, this->height, 0.0f, 0.0f, 1.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// MSAA buffers were requested via SDL_GL_MULTISAMPLESAMPLES at creation time.
glEnable(GL_MULTISAMPLE);
}
These settings are the ones used in the main app and, until i started adding textures, everything was working properly.
Loading the texture from a file:
void SDLWindow::loadTextureFromFile(char* path){
    // Load a BMP from disk and upload it as this window's 2D texture.
    SDL_Surface* Surface = SDL_LoadBMP(path);
    // BUGFIX: SDL_LoadBMP returns NULL on failure (e.g. a wrong path — beware
    // unescaped backslashes in Windows path literals). Without this check the
    // Surface dereferences below crash the program.
    if (nullptr == Surface)
    {
        puts("Could not load BMP file");
        return;
    }
    glGenTextures(1, &this->textureID);
    glBindTexture(GL_TEXTURE_2D, this->textureID);
    glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    // NOTE(review): BMPs are usually stored as BGR — if colours look swapped,
    // use GL_BGR as the source format. Also assumes a 24-bit surface with
    // tightly packed rows; confirm Surface->format before relying on this.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, Surface->w, Surface->h, 0, GL_RGB, GL_UNSIGNED_BYTE, Surface->pixels);
    SDL_FreeSurface(Surface);
}
Drawing a primitive shape with the texture applied to it:
void SDLWindow::drawBasicShape()
{
    // Draw a textured quad covering the central half of the window.
    const int left   = this->width / 4;
    const int right  = 3 * this->width / 4;
    const int top    = this->height / 4;
    const int bottom = 3 * this->height / 4;

    glBindTexture(GL_TEXTURE_2D, this->textureID); // commented this but doesn't change anything
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(left, top);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(right, top);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(right, bottom);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(left, bottom);
    glEnd();
}
In the main loop, i make a call to renderRectCore() which first loads the texture and then draws the shape:
void SDLWindow::renderRectCore()
{
//Clear color buffer
glClear( GL_COLOR_BUFFER_BIT);
// glColor3f(1.0f, 0.0f, 0.0f);
glEnable(GL_TEXTURE_2D);
loadTextureFromFile("D:\Workspace\Eclipse\SDL_test\sample.bmp");
drawBasicShape();
}
As far as i can tell, the function that loads the texture somehow breaks the program and causes the window to open blank and immediately close. Removing the call to loadTextureFromFile simply draws the white rectangle, but keeps the window functioning as intended. When instantiating the class SDLWindow, textureID is initialized to 0.
Use a debugger to see which line of code is failing. But most likely SDL_LoadBMP returns NULL because
"D:\Workspace\Eclipse\SDL_test\sample.bmp"
is not the correct path.
You have to properly escape backslash characters in C++ string literals:
"D:\\Workspace\\Eclipse\\SDL_test\\sample.bmp"
(Or just use forward slashes as they are actually also supported by Windows).
Your code is also lacking the most basic error handling and will crash if the file is not found or not readable (or not the expected data format).
glEnable(GL_TEXTURE_2D);
loadTextureFromFile("D:\Workspace\Eclipse\SDL_test\sample.bmp");
There is no point in loading texture in the main loop.
Move it to init.

glutpostredisplay() equivalent in winapi

I am following this article to render a video onto a texture using OpenGL and winforms using C++.
I have changed the code in the renderer as follows. But the glutPostRedisplay(); is not working. The same logic works well when I am creating a OpenGL window and rendering over there. But does not seem to work well in winforms.
From what I understand, glutPostRedisplay is trying to refresh my main WinForms window and not the OpenGL viewport. I am not sure how to refresh my viewport.
void OpenGLForm::COpenGL::Render(System::Void)
{
    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    // These are necessary if using glTexImage2D instead of gluBuild2DMipmaps
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
    // Draw a textured quad
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(0.0f, 0.0f);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(frame_width, 0.0f);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(frame_width, frame_height);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(0.0f, frame_height);
    glEnd();
    glFlush();
    //glutSwapBuffers();
    OpenGLForm::COpenGL::SwapOpenGLBuffers();
    // Get data from the camera.
    // NOTE(review): frame_width * frame_height uint32_t elements already hold
    // 4 bytes per pixel; the extra "* 4" looks like a 4x over-allocation —
    // confirm against the Thermal_GetDisplayImage contract before shrinking.
    uint32_t* buffer = new uint32_t[frame_width * frame_height * 4];
    if (display_mode == DISPLAY_ARGB) {
        // Pass a pointer to the texture directly into Thermal_GetImage for maximum performance
        status = Thermal_GetDisplayImage(camera, buffer, (uint32_t)frame_pixels);
        glTexImage2D(
            GL_TEXTURE_2D,
            0,
            GL_RGBA8,
            frame_width,
            frame_height,
            0,
            GL_RGBA,
            GL_UNSIGNED_BYTE,
            buffer);
        // NOTE(review): glutPostRedisplay() only affects windows created with
        // glutCreateWindow(); in a WinForms host, invalidate the client area
        // instead, e.g. InvalidateRect(hwnd, NULL, TRUE).
        glutPostRedisplay();
    }
    // BUGFIX: free the buffer on every path — previously it was only deleted
    // inside the DISPLAY_ARGB branch, leaking it every frame otherwise.
    delete[] buffer;
}
void OpenGLForm::COpenGL::SwapOpenGLBuffers(System::Void)
{
// Present the back buffer through the Win32 device context (double buffering).
SwapBuffers(m_hDC);
}
glutPostRedisplay only works with a "glut"-window. (glutCreateWindow). You have to use a Win-API function to invalidate the client area of the window (e.g. InvalidateRect):
InvalidateRect(HWND, NULL, TRUE);

No alpha blending working for me in OpenGL 4.6

I am working on a particle system using OpenGL and version 460 shaders that are compiled to SPIR-V.
My particles are based on PNG textures with alpha and it's actually pretty straight forward, I have done it on earlier OpenGL versions before.
However, the particle in the output window remains a square; the corners are black — meaning the alpha channel is drawn as black. I used different PNG files, one with the particle as grey on alpha and one as grey on black. I set the fourth component in the fragment shader to 0.0 or 0.5 to see any change, but nothing changed. I tried to change the formats in the loadTexture function, but still the same.
I tried all different kinds of blending modes and changed the order of the lines. I noticed in RenderDoc, that the target blend is disabled. But no change of glEnable(GL_BLEND) to another position was helpful.
I would really appreciate your help!
// init stuff
glEnable(GL_POINT_SPRITE);
initbasicShaders(); // vertex, fragment
initTexture();
...
void application::initTexture()
{
m_ParticleTex = Texture(); // empty constructor
m_ParticleTex.loadTexture(TEXTURE_PATH);
GLint baseImageLoc = glGetUniformLocation(m_RenderProgramHandle, "u_Texture");
glUseProgram(m_RenderProgramHandle);
glUniform1i(baseImageLoc, 0);
glActiveTexture(GL_TEXTURE0 + 0);
glBindTexture(GL_TEXTURE_2D, m_ParticleTex.getTextureID());
}
// using the library stb_image
// using the library stb_image
void Texture::loadTexture(const char* fileName)
{
    int channels, width, height;
    m_FilePath = fileName;
    // Force 4 components (RGBA) so the GL_RGBA upload below always matches.
    unsigned char * localBuffer = stbi_load(fileName, &width, &height, &channels, 4);
    // BUGFIX: stbi_load returns NULL on failure; previously a null buffer was
    // handed to glTexImage2D, creating a texture with undefined contents.
    if (!localBuffer) {
        return;
    }
    glGenTextures(1, &m_TextureID);
    glBindTexture(GL_TEXTURE_2D, m_TextureID);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, localBuffer);
    glBindTexture(GL_TEXTURE_2D, 0);
    stbi_image_free(localBuffer);
}
...
...
...
// Now - the render loop:
void application::runOpenGLBuffer()
{
    Log::logInfoRun(m_Name, "OpenGL buffer");

    // Clear to opaque white.
    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Standard "over" alpha blending for the particle sprites;
    // depth testing off so overlapping particles blend instead of occlude.
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_POINT_SMOOTH);
    glEnable(GL_ALPHA_TEST);
    glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
    glPointSize(PARTICLE_SIZE);

    // Render every particle as a point sprite.
    glUseProgram(m_RenderProgramHandle);
    this->updateTextures();
    glDrawArrays(GL_POINTS, 0, NUM_PARTICLES);
}
...
// and the fragment shader
#version 460
#extension GL_ARB_separate_shader_objects : enable
// SSBO at binding 3: per-particle density values, indexed elsewhere.
layout(std430, binding = 3) buffer density_block
{
float density[];
};
// NOTE(review): u_Texture (sampler2D), densityColor, and the `color` output
// are not declared in this excerpt — presumably declared in the omitted part
// of the shader; verify, otherwise this shader will not compile.
void main()
{
// densityColor is a colour derived from the density buffer (defined elsewhere)
vec4 particleColor = vec4(texture(u_Texture, gl_PointCoord) * densityColor);
color = particleColor;
}
And here the output in the render frame and information from RenderDoc:
The parameter to glEnable (like GL_BLEND or GL_POINT_SMOOTH) is a enumerator constant rather than a bits of a bit field. Different enumerator constants can't be concatenated by the | operator. Each state has to be enabled separately:
glEnable(GL_BLEND | GL_POINT_SMOOTH);
glEnable(GL_BLEND);
glEnable(GL_POINT_SMOOTH);
Note, the bitwise or operation calculates a new value by performing "or" to each bit of both values. The result may have not any meaning to glEnable. This causes that neither GL_BLEND nor GL_POINT_SMOOTH is enabled in your code.

OpenGL rendering with texture unclear

I'm using the latest version of OpenGL. But when I create textures using the following function is written in C++:
GLuint Texture::Generate2dTexture(int width, int height, char* data, int length)
{
    // Create a texture object and build its full mip chain from BGR byte data.
    GLuint textureIndex = 0;
    glGenTextures(1, &textureIndex);
    glBindTexture(GL_TEXTURE_2D, textureIndex);
    gluBuild2DMipmaps(GL_TEXTURE_2D, 3, width, height, GL_BGR_EXT, GL_UNSIGNED_BYTE, data);
    // Trilinear minification across the mip chain, bilinear magnification.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    return textureIndex;
}
and render using:
void Renderer::DrawRectangle(GLuint textureID, RECTD rect)
{
    // Render `rect` as a textured quad; V coordinates are flipped
    // (top gets v=1) so the image appears upright on screen.
    glBindTexture(GL_TEXTURE_2D, textureID);
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 1.0f); glVertex2d(rect.left, rect.top);
    glTexCoord2f(0.0f, 0.0f); glVertex2d(rect.left, rect.bottom);
    glTexCoord2f(1.0f, 0.0f); glVertex2d(rect.right, rect.bottom);
    glTexCoord2f(1.0f, 1.0f); glVertex2d(rect.right, rect.top);
    glEnd();
}
The textures appear blurry after rendering — quite different from how the image looks in Microsoft's image viewer.
What causes this to happen, and how can I avoid it?
Check the dimensions of your image. Most image loaders will expect images to be presented in powers of 2. So, an 8x8 pixel image should look clear (as opposed to stretched out or garbled). Try clipping the image to 32x32 or 128x128 etc to see if that helps. If the image you want cannot be represented where height=width you can still adjust the canvas so it is something like 128x128 and then simply use the UV coordinates to take the portion of the image you want for your texture.
Also, I think if you disable bilinear filtering any blurring should be taken care of:
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
Please note this is not a definite answer - I don't have enough rep to comment...

glTexSubImage2D does not work correctly

I've written a very simple OpenGL application. Its goal is to load a texture and draw it on a plane. If I use the function 'glTexSubImage2D' the plane is not textured and the function 'glGetError' returns the error '1281' (invalid value). However if I use the function 'glTexImage2D' my plane plane is textured correctly (and I have no error).
Here's a piece of my code :
void core::RootDevice::LoadTexture(char const *pFileName)
{
SDL_Surface *pSurface = IMG_Load(pFileName);
char *pPixels = reinterpret_cast<char*>(pSurface->pixels);
uint32_t bytePerPixel = pSurface->format->BitsPerPixel;
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
{
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
{
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pSurface->w, //NO ERROR : All is ok
//pSurface->h, 0, GL_RGB, GL_UNSIGNED_BYTE, pPixels);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pSurface->w, //ERROR : 1281
pSurface->h, GL_RGB, GL_UNSIGNED_BYTE, pPixels);
std::cout << glGetError() << std::endl;
getchar();
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
glBindTexture(GL_TEXTURE_2D, 0);
}
And the rendering code :
void core::RootDevice::Render(void)
{
    // Draw a quad spanning the full NDC range with the loaded texture
    // bound on texture unit 0.
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, textureId);
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 0.0f);
    glTexCoord2f(1.0f, 0.0f); glVertex3f(1.0f, -1.0f, 0.0f);
    glTexCoord2f(1.0f, 1.0f); glVertex3f(1.0f, 1.0f, 0.0f);
    glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 0.0f);
    glEnd();
    glBindTexture(GL_TEXTURE_2D, 0); // leave no texture bound
}
And the result is the followings:
Can anyone help me?
glTexSubImage2D() is used to replace parts or all of a texture that already has image data allocated. You have to call glTexImage2D() on the texture at least once before you can use glTexSubImage2D() on it. Unlike glTexSubImage2D(), glTexImage2D() allocates image data. You can use NULL for the last (data) argument to glTexImage2D() if you only want to allocate image data, and later set the data with glTexSubImage2D().
Newer versions of OpenGL (4.4 and ES 3.0) have a new entry point glTexStorage2D() that can be used as an alternative to glTexImage2D() to allocate the image data for a texture without specifying the data. It is similar to calling glTexImage2D() with data = NULL, but also allows specifying ahead of time if space for mipmaps will be needed.