I'm using Qt to create a simple sprite editor with OpenGL, but the image just doesn't show up; all I get is a white quad on the screen.
I have checked my code and I think everything is right: the quad is the same size as the image, and the texture id is not 0.
Here's my code:
Initialization:
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glDisable(GL_CULL_FACE);
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, win_width, win_height, 0, -1, 1);
glMatrixMode(GL_MODELVIEW);
Texture loading:
QImage b = QImage(filename);
m_texture = QGLWidget::convertToGLFormat(b);
glGenTextures(1, &m_id);
glBindTexture(GL_TEXTURE_2D, m_id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
             m_texture.width(),
             m_texture.height(),
             0, GL_RGBA, GL_UNSIGNED_BYTE,
             m_texture.bits());
Render:
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glBindTexture(GL_TEXTURE_2D, m_id);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex2i(0, 0);
glTexCoord2f(1.0f, 0.0f);
glVertex2i(m_texture.width(), 0);
glTexCoord2f(1.0f, 1.0f);
glVertex2i(m_texture.width(), m_texture.height());
glTexCoord2f(0.0f, 1.0f);
glVertex2i(0, m_texture.height());
glEnd();
glBindTexture(GL_TEXTURE_2D, 0);
This is what I'm getting so far (the quad renders plain white, with no image on it):
Try manually setting the OpenGL version to 3.2.
QGLFormat glFormat;
glFormat.setVersion(3, 2);
glFormat.setProfile(QGLFormat::CoreProfile);
QGLFormat::setDefaultFormat(glFormat);
After you do that, print your OpenGL version with glGetString().
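For example, a minimal check (glGetString(GL_VERSION) needs a current context, so put it in initializeGL(); qDebug is just one way to print it):
// Inside initializeGL(), once the context is current
qDebug() << "OpenGL version:" << reinterpret_cast<const char*>(glGetString(GL_VERSION));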
If you're still getting 2.1, you didn't set it in the correct OpenGL context. I'm not sure exactly how you're making your OpenGL calls, but I was using Qt Creator Designer and embedding a QGLWidget within the main window, so I was able to set the OpenGL version by setting it within the MainWindow constructor:
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    QGLFormat glFormat;
    glFormat.setVersion(3, 2);
    glFormat.setProfile(QGLFormat::CoreProfile);
    QGLFormat::setDefaultFormat(glFormat);
    ui->setupUi(this);
}
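The key point is that QGLFormat::setDefaultFormat() only affects QGLWidgets constructed after it runs, and ui->setupUi(this) is what constructs the embedded widget, so the format has to be set before that call.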
I want to draw some indicators for use in an older piece of software using SDL2 and OpenGL 1.6 (therefore I can't switch to modern OpenGL for now) and have decided to build them from basic shapes. I've gotten the drawing logic working properly, but can't get the texturing to work; it actually completely broke the program. I want to keep an OOP approach and be able to draw any object/shape separately, by just calling a drawing method.
First, I create the window:
void SDLWindow::createWindow(const std::string windowTitle)
{
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
    {
        puts("Could not init SDL");
        return;
    }
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 6);
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, 1);
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, 4);
    SDL_GL_SetAttribute(SDL_GL_ACCELERATED_VISUAL, 1);
    Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_ALWAYS_ON_TOP
                 | SDL_WINDOW_RESIZABLE;
    this->mainWindow = SDL_CreateWindow(windowTitle.c_str(),
        SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, this->width, this->height,
        flags);
    if (nullptr == mainWindow)
    {
        puts("Window could not be created");
        return;
    }
    this->context = SDL_GL_CreateContext(this->mainWindow);
    if (nullptr == this->context)
    {
        puts("Could not create context");
        return;
    }
    SDL_GL_SetSwapInterval(1);
    SDL_RaiseWindow(this->mainWindow);
}
Then I initialise the OpenGL specifics:
void SDLWindow::initGL()
{
    glClearDepth(1.0f);
    glDepthFunc(GL_LEQUAL); // type of depth testing
    // glEnable(GL_DEPTH_TEST);
    glViewport(0, 0, this->width, this->height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0f, this->width, this->height, 0.0f, 0.0f, 1.0f);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glEnable(GL_MULTISAMPLE);
}
These settings are the ones used in the main app and, until I started adding textures, everything was working properly.
Loading the texture from a file:
void SDLWindow::loadTextureFromFile(char* path)
{
    SDL_Surface* Surface = SDL_LoadBMP(path);
    glGenTextures(1, &this->textureID);
    glBindTexture(GL_TEXTURE_2D, this->textureID);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, Surface->w, Surface->h, 0, GL_RGB, GL_UNSIGNED_BYTE, Surface->pixels);
    SDL_FreeSurface(Surface);
}
Drawing a primitive shape with the texture applied to it:
void SDLWindow::drawBasicShape()
{
    glBindTexture(GL_TEXTURE_2D, this->textureID); // commented this out, but it doesn't change anything
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f);
    glVertex2f(this->width / 4, this->height / 4);
    glTexCoord2f(1.0f, 0.0f);
    glVertex2f(3 * this->width / 4, this->height / 4);
    glTexCoord2f(1.0f, 1.0f);
    glVertex2f(3 * this->width / 4, 3 * this->height / 4);
    glTexCoord2f(0.0f, 1.0f);
    glVertex2f(this->width / 4, 3 * this->height / 4);
    glEnd();
}
In the main loop, I make a call to renderRectCore(), which first loads the texture and then draws the shape:
void SDLWindow::renderRectCore()
{
    // Clear color buffer
    glClear(GL_COLOR_BUFFER_BIT);
    // glColor3f(1.0f, 0.0f, 0.0f);
    glEnable(GL_TEXTURE_2D);
    loadTextureFromFile("D:\Workspace\Eclipse\SDL_test\sample.bmp");
    drawBasicShape();
}
As far as I can tell, the function that loads the texture somehow breaks the program and causes the window to open blank and immediately close. Removing the call to loadTextureFromFile simply draws the white rectangle, but keeps the window functioning as intended. When the class SDLWindow is instantiated, textureID is initialized to 0.
Use a debugger to see which line of code is failing. But most likely SDL_LoadBMP returns NULL because
"D:\Workspace\Eclipse\SDL_test\sample.bmp"
is not the correct path.
You have to properly escape backslash characters in C++ string literals:
"D:\\Workspace\\Eclipse\\SDL_test\\sample.bmp"
(Or just use forward slashes as they are actually also supported by Windows).
Your code is also lacking the most basic error handling and will crash if the file is not found or not readable (or not in the expected data format).
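A minimal sketch of such a check (assuming the same names as in your code):
SDL_Surface* Surface = SDL_LoadBMP(path);
if (nullptr == Surface)
{
    // SDL_GetError() tells you why the load failed (bad path, unsupported format, ...)
    printf("Could not load BMP: %s\n", SDL_GetError());
    return;
}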
glEnable(GL_TEXTURE_2D);
loadTextureFromFile("D:\Workspace\Eclipse\SDL_test\sample.bmp");
There is no point in loading the texture in the main loop.
Move it to init.
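As a sketch (assuming the same class layout as above), the split could look like this:
void SDLWindow::initGL()
{
    // ... existing GL setup ...
    loadTextureFromFile("D:/Workspace/Eclipse/SDL_test/sample.bmp"); // load once
}

void SDLWindow::renderRectCore()
{
    glClear(GL_COLOR_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    drawBasicShape(); // per frame: only bind and draw
}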
I am following this article to render a video onto a texture using OpenGL and WinForms in C++.
I have changed the code in the renderer as follows, but glutPostRedisplay() is not working. The same logic works well when I create an OpenGL window and render there, but it does not seem to work in WinForms.
From what I understand, glutPostRedisplay is trying to refresh my main WinForms window and not the OpenGL viewport. I am not sure how to refresh my viewport.
void OpenGLForm::COpenGL::Render(System::Void)
{
    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    // These are necessary if using glTexImage2D instead of gluBuild2DMipmaps
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
    // Draw a textured quad
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(0.0f, 0.0f);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(frame_width, 0.0f);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(frame_width, frame_height);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(0.0f, frame_height);
    glEnd();
    glFlush();
    //glutSwapBuffers();
    OpenGLForm::COpenGL::SwapOpenGLBuffers();
    // Get data from the camera
    uint32_t* buffer = new uint32_t[frame_width * frame_height * 4];
    if (display_mode == DISPLAY_ARGB) {
        // Pass a pointer to the texture directly into Thermal_GetImage for maximum performance
        status = Thermal_GetDisplayImage(camera, buffer, (uint32_t)frame_pixels);
        glTexImage2D(
            GL_TEXTURE_2D,
            0,
            GL_RGBA8,
            frame_width,
            frame_height,
            0,
            GL_RGBA,
            GL_UNSIGNED_BYTE,
            buffer);
        // Clean up buffer
        delete[] buffer;
        // Update display
        glutPostRedisplay();
    }
}
void OpenGLForm::COpenGL::SwapOpenGLBuffers(System::Void)
{
    SwapBuffers(m_hDC);
}
glutPostRedisplay only works with a "glut" window (glutCreateWindow). You have to use a Win-API function to invalidate the client area of the window (e.g. InvalidateRect):
InvalidateRect(hWnd, NULL, TRUE); // hWnd: handle of the window hosting the GL viewport
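Passing TRUE as the last argument also erases the background before repainting. The call makes Windows post a WM_PAINT to that window, which is what triggers the redraw; make sure the handle you pass is the one of the OpenGL control, not the main form.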
I am writing a rendering system in CUDA and want results to be quickly displayed via OpenGL, without touching main memory. I basically do the following:
Create and initialize OpenGL texture, and register it in CUDA as cudaGraphicsResource
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
void initialize() {
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
Whenever the view is resized, I resize the viewport and the texture image appropriately:
void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
}
Then, each frame, I map the graphics resource as a cudaSurfaceObject via a cudaArray, call the rendering kernel on it, then unmap and synchronize to let OpenGL draw a fullscreen quad with this texture:
void renderFrame() {
    cudaGraphicsMapResources(1, &viewCudaResource);
    {
        cudaArray_t viewCudaArray;
        cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0);
        cudaResourceDesc viewCudaArrayResourceDesc;
        {
            viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
            viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
        }
        cudaSurfaceObject_t viewCudaSurfaceObject;
        cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc);
        {
            invokeRenderingKernel(viewCudaSurfaceObject);
        }
        cudaDestroySurfaceObject(viewCudaSurfaceObject);
    }
    cudaGraphicsUnmapResources(1, &viewCudaResource);
    cudaStreamSynchronize(0);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glBegin(GL_QUADS);
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        }
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    glFinish();
}
The problem is: whenever the view is resized, all CUDA calls start spewing out "unknown error"s, and visually it looks like the texture is not in fact resized, just stretched across the whole view. Why is this happening, and how do I fix it?
It seems the interop requires textures to be re-registered upon resize. The following works:
void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());
    // unregister
    cudaGraphicsUnregisterResource(viewCudaResource);
    // resize
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    // register back
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
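When debugging this kind of interop problem it also helps to check the return codes of the CUDA calls instead of letting them surface later as "unknown error" (a sketch, using the names from above):
cudaError_t err = cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture,
    GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
if (err != cudaSuccess) {
    fprintf(stderr, "cudaGraphicsGLRegisterImage failed: %s\n", cudaGetErrorString(err));
}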
Essentially, I'm rendering a raw buffer of pixels onto a texture, which is wrapped onto a quad.
My GL initialization code:
void INITOGL(int xres, int yres){
    glShadeModel(GL_SMOOTH);
    glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
    glClearDepth(1);
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LEQUAL);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
    //gluOrtho2D(0, xres, 0, yres);
}
Render loop code:
void RenderCycle(){
    register MSG msg = {0};
    while(1){
        if(PeekMessage(&msg, 0, 0, 0, PM_REMOVE)){
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }
        glEnable(GL_TEXTURE_2D);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
        //glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); //GL_LINEAR
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE /*GL_DECAL*/);
        glTexImage2D(GL_TEXTURE_2D, 0, 3, 800, 600, 0, GL_RGB, GL_UNSIGNED_BYTE, framebuffer); // viewport virtual size
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glBegin(GL_POLYGON);
        glTexCoord2f(0.0f, 1.0f);
        glVertex3f(0, 0, 0.0f);
        glTexCoord2f(1.0f, 1.0f);
        glVertex3f(800, 0, 0.0f);
        glTexCoord2f(1.0f, 0.0f);
        glVertex3f(800, 600, 0.0f);
        glTexCoord2f(0.0f, 0.0f);
        glVertex3f(0, 600, 0.0f);
        glEnd();
        glDisable(GL_TEXTURE_2D);
        SwapBuffers(hdc);
    }
}
The resize function:
void ResizeViewport(int height, int width){
    if(!height) height++;
    //glMatrixMode(GL_MODELVIEW);
    //glLoadIdentity();
    //gluPerspective(45.0f, (GLfloat)width/(GLfloat)height, 0.1f, 100.0f);
    glMatrixMode(GL_PROJECTION);
    glViewport(0, 0, width, height);
    //gluOrtho2D(0, width, 0, height);
    glLoadIdentity();
}
The buffer that holds the RGB data is just set to red using a for loop, but the output is not the expected solid red.
Have I improperly set my viewport, not scaled something correctly, or am I just completely missing something? If you need any more information, just ask. I have given all of the OpenGL-side code; the rest shouldn't make a difference. In theory, it should render red to the entire screen.
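For reference, the fill is essentially this (a sketch; in the real code the buffer is a global matching the 800x600 RGB layout passed to glTexImage2D):
unsigned char framebuffer[800 * 600 * 3]; // 800x600 pixels, 3 bytes (RGB) each

void FillRed() {
    for (int i = 0; i < 800 * 600; ++i) {
        framebuffer[i * 3 + 0] = 255; // R
        framebuffer[i * 3 + 1] = 0;   // G
        framebuffer[i * 3 + 2] = 0;   // B
    }
}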
I'm trying to render to a texture using an FBO. When trying to do so, gDEBugger shows the correct texture, but when drawing it on a quad it's just "white" / the glColor4f.
Here is the code to create the texture, fbo and renderbuffer:
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glGenRenderbuffers(1, &rb);
glBindRenderbuffer(GL_RENDERBUFFER, rb);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, width, height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rb);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
Render to the texture:
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor4f(1.0f, 0.5f, 0.2f, 1.0f);
glBegin(GL_TRIANGLES);
glVertex3f(10, 10, 0);
glVertex3f(210, 30, 1);
glVertex3f(50, 150, 1);
glEnd();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And here is how I render the quad with the texture:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 1.0f);
glVertex2f(0, 0);
glTexCoord2f(1.0f, 1.0f);
glVertex2f(width, 0);
glTexCoord2f(1.0f, 0.0f);
glVertex2f(width, height);
glTexCoord2f(0.0f, 0.0f);
glVertex2f(0, height);
glEnd();
glDisable(GL_TEXTURE_2D);
When drawing with a loaded image as a texture it works, but not with the FBO-bound texture. Anyone got an idea of what is wrong with my code?
Your texture is incomplete. You don't have mipmaps for it, and you did not select a filtering mode that would work around that: the default GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR, which requires all mipmap levels to be present; with an incomplete texture, texturing is effectively disabled and the quad just shows the glColor4f.
Try:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
That said, even without this you should still see the polygons, just without the proper texture.
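Alternatively, if you do want mipmaps for the render target, you can generate them after rendering to the FBO (glGenerateMipmap comes from GL 3.0 / ARB_framebuffer_object, which the FBO code already relies on):
glBindTexture(GL_TEXTURE_2D, texture);
glGenerateMipmap(GL_TEXTURE_2D); // regenerate after each render-to-texture pass
glBindTexture(GL_TEXTURE_2D, 0);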