I am following this article to render a video onto a texture using OpenGL and WinForms with C++.
I have changed the code in the renderer as follows, but glutPostRedisplay() is not working. The same logic works well when I create an OpenGL window and render there, but it does not work in WinForms.
From what I understand, glutPostRedisplay is trying to refresh my main WinForms window and not the OpenGL viewport. I am not sure how to refresh my viewport.
void OpenGLForm::COpenGL::Render(System::Void)
{
    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);

    // These are necessary if using glTexImage2D instead of gluBuild2DMipmaps
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);

    // Draw a textured quad
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(0.0f, 0.0f);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(frame_width, 0.0f);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(frame_width, frame_height);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(0.0f, frame_height);
    glEnd();

    glFlush();
    //glutSwapBuffers();
    OpenGLForm::COpenGL::SwapOpenGLBuffers();

    // Get data from the camera
    uint32_t* buffer = new uint32_t[frame_width * frame_height * 4];
    if (display_mode == DISPLAY_ARGB) {
        // Pass a pointer to the texture directly into Thermal_GetImage for maximum performance
        status = Thermal_GetDisplayImage(camera, buffer, (uint32_t)frame_pixels);
        glTexImage2D(
            GL_TEXTURE_2D,
            0,
            GL_RGBA8,
            frame_width,
            frame_height,
            0,
            GL_RGBA,
            GL_UNSIGNED_BYTE,
            buffer);
        // Clean up buffer
        delete[] buffer;
        // Update display
        glutPostRedisplay();
    }
}

void OpenGLForm::COpenGL::SwapOpenGLBuffers(System::Void)
{
    SwapBuffers(m_hDC);
}
glutPostRedisplay only works with a "glut" window (glutCreateWindow). You have to use a Win32 API function to invalidate the client area of the window (e.g. InvalidateRect):
InvalidateRect(hWnd, NULL, TRUE); // hWnd: handle of the control hosting the GL viewport
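For example, in the render path above the call could look something like this (just a sketch; m_hWnd is assumed to be the HWND of the control that owns the GL context, i.e. the window m_hDC was obtained from):
// Instead of glutPostRedisplay(), ask Windows to repaint the GL control.
void OpenGLForm::COpenGL::RequestRedraw(System::Void)
{
    InvalidateRect(m_hWnd, NULL, FALSE); // FALSE: no background erase needed
    UpdateWindow(m_hWnd);                // optional: deliver WM_PAINT immediately
}
The control's paint handler (WM_PAINT / OnPaint) would then call Render() and swap the buffers again.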
I want to draw some indicators for use in an older piece of software using SDL2 and OpenGL 1.6 (therefore I can't switch to modern OpenGL for now) and have decided to build them from basic shapes. I've gotten the drawing logic working properly, but I can't get the texturing to work; it actually completely broke the program. I want to keep an OOP approach and be able to draw any object/shape separately, just by calling a drawing method.
First I create the window:
void SDLWindow::createWindow(const std::string windowTitle)
{
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
    {
        puts("Could not init SDL");
        return;
    }

    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 6);
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, 1);
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, 4);
    SDL_GL_SetAttribute(SDL_GL_ACCELERATED_VISUAL, 1);

    Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_ALWAYS_ON_TOP
                 | SDL_WINDOW_RESIZABLE;

    this->mainWindow = SDL_CreateWindow(windowTitle.c_str(),
        SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, this->width, this->height,
        flags);
    if (nullptr == mainWindow)
    {
        puts("Window could not be created");
        return;
    }

    this->context = SDL_GL_CreateContext(this->mainWindow);
    if (nullptr == this->context)
    {
        puts("Could not create context");
        return;
    }

    SDL_GL_SetSwapInterval(1);
    SDL_RaiseWindow(this->mainWindow);
}
Then I initialise the OpenGL specifics:
void SDLWindow::initGL()
{
    glClearDepth(1.0f);
    glDepthFunc(GL_LEQUAL); // type of depth testing
    // glEnable(GL_DEPTH_TEST);

    glViewport(0, 0, this->width, this->height);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0f, this->width, this->height, 0.0f, 0.0f, 1.0f);

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    glEnable(GL_MULTISAMPLE);
}
These settings are the ones used in the main app and, until I started adding textures, everything was working properly.
Loading the texture from a file:
void SDLWindow::loadTextureFromFile(char* path){
    SDL_Surface* Surface = SDL_LoadBMP(path);

    glGenTextures(1, &this->textureID);
    glBindTexture(GL_TEXTURE_2D, this->textureID);

    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, Surface->w, Surface->h, 0, GL_RGB, GL_UNSIGNED_BYTE, Surface->pixels);

    SDL_FreeSurface(Surface);
}
Drawing a primitive shape with the texture applied to it:
void SDLWindow::drawBasicShape()
{
    glBindTexture(GL_TEXTURE_2D, this->textureID); // commented this out but it doesn't change anything

    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f);
    glVertex2f(this->width / 4, this->height / 4);
    glTexCoord2f(1.0f, 0.0f);
    glVertex2f(3 * this->width / 4, this->height / 4);
    glTexCoord2f(1.0f, 1.0f);
    glVertex2f(3 * this->width / 4, 3 * this->height / 4);
    glTexCoord2f(0.0f, 1.0f);
    glVertex2f(this->width / 4, 3 * this->height / 4);
    glEnd();
}
In the main loop, I call renderRectCore(), which first loads the texture and then draws the shape:
void SDLWindow::renderRectCore()
{
    // Clear color buffer
    glClear(GL_COLOR_BUFFER_BIT);
    // glColor3f(1.0f, 0.0f, 0.0f);

    glEnable(GL_TEXTURE_2D);
    loadTextureFromFile("D:\Workspace\Eclipse\SDL_test\sample.bmp");
    drawBasicShape();
}
As far as I can tell, the function that loads the texture somehow breaks the program and causes the window to open blank and immediately close. Removing the call to loadTextureFromFile simply draws the white rectangle, but keeps the window functioning as intended. When the class SDLWindow is instantiated, textureID is initialized to 0.
Use a debugger to see which line of code is failing. But most likely SDL_LoadBMP returns NULL because
"D:\Workspace\Eclipse\SDL_test\sample.bmp"
is not the correct path.
You have to properly escape backslash characters in C++ string literals:
"D:\\Workspace\\Eclipse\\SDL_test\\sample.bmp"
(Or just use forward slashes as they are actually also supported by Windows).
Your code is also lacking the most basic error handling and will crash if the file is not found or not readable (or not the expected data format).
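A minimal sketch of such a check, keeping the structure of the question's function (the message text is just illustrative):
void SDLWindow::loadTextureFromFile(const char* path)
{
    SDL_Surface* surface = SDL_LoadBMP(path);
    if (surface == nullptr)
    {
        // SDL_GetError() explains why the load failed (bad path, unsupported format, ...)
        printf("Could not load '%s': %s\n", path, SDL_GetError());
        return;
    }
    glGenTextures(1, &this->textureID);
    glBindTexture(GL_TEXTURE_2D, this->textureID);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    // Note: BMP pixel data is commonly stored as BGR, so GL_BGR may be the correct
    // source format here, depending on the file.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, surface->w, surface->h, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, surface->pixels);
    SDL_FreeSurface(surface);
}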
glEnable(GL_TEXTURE_2D);
loadTextureFromFile("D:\Workspace\Eclipse\SDL_test\sample.bmp");
There is no point in loading the texture in the main loop: as written, every frame calls glGenTextures and creates a brand-new texture object, which also leaks GPU memory. Move the call to your initialisation code.
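A sketch of that change, reusing the functions from the question (note the forward slashes, which sidestep the escaping problem):
void SDLWindow::initGL()
{
    // ... existing viewport/projection setup ...
    glEnable(GL_TEXTURE_2D);
    // Load the texture once, up front.
    loadTextureFromFile("D:/Workspace/Eclipse/SDL_test/sample.bmp");
}

void SDLWindow::renderRectCore()
{
    glClear(GL_COLOR_BUFFER_BIT);
    drawBasicShape(); // only binds and draws; no per-frame loading
}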
I've written a very simple OpenGL application. Its goal is to load a texture and draw it on a plane. If I use the function glTexSubImage2D, the plane is not textured and glGetError returns error 1281 (invalid value). However, if I use glTexImage2D, my plane is textured correctly (and I get no error).
Here's a piece of my code:
void core::RootDevice::LoadTexture(char const *pFileName)
{
    SDL_Surface *pSurface = IMG_Load(pFileName);
    char *pPixels = reinterpret_cast<char*>(pSurface->pixels);
    uint32_t bytePerPixel = pSurface->format->BitsPerPixel;

    glGenTextures(1, &textureId);
    glBindTexture(GL_TEXTURE_2D, textureId);
    {
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
        {
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

            //glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pSurface->w,           // NO ERROR: all is ok
            //             pSurface->h, 0, GL_RGB, GL_UNSIGNED_BYTE, pPixels);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pSurface->w,             // ERROR: 1281
                            pSurface->h, GL_RGB, GL_UNSIGNED_BYTE, pPixels);

            std::cout << glGetError() << std::endl;
            getchar();
        }
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
}
And the rendering code :
void core::RootDevice::Render(void)
{
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, textureId);
    {
        glBegin(GL_QUADS);
        glTexCoord2f(0.0f, 0.0f);
        glVertex3f(-1.0f, -1.0f, 0.0f);
        glTexCoord2f(1.0f, 0.0f);
        glVertex3f(1.0f, -1.0f, 0.0f);
        glTexCoord2f(1.0f, 1.0f);
        glVertex3f(1.0f, 1.0f, 0.0f);
        glTexCoord2f(0.0f, 1.0f);
        glVertex3f(-1.0f, 1.0f, 0.0f);
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);
}
The result is that the plane renders without the texture. Can anyone help me?
glTexSubImage2D() is used to replace parts or all of a texture that already has image data allocated. You have to call glTexImage2D() on the texture at least once before you can use glTexSubImage2D() on it. Unlike glTexSubImage2D(), glTexImage2D() allocates image data. You can use NULL for the last (data) argument to glTexImage2D() if you only want to allocate image data, and later set the data with glTexSubImage2D().
Newer versions of OpenGL (4.2 and ES 3.0) have a new entry point glTexStorage2D() that can be used as an alternative to glTexImage2D() to allocate the image data for a texture without specifying the data. It is similar to calling glTexImage2D() with data = NULL, but also allows specifying ahead of time if space for mipmaps will be needed.
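For example, using the variables from the question's LoadTexture (and leaving the PBO binding aside), the allocate-then-upload order could look roughly like this:
glBindTexture(GL_TEXTURE_2D, textureId);

// 1) Allocate storage for level 0 (no data yet: the last argument is NULL).
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pSurface->w, pSurface->h, 0,
             GL_RGB, GL_UNSIGNED_BYTE, NULL);

// 2) Now the sub-image upload is valid, because the storage exists.
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pSurface->w, pSurface->h,
                GL_RGB, GL_UNSIGNED_BYTE, pPixels);

// Alternative (OpenGL 4.2+ / ES 3.0): immutable storage, then upload.
// glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGB8, pSurface->w, pSurface->h);
// glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pSurface->w, pSurface->h,
//                 GL_RGB, GL_UNSIGNED_BYTE, pPixels);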
I am writing a rendering system in CUDA and want results to be quickly displayed via OpenGL, without touching main memory. I basically do the following:
Create and initialize OpenGL texture, and register it in CUDA as cudaGraphicsResource
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;

void initialize() {
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
Whenever the view is resized, I resize the viewport and the texture image accordingly:
void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
}
Then, each frame, I map the graphics resource as a cudaSurfaceObject via a cudaArray, call the rendering kernel on it, unmap, and synchronize so that OpenGL can draw a fullscreen quad with this texture:
void renderFrame() {
    cudaGraphicsMapResources(1, &viewCudaResource);
    {
        cudaArray_t viewCudaArray;
        cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0);

        cudaResourceDesc viewCudaArrayResourceDesc;
        {
            viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
            viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
        }

        cudaSurfaceObject_t viewCudaSurfaceObject;
        cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc);
        {
            invokeRenderingKernel(viewCudaSurfaceObject);
        }
        cudaDestroySurfaceObject(viewCudaSurfaceObject);
    }
    cudaGraphicsUnmapResources(1, &viewCudaResource);
    cudaStreamSynchronize(0);

    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glBegin(GL_QUADS);
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        }
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    glFinish();
}
The problem: whenever the view is resized, all CUDA calls start returning "unknown error", and visually the texture does not appear to be resized, just stretched across the whole view. Why is this happening, and how do I fix it?
It seems the interop requires the texture to be re-registered after a resize, since glTexImage2D re-allocates the texture's storage. The following works:
void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());

    // unregister
    cudaGraphicsUnregisterResource(viewCudaResource);

    // resize
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    // register back
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
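As a side note, checking the return value of each CUDA call makes this kind of failure show up at the call that actually broke, instead of surfacing later as an "unknown error". A small sketch (the CUDA_CHECK helper is illustrative, not part of the original code):
#include <cstdio>
#include <cuda_runtime.h>

// Report a failed CUDA runtime call with a readable message.
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err = (call);                                          \
        if (err != cudaSuccess) {                                          \
            std::fprintf(stderr, "%s failed: %s\n", #call,                 \
                         cudaGetErrorString(err));                         \
        }                                                                  \
    } while (0)

// Usage in the resize path:
// CUDA_CHECK(cudaGraphicsUnregisterResource(viewCudaResource));
// ... glTexImage2D(...) ...
// CUDA_CHECK(cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture,
//            GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard));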
Essentially, I'm rendering a raw buffer of pixels onto a texture, which is wrapped onto a quad.
My GL initialisation code:
void INITOGL(int xres, int yres){
    glShadeModel(GL_SMOOTH);
    glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
    glClearDepth(1);
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LEQUAL);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
    //gluOrtho2D(0, xres, 0, yres);
}
Render loop code:
void RenderCycle(){
    register MSG msg = {0};
    while(1){
        if(PeekMessage(&msg, 0, 0, 0, PM_REMOVE)){
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }

        glEnable(GL_TEXTURE_2D);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
        //glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); //GL_LINEAR
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE /*GL_DECAL*/);
        glTexImage2D(GL_TEXTURE_2D, 0, 3, 800, 600, 0, GL_RGB, GL_UNSIGNED_BYTE, framebuffer); // viewport virtual size

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        glBegin(GL_POLYGON);
        glTexCoord2f(0.0f, 1.0f);
        glVertex3f(0, 0, 0.0f);
        glTexCoord2f(1.0f, 1.0f);
        glVertex3f(800, 0, 0.0f);
        glTexCoord2f(1.0f, 0.0f);
        glVertex3f(800, 600, 0.0f);
        glTexCoord2f(0.0f, 0.0f);
        glVertex3f(0, 600, 0.0f);
        glEnd();

        glDisable(GL_TEXTURE_2D);
        SwapBuffers(hdc);
    }
}
The resize function:
void ResizeViewport(int height, int width){
    if(!height) height++;

    //glMatrixMode(GL_MODELVIEW);
    //glLoadIdentity();
    //gluPerspective(45.0f, (GLfloat)width / (GLfloat)height, 0.1f, 100.0f);

    glMatrixMode(GL_PROJECTION);
    glViewport(0, 0, width, height);
    //gluOrtho2D(0, width, 0, height);
    glLoadIdentity();
}
The buffer that holds the RGB data is just set to red using a for loop, and this is the output.
Have I improperly set my viewport, not scaled something correctly, or am I just completely missing something? If you need any more information, just ask. I have given all of the OpenGL-side code; the rest shouldn't make a difference. In theory, it should render red to the entire screen.
How can I convert a .png image to an OpenGL texture with SDL? This is what I have now:
typedef GLuint texture;

texture load_texture(std::string fname){
    SDL_Surface *tex_surf = IMG_Load(fname.c_str());
    if(!tex_surf){
        return 0;
    }

    texture ret;
    glGenTextures(1, &ret);
    glBindTexture(GL_TEXTURE_2D, ret);
    glTexImage2D(GL_TEXTURE_2D, 0, 3, tex_surf->w, tex_surf->h, 0, GL_RGB, GL_UNSIGNED_BYTE, tex_surf->pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    SDL_FreeSurface(tex_surf);
    return ret;
}
And my code to draw it:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex);

// Use blurry texture mapping (replace GL_LINEAR with GL_NEAREST for blocky)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

glColor4f(1.0, 1.0, 1.0, 1.0); // don't use special coloring

glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex3f(0.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f);
glVertex3f(128.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f);
glVertex3f(128.0f, 128.0f, 0.0f);
glTexCoord2f(0.0f, 1.0f);
glVertex3f(0.0f, 128.0f, 0.0f);
glEnd();

glDisable(GL_TEXTURE_2D);
The problem is that it only works with .bmp files, and they turn bluish, so what is wrong?
Also, when I try to load a .png, it shows up really weird.
Wrong colors can be caused by getting the channel order wrong. The code I have lying around for loading .bmp files uses GL_BGR instead of GL_RGB, so I think that will solve your problem with bitmaps.
The problem with your png image is more likely caused by the png being 32 bits per pixel. Probably the best solution for you is to inspect the format field of the SDL surface to determine the appropriate flags/values to pass to glTexImage2D.
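A rough sketch of that inspection, reusing tex_surf from the question (this assumes a 24- or 32-bit surface; other depths would need converting first):
// Pick the source format from the SDL surface instead of hard-coding GL_RGB.
GLint  internal_format = GL_RGB;
GLenum pixel_format    = GL_RGB;

if (tex_surf->format->BytesPerPixel == 4) {
    internal_format = GL_RGBA;
    // Rmask tells us whether the bytes are laid out RGBA or BGRA in memory.
    pixel_format = (tex_surf->format->Rmask == 0x000000ff) ? GL_RGBA : GL_BGRA;
} else {
    pixel_format = (tex_surf->format->Rmask == 0x000000ff) ? GL_RGB : GL_BGR;
}

glTexImage2D(GL_TEXTURE_2D, 0, internal_format, tex_surf->w, tex_surf->h, 0,
             pixel_format, GL_UNSIGNED_BYTE, tex_surf->pixels);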