Rendering a 2D image using OpenGL - C++

I am trying to render a 2D image using OpenGL (for rendering) and DevIL (for loading the image), but nothing gets rendered. Upon error checking, I have found that OpenGL reports an Invalid Operation error on the wglMakeCurrent call.
Following is my initialization function:
void CImageMainView::InitializeOpenGL()
{
    m_pDC = new CClientDC(this);
    m_hDC = m_pDC->GetSafeHdc();
    SetupPixelFormat();
    m_hRC = ::wglCreateContext(m_hDC);
    BOOL ret = ::wglMakeCurrent(m_hDC, m_hRC);
    if (!ret) {
        printf("Error making current context\n");
    }
    printf("wglMakeCurrent ");
    CheckGLError();
    GetOpenGLExtendedInformation();
    ::wglMakeCurrent(NULL, NULL);
    printf("wglMakeCurrent to null");
    CheckGLError();
    return;
}
The SetupPixelFormat function looks like the following:
void CImageMainView::SetupPixelFormat()
{
    static PIXELFORMATDESCRIPTOR pfd =
    {
        sizeof(PIXELFORMATDESCRIPTOR),  // size of this pfd
        1,                              // version number
        PFD_DRAW_TO_WINDOW |            // support window
        PFD_SUPPORT_OPENGL |            // support OpenGL
        PFD_DOUBLEBUFFER,               // double buffered
        PFD_TYPE_RGBA,                  // RGBA type
        24,                             // 24-bit color depth
        0, 0, 0, 0, 0, 0,               // color bits ignored
        0,                              // no alpha buffer
        0,                              // shift bit ignored
        0,                              // no accumulation buffer
        0, 0, 0, 0,                     // accum bits ignored
        32,                             // 32-bit z-buffer
        0,                              // no stencil buffer
        0,                              // no auxiliary buffer
        PFD_MAIN_PLANE,                 // main layer
        0,                              // reserved
        0, 0, 0                         // layer masks ignored
    };
    m_PixelFormat = ::ChoosePixelFormat(m_hDC, &pfd);
    ::SetPixelFormat(m_hDC, m_PixelFormat, &pfd);
    return;
}
and lastly, the error-checking function is:
void CImageMainView::CheckGLError()
{
    const GLenum err = glGetError();
    printf("GLError: %s\n", gluErrorString(err));
}
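As an aside: glGetError is only meaningful while a context is current (the second CheckGLError call above runs after wglMakeCurrent(NULL, NULL), so its result is unreliable), and a context can queue several errors. A more defensive variant might look like this sketch:
void CImageMainView::CheckGLError()
{
    // Sketch only: skip the check when no context is current, and drain
    // the whole error queue instead of reading a single entry.
    if (::wglGetCurrentContext() == NULL) {
        printf("GLError check skipped: no current context\n");
        return;
    }
    for (GLenum err = glGetError(); err != GL_NO_ERROR; err = glGetError()) {
        printf("GLError: %s\n", gluErrorString(err));
    }
}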
I do have another View in my application that renders 3D stuff using OpenGL. Could that be the reason?

Related

win32 PIXELFORMATDESCRIPTOR color bit count and color bit shift

So, in creating a context for OpenGL using Win32, I have found this generic pixel format descriptor that everyone seems to be referencing. However, after looking at the documentation, I have found that the description of the color bits and color bit shifts does not explain how the buffer is actually formatted, and I cannot find any more information online.
Is there some place I can get more information on the color bit count and color bit shifts, or a description of how they work?
PIXELFORMATDESCRIPTOR pfd =
{
    sizeof(PIXELFORMATDESCRIPTOR),
    1,
    PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,  // Flags
    PFD_TYPE_RGBA,  // The kind of framebuffer. RGBA or palette.
    32,             // Colordepth of the framebuffer.
    0, 0, 0, 0, 0, 0,
    0,
    0,
    0,
    0, 0, 0, 0,
    24,             // Number of bits for the depthbuffer
    8,              // Number of bits for the stencilbuffer
    0,              // Number of Aux buffers in the framebuffer.
    PFD_MAIN_PLANE,
    0,
    0, 0, 0
};
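As an aside, one way to see how the driver actually lays out the channels is to read back the format it chose with DescribePixelFormat; the cRedBits/cRedShift pairs give the width and the bit offset of each channel within a pixel. A minimal sketch, assuming hdc is a valid device context and pfd is the descriptor above:
// Sketch: ask the driver which format it picked and how the channels sit.
int fmt = ChoosePixelFormat(hdc, &pfd);
PIXELFORMATDESCRIPTOR chosen;
DescribePixelFormat(hdc, fmt, sizeof(chosen), &chosen);
printf("R: %d bits at shift %d\n", chosen.cRedBits,   chosen.cRedShift);
printf("G: %d bits at shift %d\n", chosen.cGreenBits, chosen.cGreenShift);
printf("B: %d bits at shift %d\n", chosen.cBlueBits,  chosen.cBlueShift);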

Save an OpenGL texture to a TIFF file from another thread

I'm trying to save a texture to a TIFF file from another thread, but the only result I get is a white picture. I think it comes from the GL context (because one GL context cannot be current in several threads at once). That's why I've tried to create two GL contexts sharing the same device context, but I still can't get the texture from the second OpenGL context. I'm doing this because, in the end, the texture will be a video stream from a camera.
Here is my context creation:
static PIXELFORMATDESCRIPTOR pfd =
{
    sizeof(PIXELFORMATDESCRIPTOR),  // Size Of This Pixel Format Descriptor
    1,                              // Version Number
    PFD_DRAW_TO_WINDOW |            // Format Must Support Window
    PFD_SUPPORT_OPENGL |            // Format Must Support OpenGL
    PFD_DOUBLEBUFFER,               // Must Support Double Buffering
    PFD_TYPE_RGBA,                  // Request An RGBA Format
    8,                              // Select Our Color Depth
    0, 0, 0, 0, 0, 0,               // Color Bits Ignored
    0,                              // No Alpha Buffer
    0,                              // Shift Bit Ignored
    0,                              // No Accumulation Buffer
    0, 0, 0, 0,                     // Accumulation Bits Ignored
    16,                             // 16Bit Z-Buffer (Depth Buffer)
    0,                              // No Stencil Buffer
    0,                              // No Auxiliary Buffer
    PFD_MAIN_PLANE,                 // Main Drawing Layer
    0,                              // Reserved
    0, 0, 0                         // Layer Masks Ignored
};
GLuint PixelFormat;
// choose the pixel format
PixelFormat = ChoosePixelFormat(dc, &pfd);
// set the pixel format
SetPixelFormat(dc, PixelFormat, &pfd);
gl = wglCreateContext(dc);
gl2 = wglCreateContext(dc);
wglShareLists(gl, gl2);
wglMakeCurrent(dc, gl);
GLenum g = glewInit();
wglewInit();
loadImage();
rec = new Recorder(dc, gl2);
rec->Start_StopRecord(text, true);
Here is the code that saves to the TIFF file:
Recorder::Recorder(HDC &hdc, HGLRC &_gl)
{
    isStarted = false;
    dc = hdc;
    gl = _gl;
}

Recorder::~Recorder()
{
    if (isStarted) {
        isStarted = false;
        recordThread.join();
        CloseTifFile();
        delete mp_fileTifIn;
    }
}

void Recorder::Start_StopRecord(GLuint Texture, bool launched)
{
    if (launched) {
        if (isStarted) {
            wglMakeCurrent(dc, gl);
            isStarted = false;
            recordThread.join();
            CloseTifFile();
            pixels.release();
        }
        else {
            isStarted = true;
            //wglMakeCurrent(NULL, NULL);
            //RecordShot(&Texture);
            recordThread = std::thread(&Recorder::RecordShot, this, &Texture);
        }
    }
}

void Recorder::RecordShot(GLuint* texture)
{
    wglMakeCurrent(dc, gl);
    OpenTifFile(*texture);
    pixels = std::unique_ptr<int>(new int[width*height]);
    //while (isStarted) {
        WriteTif8Bits(*texture);
        WriteDirectory();
        //Sleep(16);
    //}
    pixels.release();
}

void Recorder::OpenTifFile(GLuint &Texture)
{
    char* filename = "../test3.tiff";
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
    mp_fileTifIn = TIFFOpen(filename, "w");
}

void Recorder::CloseTifFile()
{
    TIFFClose(mp_fileTifIn);
}

/*
 * Open sub data for a TIFF file (allows multiple pictures in one TIFF file)
 */
void Recorder::WriteDirectory()
{
    TIFFWriteDirectory(mp_fileTifIn);
}

void Recorder::WriteTif8Bits(GLuint &Texture)
{
    // Setup TIFF configuration
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGEWIDTH, width);
    TIFFSetField(mp_fileTifIn, TIFFTAG_IMAGELENGTH, height);
    TIFFSetField(mp_fileTifIn, TIFFTAG_SAMPLESPERPIXEL, 4);
    TIFFSetField(mp_fileTifIn, TIFFTAG_BITSPERSAMPLE, 8);
    TIFFSetField(mp_fileTifIn, TIFFTAG_ROWSPERSTRIP, TIFFDefaultStripSize(mp_fileTifIn, width));
    TIFFSetField(mp_fileTifIn, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
    TIFFSetField(mp_fileTifIn, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
    TIFFSetField(mp_fileTifIn, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
    glBindTexture(GL_TEXTURE_2D, Texture);
    assert(glGetError() == GL_NO_ERROR);
    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
    assert(glGetError() == GL_NO_ERROR);
    // Image reversal
    Reverse(pixels.get(), height, width);
    // Write one picture
    /*for (int row = 0; row < height; row++) {
        TIFFWriteScanline(mp_fileTifIn, pixels.get(), row, 0);
        lineChange(pixels.get(), width);
    }*/
    TIFFWriteEncodedStrip(mp_fileTifIn, 0, pixels.get(), height*width * sizeof(int));
}

// Note: both helpers below only modify their local copy of the pointer,
// so they have no effect at the call site.
void Recorder::lineChange(int* pointer, int width)
{
    pointer -= width;
}

void Recorder::Reverse(int* pointer, int height, int width)
{
    pointer += (height - 1) * width;
}
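As an aside: since Reverse and lineChange never touch the buffer, a working in-place vertical flip (a sketch with an illustrative name, not code from the project) could look like:
#include <algorithm>
#include <vector>

void FlipVertically(int* pixels, int height, int width)
{
    std::vector<int> scratch(width);                   // one spare row
    for (int y = 0; y < height / 2; ++y) {
        int* top    = pixels + y * width;
        int* bottom = pixels + (height - 1 - y) * width;
        std::copy(top, top + width, scratch.begin()); // swap row y with its mirror
        std::copy(bottom, bottom + width, top);
        std::copy(scratch.begin(), scratch.end(), bottom);
    }
}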
And here is the loadImage function:
int loadImage() {
    wglMakeCurrent(dc, gl);
    cv::Mat image;
    image = cv::imread(std::string("C:/Users/Public/Pictures/Sample Pictures/Desert.jpg"), CV_LOAD_IMAGE_COLOR);
    if (!image.data)
        return -1;
    cvNamedWindow("try", cv::WINDOW_AUTOSIZE);
    cv::imshow("try", image);
    cv::flip(image, image, 0);
    glGenTextures(1, &text);
    GLenum g = glGetError();
    glBindTexture(GL_TEXTURE_2D, text);
    assert(glGetError() == GL_NO_ERROR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.cols, image.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, image.ptr());
    return 0;
}
Here is a test project where I load a picture with OpenCV and try to save it from another thread: https://mega.nz/#!SBMUnJRI!dLC_l9hmCkhIDDUaygHuq4Kw2SKIuxRE7m19md74p0k
To run the project you need OpenCV and GLEW; libtiff is already packaged inside.
If you think something is missing from this post, please comment before downvoting; I am following this question.
I finally solved my problem by doing all the OpenGL work in one thread: I retrieve the image and display it in one thread, and I save it in another, which doesn't need an OpenGL context.
Another thing that was confusing me was a bad configuration of
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, this->rec[0].pixels.get());
My textures are actually GL_TEXTURE_RECTANGLE_NV, which is why I sometimes got only a white picture.
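A minimal sketch of that arrangement (SaveTiff is a hypothetical helper, and text, width, and height come from the code above): the glGetTexImage readback stays on the thread that owns the context, using the texture's real target, and only the plain byte buffer crosses the thread boundary.
#include <cstdint>
#include <thread>
#include <vector>

void SaveFrameAsync()
{
    // GL thread: read back with the texture's actual target (the post's
    // textures are GL_TEXTURE_RECTANGLE_NV, not GL_TEXTURE_2D).
    std::vector<std::uint8_t> buf(width * height * 4);
    glBindTexture(GL_TEXTURE_RECTANGLE_NV, text);
    glGetTexImage(GL_TEXTURE_RECTANGLE_NV, 0, GL_RGBA, GL_UNSIGNED_BYTE, buf.data());

    // Worker thread: pure file I/O, no GL calls, so it needs no GL context.
    std::thread writer([pixels = std::move(buf)]() {
        SaveTiff("frame.tiff", pixels.data(), width, height); // hypothetical helper
    });
    writer.join(); // or keep the thread around for a stream of frames
}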

Do different OpenGL contexts have different OpenGL extensions?

I am working on a project that uses OpenGL off-screen rendering. But after I create the OpenGL context, I find that some OpenGL extensions are unusable. For example:
#include <windows.h>
#include <GL/glew.h>
#include <iostream>
#include <gl/gl.h>
#include <gl/glu.h>
#include <string>
#include <time.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace std;
using namespace cv;

void mGLRender()
{
    glClearColor(0.9f, 0.9f, 0.3f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(30.0, 1.0, 1.0, 10.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(0, 0, -5, 0, 0, 0, 0, 1, 0);
    glBegin(GL_TRIANGLES);
    glColor3d(1, 0, 0);
    glVertex3d(0, 1, 0);
    glColor3d(0, 1, 0);
    glVertex3d(-1, -1, 0);
    glColor3d(0, 0, 1);
    glVertex3d(1, -1, 0);
    glEnd();
    glFlush(); // remember to flush GL output!
}

void mGLRender1()
{
    glClearColor(0.3f, 0.3f, 0.3f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(30.0, 1.0, 1.0, 10.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(0, 0, -5, 0, 0, 0, 0, 1, 0);
    glBegin(GL_TRIANGLES);
    glColor3d(1, 0, 0);
    glVertex3d(0, 1, 0);
    glColor3d(0, 1, 0);
    glVertex3d(-1, -1, 0);
    glColor3d(0, 0, 1);
    glVertex3d(1, -1, 0);
    glEnd();
    glFlush(); // remember to flush GL output!
}

int main(int argc, char* argv[])
{
    clock_t clockBegin, clockEnd;
    const int WIDTH = 400;
    const int HEIGHT = 400;
    // Create a memory DC compatible with the screen
    HDC hdc = CreateCompatibleDC(0);
    if (hdc == 0) cout << "Could not create memory device context";
    // Create a bitmap compatible with the DC
    // must use CreateDIBSection(), and this means all pixel ops must be synchronised
    // using calls to GdiFlush() (see CreateDIBSection() docs)
    BITMAPINFO bmi = {
        { sizeof(BITMAPINFOHEADER), WIDTH, HEIGHT, 1, 32, BI_RGB, 0, 0, 0, 0, 0 },
        { 0 }
    };
    unsigned char *pbits; // pointer to bitmap bits
    HBITMAP hbm = CreateDIBSection(hdc, &bmi, DIB_RGB_COLORS, (void **)&pbits,
                                   0, 0);
    if (hbm == 0) cout << "Could not create bitmap";
    //HDC hdcScreen = GetDC(0);
    //HBITMAP hbm = CreateCompatibleBitmap(hdcScreen, WIDTH, HEIGHT);
    // Select the bitmap into the DC
    HGDIOBJ r = SelectObject(hdc, hbm);
    if (r == 0) cout << "Could not select bitmap into DC";
    // Choose the pixel format
    PIXELFORMATDESCRIPTOR pfd = {
        sizeof(PIXELFORMATDESCRIPTOR),           // struct size
        1,                                       // Version number
        PFD_DRAW_TO_BITMAP | PFD_SUPPORT_OPENGL, // use OpenGL drawing to BM
        PFD_TYPE_RGBA,                           // RGBA pixel values
        32,                                      // color bits
        0, 0, 0,                                 // RGB bits shift sizes...
        0, 0, 0,                                 // Don't care about them
        0, 0,                                    // No alpha buffer info
        0, 0, 0, 0, 0,                           // No accumulation buffer
        32,                                      // depth buffer bits
        0,                                       // No stencil buffer
        0,                                       // No auxiliary buffers
        PFD_MAIN_PLANE,                          // Layer type
        0,                                       // Reserved (must be 0)
        0,                                       // No layer mask
        0,                                       // No visible mask
        0,                                       // No damage mask
    };
    int pfid = ChoosePixelFormat(hdc, &pfd);
    if (pfid == 0) cout << "Pixel format selection failed";
    // Set the pixel format
    // - must be done *after* the bitmap is selected into DC
    BOOL b = SetPixelFormat(hdc, pfid, &pfd);
    if (!b) cout << "Pixel format set failed";
    // Create the OpenGL resource context (RC) and make it current to the thread
    HGLRC hglrc = wglCreateContext(hdc);
    if (hglrc == 0) cout << "OpenGL resource context creation failed";
    wglMakeCurrent(hdc, hglrc);
    GLenum err = glewInit();
    if (GLEW_OK != err)
    {
        /* Problem: glewInit failed, something is seriously wrong. */
        std::cout << "glew init error" << std::endl;
        fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
    }
    std::cout << (glewGetExtension("GL_ARB_fragment_shader") == GL_TRUE);
    std::cout << (glewGetExtension("GL_ARB_shader_objects") == GL_TRUE);
    std::cout << (glewGetExtension("GL_ARB_shading_language_100") == GL_TRUE);
    // Draw using GL - remember to sync with GdiFlush()
    clockBegin = clock();
    GdiFlush();
    mGLRender();
    //SaveBmp(hbm, "output.bmp");
    clockEnd = clock();
    printf("%d\n", clockEnd - clockBegin);
    clockBegin = clock();
    GdiFlush();
    mGLRender1();
    //SaveBmp(hbm, "output1.bmp");
    clockEnd = clock();
    printf("%d\n", clockEnd - clockBegin);
    // opencv: show the image
    Mat img(HEIGHT, WIDTH, CV_8UC4, (void *)pbits);
    imshow("img", img);
    waitKey();
    destroyWindow("img");
    // Clean up
    wglDeleteContext(hglrc); // Delete RC
    SelectObject(hdc, r);    // Remove bitmap from DC
    DeleteObject(hbm);       // Delete bitmap
    DeleteDC(hdc);           // Delete DC
    system("pause");
    return 0;
}
The above code works well in VS2015, but the line
std::cout << (glewGetExtension("GL_ARB_fragment_shader") == GL_TRUE);
shows that the GL_ARB_fragment_shader extension is unusable. I am sure my GPU supports this extension, because in a simple freeglut application glewGetExtension("GL_ARB_fragment_shader") returns true. That code is here:
#include <stdlib.h>
#include <GL/glew.h>
#include <GL/glut.h>
#include <iostream>

// Window attributes
static const unsigned int WIN_POS_X = 30;
static const unsigned int WIN_POS_Y = WIN_POS_X;
static const unsigned int WIN_WIDTH = 512;
static const unsigned int WIN_HEIGHT = WIN_WIDTH;

void glInit(int, char **);

int main(int argc, char * argv[])
{
    // Initialize OpenGL
    glInit(argc, argv);
    // A valid OpenGL context has been created.
    // You can call OpenGL functions from here on.
    GLenum err = glewInit();
    if (GLEW_OK != err)
    {
        /* Problem: glewInit failed, something is seriously wrong. */
        std::cout << "glew init error" << std::endl;
        fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
    }
    std::cout << (glewGetExtension("GL_ARB_fragment_shader") == GL_TRUE);
    std::cout << (glewGetExtension("GL_ARB_shader_objects") == GL_TRUE);
    std::cout << (glewGetExtension("GL_ARB_shading_language_100") == GL_TRUE);
    glutMainLoop();
    return 0;
}

void Display()
{
} // end Display()

void glInit(int argc, char ** argv)
{
    // Initialize GLUT
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE);
    glutInitWindowPosition(WIN_POS_X, WIN_POS_Y);
    glutInitWindowSize(WIN_WIDTH, WIN_HEIGHT);
    glutCreateWindow("Hello OpenGL!");
    glutDisplayFunc(Display);
    return;
}
The above code works well in VS2015, and there the value of glewGetExtension("GL_ARB_fragment_shader")
is true. So do different OpenGL contexts have different OpenGL extensions? Please help me.
Yes, different OpenGL contexts may support different OpenGL versions and/or extensions. In your particular case, the off-screen context you're creating will use the GDI software rasterizer fallback. The way you create the context, it will never be GPU accelerated!
If you want to create a GPU accelerated OpenGL context you'll either have to
use a PBuffer (which gives you an HDC without a window)
or
create an OpenGL context on a hidden window and render to an FBO (the most common method these days; sketched below)
or
use one of the new pure offscreen context creation methods that are independent of the OS (see e.g. https://devblogs.nvidia.com/parallelforall/egl-eye-opengl-visualization-without-x-server/ for how to do it on NVidia – also applies to Windows)
However even if OpenGL contexts are GPU accelerated, and even if they happen to be created on the same machine and GPU, they may differ in version and extension support.
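A rough sketch of the second option (hidden window + FBO), assuming GLEW supplies the framebuffer entry points; the window class, sizes, and omitted error handling are illustrative only:
// Create a window that is never shown, just to get a GPU-accelerated context.
HWND wnd = CreateWindowEx(0, "STATIC", "gl-offscreen", WS_POPUP,
                          0, 0, 1, 1, NULL, NULL, GetModuleHandle(NULL), NULL);
HDC dc = GetDC(wnd);
PIXELFORMATDESCRIPTOR pfd = { sizeof(pfd), 1,
    PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER, PFD_TYPE_RGBA, 32 };
SetPixelFormat(dc, ChoosePixelFormat(dc, &pfd), &pfd);
HGLRC rc = wglCreateContext(dc);
wglMakeCurrent(dc, rc);
glewInit();

// Render into an FBO instead of the (invisible) window's framebuffer.
GLuint fbo, color;
glGenFramebuffers(1, &fbo);
glGenRenderbuffers(1, &color);
glBindRenderbuffer(GL_RENDERBUFFER, color);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, 400, 400);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                          GL_RENDERBUFFER, color);
// ... draw, then glReadPixels() the result; this context reports the GPU's
// extensions, unlike the GDI software fallback above.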

Activating Multisample on OpenGL Win32

I want to set up MSAA on an OpenGL context using the Win32 API. Everything is working fine, but the MSAA just doesn't want to activate. Here is my code for building the context:
void Display::CreateGLContext(HWND hWND)
{
    mHDC = GetDC(hWND); // get the window's device context
    int nPixelFormat;
    PIXELFORMATDESCRIPTOR pfd;                      // Create a new PIXELFORMATDESCRIPTOR (PFD)
    memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR)); // Clear our PFD
    pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);      // Set the size of the PFD to the size of the struct
    pfd.dwFlags = PFD_DOUBLEBUFFER | PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW; // Enable double buffering, OpenGL support and drawing to a window
    pfd.iPixelType = PFD_TYPE_RGBA;  // Set our application to use RGBA pixels
    pfd.cColorBits = 32;             // Give us 32 bits of color information (the higher, the more colors)
    pfd.cDepthBits = 16;             // Give us 16 bits of depth information (the higher, the more depth levels)
    pfd.iLayerType = PFD_MAIN_PLANE; // Set the layer of the PFD

    /* Choose best matching format */
    nPixelFormat = ChoosePixelFormat(mHDC, &pfd);
    /* Set the pixel format to the device context */
    SetPixelFormat(mHDC, nPixelFormat, &pfd);

    HGLRC tempRC = wglCreateContext(mHDC);
    wglMakeCurrent(mHDC, tempRC);
    if (glewInit() != GLEW_OK) {
        MessageBox(mHWND, "Eroare", "glew", MB_OK);
    }

    int nPixelFormat2;
    BOOL bValidPixFormat;
    UINT nMaxFormats = 1;
    UINT nNumFormats;
    float pfAttribFList[] = { 0, 0 };
    int piAttribIList[] = {
        WGL_DRAW_TO_WINDOW_ARB, GL_TRUE,
        WGL_SUPPORT_OPENGL_ARB, GL_TRUE,
        WGL_COLOR_BITS_ARB, 32,
        WGL_RED_BITS_ARB, 8,
        WGL_GREEN_BITS_ARB, 8,
        WGL_BLUE_BITS_ARB, 8,
        WGL_ALPHA_BITS_ARB, 8,
        WGL_DEPTH_BITS_ARB, 16,
        WGL_STENCIL_BITS_ARB, 0,
        WGL_DOUBLE_BUFFER_ARB, GL_TRUE,
        WGL_PIXEL_TYPE_ARB, WGL_TYPE_RGBA_ARB,
        WGL_SAMPLE_BUFFERS_ARB, GL_TRUE,
        WGL_SAMPLES_ARB, 16,
        0, 0 };
    bValidPixFormat = wglChoosePixelFormatARB(mHDC, piAttribIList, pfAttribFList, nMaxFormats, &nPixelFormat2, &nNumFormats);
    if (!bValidPixFormat)
    {
        MessageBox(NULL, "Invalid Pixel Format", "Error! (SetupWGLPixelFormat)", MB_OK);
    }
    SetPixelFormat(mHDC, nPixelFormat2, &pfd);
    mGLRenderContext = wglCreateContext(mHDC);
    wglMakeCurrent(mHDC, NULL);
    wglDeleteContext(tempRC);
    wglMakeCurrent(mHDC, mGLRenderContext);

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);
    glCullFace(GL_BACK);
}
The code runs fine; it is called after creating hWnd in the main class, not in WndProc under the WM_CREATE case... What can be wrong?
I see the part where you asked for 16 samples. But I don't see the part where you enabled GL_MULTISAMPLE. Without which, rendering to a multisampled buffer will act no differently from rendering to a single sampled one.
Also, I would advise you to use a framebuffer object for your multisample render target instead of the default framebuffer. Yes, it's nice that the default framebuffer can be resized by the window. But by using a framebuffer object, you can control when multisampling is resolved.
Also, it allows you to keep the driver's pesky control panel options from messing with your sample counts ;)
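To make that concrete, here is a minimal sketch of both suggestions (the sample count of 16 and the w/h sizes are placeholders): glEnable(GL_MULTISAMPLE) turns multisample rasterization on, and a multisampled FBO lets you decide when the samples get resolved via an explicit blit:
glEnable(GL_MULTISAMPLE); // without this, MSAA buffers rasterize like single-sample

// Multisampled render target
GLuint msFbo, msColor;
glGenFramebuffers(1, &msFbo);
glGenRenderbuffers(1, &msColor);
glBindRenderbuffer(GL_RENDERBUFFER, msColor);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, 16, GL_RGBA8, w, h);
glBindFramebuffer(GL_FRAMEBUFFER, msFbo);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                          GL_RENDERBUFFER, msColor);

// ... draw the scene into msFbo ...

// Resolve at a moment you choose: blit into the default framebuffer.
glBindFramebuffer(GL_READ_FRAMEBUFFER, msFbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);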
I have found the problem. Basically, you can't call SetPixelFormat a second time on the same window, as this article mentions: https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Proper_Context_Creation
The solution is basically to create a dummy window (not visible), enable OpenGL on it, and delete it. I copied the code from Create Modern OpenGL context using WGL? and it worked for me.
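The shape of that dummy-window trick, sketched from the linked approach (names are illustrative; the key point is that each window gets SetPixelFormat called on it at most once):
// 1. Throwaway window: its only job is to make the WGL extensions loadable.
HWND dummy = CreateWindowEx(0, "STATIC", "dummy", WS_POPUP, 0, 0, 1, 1,
                            NULL, NULL, GetModuleHandle(NULL), NULL);
HDC dummyDC = GetDC(dummy);
PIXELFORMATDESCRIPTOR pfd = { sizeof(pfd), 1,
    PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER, PFD_TYPE_RGBA, 32 };
SetPixelFormat(dummyDC, ChoosePixelFormat(dummyDC, &pfd), &pfd);
HGLRC dummyRC = wglCreateContext(dummyDC);
wglMakeCurrent(dummyDC, dummyRC);
// wglChoosePixelFormatARB / wglCreateContextAttribsARB are usable from here.

// 2. Tear the dummy down and apply the ARB-chosen format to the *real* window,
//    whose pixel format has never been set before.
wglMakeCurrent(NULL, NULL);
wglDeleteContext(dummyRC);
ReleaseDC(dummy, dummyDC);
DestroyWindow(dummy);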

OpenGL overwrites Windows' buttons

I have the following code:
void DrawGLScene(unsigned char *drawing_bytes, HDC hdc, int xWidth, int yWidth)
{
    if ((!xWidth) || (!yWidth)) return;
    BOOL returnVal = wglMakeCurrent(hdc, hrc);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, xWidth, yWidth, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, drawing_bytes);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    glViewport(0, 0, xWidth, yWidth); // Reset the current viewport
    glMatrixMode(GL_PROJECTION);      // Select the projection matrix
    glLoadIdentity();                 // Reset the projection matrix
    // Calculate the aspect ratio of the window
    gluPerspective(25.0f, 1.0f, 0.1f, 100.0f);
    glMatrixMode(GL_MODELVIEW);       // Select the modelview matrix
    glLoadIdentity();                 // Reset the modelview matrix

    glEnable(GL_TEXTURE_2D);          // Enable texture mapping
    glShadeModel(GL_SMOOTH);          // Enable smooth shading
    glDisable(GL_DEPTH_TEST);         // Disable depth testing
    glDepthFunc(GL_LEQUAL);           // The type of depth testing to do
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); // Really nice perspective calculations

    glLoadIdentity();                 // Reset the view
    glTranslatef(0.0f, 0.0f, -5.0f);
    glBindTexture(GL_TEXTURE_2D, texture);
    glColor4f(1.0, 1.0, 1.0, 1.0);
    glBegin(GL_QUADS);
    // Front face
    glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 0.5f);
    glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 0.5f);
    glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f,  1.0f, 0.5f);
    glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f,  1.0f, 0.5f);
    glEnd();
    SwapBuffers(hdc);
}
This code overwrites my buttons created earlier via
hInstallButton = CreateWindow(TEXT("button"), "",
                              WS_VISIBLE | WS_CHILD | BS_AUTOCHECKBOX,
                              137, 70, 13, 13,
                              hWnd, (HMENU) 1, GetModuleHandle(NULL), NULL);
The issue is the SwapBuffers() call, which hides the buttons for good.
The pixel format is generated from this PIXELFORMATDESCRIPTOR:
static PIXELFORMATDESCRIPTOR pfd = // pfd tells Windows how we want things to be
{
    sizeof(PIXELFORMATDESCRIPTOR),  // Size Of This Pixel Format Descriptor
    1,                              // Version Number
    PFD_DRAW_TO_WINDOW |            // Format Must Support Window
    PFD_SUPPORT_OPENGL |            // Format Must Support OpenGL
    0,                              // (double buffering not requested)
    PFD_TYPE_RGBA,                  // Request An RGBA Format
    24,                             // Select Our Color Depth
    0, 0, 0, 0, 0, 0,               // Color Bits Ignored
    0,                              // No Alpha Buffer
    0,                              // Shift Bit Ignored
    0,                              // No Accumulation Buffer
    0, 0, 0, 0,                     // Accumulation Bits Ignored
    16,                             // 16Bit Z-Buffer (Depth Buffer)
    0,                              // No Stencil Buffer
    0,                              // No Auxiliary Buffer
    PFD_MAIN_PLANE,                 // Main Drawing Layer
    0,                              // Reserved
    0, 0, 0                         // Layer Masks Ignored
};
How can I force a single buffer, or somehow write the buttons to both buffers? I am at a loss here and don't know how to do this properly (except maybe recreating the buttons with each WM_PAINT call).
Edit:
I tried it with a subwindow (see code), but it creates a second window instead of embedding into the first one.
BOOL InitInstance(HINSTANCE hInstance, int nCmdShow)
{
    HWND hWnd;
    hInst = hInstance; // store the instance handle in the global variable
    DWORD dwExStyle;   // Window extended style
    DWORD dwStyle;     // Window style
    hWnd = CreateWindow(szWindowClass, szTitle, WS_OVERLAPPEDWINDOW,
                        CW_USEDEFAULT, 0, CW_USEDEFAULT, 0, NULL, NULL, hInstance, NULL);
    if (!hWnd) {
        return FALSE;
    }
    dwExStyle = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE | CS_OWNDC; // Window extended style
    dwStyle = WS_VISIBLE | WS_CLIPSIBLINGS | WS_CLIPCHILDREN;  // Window style
    WNDCLASS wndClass;
    wndClass.style = CS_OWNDC | CS_HREDRAW | CS_VREDRAW;
    wndClass.lpfnWndProc = WndProc;
    wndClass.cbClsExtra = 0;
    wndClass.cbWndExtra = 0;
    wndClass.hInstance = hInstance;
    wndClass.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    wndClass.hCursor = LoadCursor(NULL, IDC_ARROW);
    wndClass.hbrBackground = CreateSolidBrush(BLACK_BRUSH);
    wndClass.lpszMenuName = NULL;
    wndClass.lpszClassName = "Test Window";
    RegisterClass(&wndClass);
    hWndOpenGL = CreateWindowEx(dwExStyle,     // Extended style for the window
                                "Test Window", // Class name
                                "Testy test",  // Window title
                                dwStyle,       // Required window style
                                0, 0,          // Window position
                                800,
                                600,
                                hWnd,          // Parent window
                                NULL,          // No menu
                                hInstance,     // Instance
                                NULL);
    //CreateWindow(szWindowClass, szTitle, WS_OVERLAPPEDWINDOW,
    //             CW_USEDEFAULT, 0, CW_USEDEFAULT-500, 0, hWnd, NULL, hInstance, NULL);
    static PIXELFORMATDESCRIPTOR pfd = // pfd tells Windows how we want things to be
    {
        sizeof(PIXELFORMATDESCRIPTOR),  // Size Of This Pixel Format Descriptor
        1,                              // Version Number
        PFD_DRAW_TO_WINDOW |            // Format Must Support Window
        PFD_SUPPORT_OPENGL |            // Format Must Support OpenGL
        0,                              // (double buffering not requested)
        PFD_TYPE_RGBA,                  // Request An RGBA Format
        24,                             // Select Our Color Depth
        0, 0, 0, 0, 0, 0,               // Color Bits Ignored
        0,                              // No Alpha Buffer
        0,                              // Shift Bit Ignored
        0,                              // No Accumulation Buffer
        0, 0, 0, 0,                     // Accumulation Bits Ignored
        16,                             // 16Bit Z-Buffer (Depth Buffer)
        0,                              // No Stencil Buffer
        0,                              // No Auxiliary Buffer
        PFD_MAIN_PLANE,                 // Main Drawing Layer
        0,                              // Reserved
        0, 0, 0                         // Layer Masks Ignored
    };
    hdcOpenGL = GetDC(hWndOpenGL);
    GLuint PixelFormat; // Holds the result after searching for a match
    PixelFormat = ChoosePixelFormat(hdcOpenGL, &pfd);
    SetPixelFormat(hdcOpenGL, PixelFormat, &pfd);
    hrc = wglCreateContext(hdcOpenGL);
    ShowWindow(hWnd, nCmdShow);
    UpdateWindow(hWnd);
    ShowWindow(hWndOpenGL, nCmdShow);
    UpdateWindow(hWndOpenGL);
    return TRUE;
}
I'm guessing that you created the buttons as children of the OpenGL window. If you did this, well, then you actually did something that's explicitly mentioned in the WGL and Win32 API documentation to break things.
The fix is simple: the OpenGL window should be a sibling of the buttons and have its very own DC: create a dedicated subwindow for OpenGL operations with the CS_OWNDC window class flag set and the WS_CLIPSIBLINGS | WS_CLIPCHILDREN window styles set. Both the OpenGL subwindow and the buttons are created with the desired container window as parent.
That way the buttons will not get clobbered by OpenGL operations, even with a double buffered pixelformat.
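A minimal sketch of that layout (hWndContainer, the class name, and the positions are illustrative): the GL viewport and the button are both children of the container, so SwapBuffers only touches the GL sibling:
// Register a window class for the GL viewport with its own DC.
WNDCLASS wc = {};
wc.style         = CS_OWNDC;
wc.lpfnWndProc   = DefWindowProc;
wc.hInstance     = hInstance;
wc.lpszClassName = "GLViewport";
RegisterClass(&wc);

// GL subwindow and button are siblings under the same container window.
HWND hGL = CreateWindowEx(0, "GLViewport", NULL,
                          WS_CHILD | WS_VISIBLE | WS_CLIPSIBLINGS | WS_CLIPCHILDREN,
                          0, 0, 600, 600, hWndContainer, NULL, hInstance, NULL);
hInstallButton = CreateWindow(TEXT("button"), "",
                              WS_VISIBLE | WS_CHILD | BS_AUTOCHECKBOX,
                              610, 70, 13, 13,
                              hWndContainer, (HMENU)1, GetModuleHandle(NULL), NULL);
// Set the pixel format and create the wgl context on GetDC(hGL) only; the
// buttons are never touched by GL rendering or SwapBuffers.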