OpenGL offscreen rendering - C++

I have an application that creates a 3D model and exports an image from that. I use this example to do it:
#include <windows.h>
#include <GL\GL.h>
#include <GL\glu.h>
#include <GL\glut.h>
#include <opencv2\highgui.hpp>
GLfloat light_diffuse[] = { 1.0, 0.0, 0.0, 1.0 }; /* Red diffuse light. */
GLfloat light_position[] = { 1.0, 1.0, 1.0, 0.0 }; /* Infinite light location. */
GLfloat n[6][3] = { /* Normals for the 6 faces of a cube. */
{ -1.0, 0.0, 0.0 },{ 0.0, 1.0, 0.0 },{ 1.0, 0.0, 0.0 },
{ 0.0, -1.0, 0.0 },{ 0.0, 0.0, 1.0 },{ 0.0, 0.0, -1.0 } };
GLint faces[6][4] = { /* Vertex indices for the 6 faces of a cube. */
{ 0, 1, 2, 3 },{ 3, 2, 6, 7 },{ 7, 6, 5, 4 },
{ 4, 5, 1, 0 },{ 5, 6, 2, 1 },{ 7, 4, 0, 3 } };
GLfloat v[8][3]; /* Will be filled in with X,Y,Z vertexes. */
void drawBox(void)
{
int i;
for (i = 0; i < 6; i++) {
glBegin(GL_QUADS);
glNormal3fv(&n[i][0]);
glVertex3fv(&v[faces[i][0]][0]);
glVertex3fv(&v[faces[i][1]][0]);
glVertex3fv(&v[faces[i][2]][0]);
glVertex3fv(&v[faces[i][3]][0]);
glEnd();
}
}
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
drawBox();
glFlush();
}
void init(void)
{
/* Setup cube vertex data. */
v[0][0] = v[1][0] = v[2][0] = v[3][0] = -1;
v[4][0] = v[5][0] = v[6][0] = v[7][0] = 1;
v[0][1] = v[1][1] = v[4][1] = v[5][1] = -1;
v[2][1] = v[3][1] = v[6][1] = v[7][1] = 1;
v[0][2] = v[3][2] = v[4][2] = v[7][2] = 1;
v[1][2] = v[2][2] = v[5][2] = v[6][2] = -1;
/* Enable a single OpenGL light. */
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glEnable(GL_LIGHT0);
glEnable(GL_LIGHTING);
/* Use depth buffering for hidden surface elimination. */
glEnable(GL_DEPTH_TEST);
/* Setup the view of the cube. */
glMatrixMode(GL_PROJECTION);
gluPerspective( /* field of view in degree */ 40.0,
/* aspect ratio */ 1.0,
/* Z near */ 1.0, /* Z far */ 10.0);
glMatrixMode(GL_MODELVIEW);
gluLookAt(0.0, 0.0, 5.0, /* eye is at (0,0,5) */
0.0, 0.0, 0.0, /* center is at (0,0,0) */
0.0, 1.0, 0.); /* up is in positive Y direction */
/* Adjust cube position to an aesthetic angle. */
glTranslatef(0.0, 0.0, -1.0);
glRotatef(60, 1.0, 0.0, 0.0);
glRotatef(-20, 0.0, 0.0, 1.0);
}
int main(int argc, char **argv)
{
int width = 500, height = 500;
/********* i want to remove this section ************/
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutCreateWindow("red 3D lighted cube");
/********* i want to remove this section ************/
init();
display();
BYTE* result = new BYTE[3 * width *height];
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, result);
cv::Mat img(height, width, CV_8UC3); // cv::Mat takes (rows, cols), i.e. (height, width)
img.data = result;
cv::flip(img, img, 0);
cv::imwrite("D:\\result_off.jpg", img);
return 0; /* ANSI C requires main to return int. */
}
It works correctly, but when I run this program it creates a window, shows it, and then closes it.
I've tried removing the glut* functions, but then the program doesn't export anything when it runs. I googled it and found that I should use a framebuffer object, but I couldn't find any example.
How can I make my program render the 3D model without showing any window?
Note: I want to run this program on Windows and Linux.

I just had a look at the source code I wrote for Windows. As it was a study for production code (and hence uses other parts of our production code) I cannot provide it as is. What I present here is a stripped-down version which should show how it works:
// standard C/C++ header:
#include <iostream>
// Windows header:
#include <Windows.h>
using namespace std;
int main(int argc, char **argv)
{
if (argc < 3) {
cerr << "USAGE: " << argv[0]
<< " FILE [FILES...] IMG_FILE" << endl;
return -1;
}
// Import Scene Graph
// excluded: initialize importers
// excluded: import 3d files
#ifdef _WIN32
// Window Setup
// set window properties
enum { Width = 1024, Height = 768 };
WNDCLASSEX wndClass; memset(&wndClass, 0, sizeof wndClass);
wndClass.cbSize = sizeof(WNDCLASSEX);
wndClass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC | CS_DBLCLKS;
wndClass.lpfnWndProc = &DefWindowProc;
wndClass.cbClsExtra = 0;
wndClass.cbWndExtra = 0;
wndClass.hInstance = 0;
wndClass.hIcon = 0;
wndClass.hCursor = LoadCursor(0, IDC_ARROW);
wndClass.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
wndClass.lpszMenuName = 0;
wndClass.lpszClassName = "WndClass";
wndClass.hIconSm = 0;
RegisterClassEx(&wndClass);
// style the window and remove the caption bar (WS_POPUP)
DWORD style = WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_POPUP;
// Create the window. Position and size it.
HWND hwnd = CreateWindowEx(0,
"WndClass",
"",
style,
CW_USEDEFAULT, CW_USEDEFAULT, Width, Height,
0, 0, 0, 0);
HDC hdc = GetDC(hwnd);
// Windows OpenGL Setup
PIXELFORMATDESCRIPTOR pfd; memset(&pfd, 0, sizeof pfd);
pfd.nSize = sizeof(pfd);
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cDepthBits = 16;
pfd.cStencilBits = 8;
pfd.iLayerType = PFD_MAIN_PLANE;
// get the best available match of pixel format for the device context
int iPixelFormat = ChoosePixelFormat(hdc, &pfd);
// make that the pixel format of the device context
SetPixelFormat(hdc, iPixelFormat, &pfd);
// create the context
HGLRC hGLRC = wglCreateContext(hdc);
wglMakeCurrent(hdc, hGLRC);
#endif // _WIN32
// OpenGL Rendering Setup
/* excluded: init our private OpenGL binding as
* the Microsoft API for OpenGL is stuck <= OpenGL 2.0
*/
// create Render Buffer Object (RBO) for colors
GLuint rboColor = 0;
glGenRenderbuffers(1, &rboColor);
glBindRenderbuffer(GL_RENDERBUFFER, rboColor);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, Width, Height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
// create Render Buffer Object (RBO) for depth
GLuint rboDepth = 0;
glGenRenderbuffers(1, &rboDepth);
glBindRenderbuffer(GL_RENDERBUFFER, rboDepth);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, Width, Height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
// create Frame Buffer Object (FBO)
GLuint fbo = 0;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
// attach RBO to FBO
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER, rboColor);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER, rboDepth);
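// Suggested check (not part of the stripped original): verify the FBO is
// complete before rendering; an incomplete FBO silently produces no output.
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    cerr << "framebuffer incomplete" << endl;
    return -1;
}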
// GL Rendering Setup
// excluded: prepare our GL renderer
glViewport(0, 0, Width, Height);
glClearColor(0.525f, 0.733f, 0.851f, 1.0f);
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
/* compute projection matrix from
* - field of view (property fov)
* - aspect ratio of view
* - near/far clip distance (properties dNear and dFar).
*/
const DegreeD fov(30.0);
const double dNear = 0.1, dFar = 100.0;
const double ar = (float)Width / Height;
const double d = ::tan(fov / 2.0) * 2.0 * dNear;
// excluded: construct a projection matrix for perspective view
// excluded: determine bounding sphere of 3D scene
// excluded: compute camera and view matrix from the bounding sphere of scene
// excluded: OpenGL rendering of 3d scene
// read image from render buffer
// excluded: prepare image object to store read-back
//Image::Object img(4, Image::BottomToTop);
//img.set(Width, Height, Image::RGB24);
//const size_t bytesPerLine = (3 * Width * 4 + 3) / 4;
//glReadPixels(0, 0, Width, Height, GL_RGB, GL_UNSIGNED_BYTE, img.getData());
// store image
const string filePath = argv[argc - 1];
// excluded: export image in a supported image file format
// clean-up
// excluded: clean-up of 3D scene (incl. OpenGL rendering add-ons)
glDeleteFramebuffers(1, &fbo);
glDeleteRenderbuffers(1, &rboColor);
glDeleteRenderbuffers(1, &rboDepth);
#ifdef _WIN32
wglMakeCurrent(NULL, NULL);
wglDeleteContext(hGLRC);
#endif // _WIN32
// done
return 0;
}
I didn't check whether this compiles as-is. It is stripped out of code which compiles and runs on Windows 10 on my side.
A Note about OpenGL and Windows:
I did the GL binding by myself because the Microsoft Windows OpenGL API does not support OpenGL 3.0 or higher. (I could've used a library like glfw instead.) This means I have to assign function addresses to function pointers (to correct function prototypes) so that I can call OpenGL functions properly using C function calls.
The availability of the functions is guaranteed if I have appropriate H/W and the appropriate drivers installed. (There are ways to check whether the driver provides certain functions.)
If such bound function call fails (e.g. with a segmentation fault) possible reasons could be:
The signature of the called function is wrong. (I used headers downloaded from khronos.org to ensure correct prototypes. Hopefully, the driver provider did as well.)
The function does not exist in the driver. (I use functions which are part of the OpenGL standard which is supported by the installed driver. The driver supports OpenGL 4.x but I need only OpenGL 3.x (at least until now).)
The function pointers have to be initialized before I use them. (I have written an initialization which is not exposed in the code. This is where I placed the comment /* excluded: init our private OpenGL binding as the Microsoft API for OpenGL is stuck <= OpenGL 2.0 */.)
To illustrate this, some code examples:
In my OpenGL init function, I do:
glGenFramebuffers
= (PFNGLGENFRAMEBUFFERSPROC)wglGetProcAddress(
"glGenFramebuffers");
and the header provides:
extern PFNGLGENFRAMEBUFFERSPROC glGenFramebuffers;
PFNGLGENFRAMEBUFFERSPROC is provided by the glext.h I downloaded from khronos.org:
typedef void (APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers);
wglGetProcAddress() is provided by the Microsoft Windows API.
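A defensive variant of that lookup is sketched below (the error handling is an addition, not part of the original init function, and it assumes the init function returns a bool): bail out early when the driver does not export the function instead of crashing later.
glGenFramebuffers
    = (PFNGLGENFRAMEBUFFERSPROC)wglGetProcAddress("glGenFramebuffers");
if (glGenFramebuffers == nullptr) {
    // the driver/ICD does not export the function: report and abort the setup
    cerr << "glGenFramebuffers not available" << endl;
    return false;
}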
A Note about OpenGL and Linux:
If H/W and the installed driver supports the desired OpenGL standard, functions can be used as usual by
including the necessary headers (e.g. #include <GL/gl.h>)
linking the necessary libraries (e.g. -lGL -lGLU).
derhass commented:
There is absolutely no guarantee that GL 3.x functions are exported by whatever libGL.so one is using, and even if they are exported, there is no guarantee that the function is supported (i.e. mesa uses the same frontend lib for all driver backends, but each driver may only support a subset of the functions). You have to use the extension mechanism on both platforms.
I'm not able to provide a simple recommendation on how to handle this, nor do I have valuable practical experience with it. So, I want to at least provide these links (from khronos.org) that I found via Google search:
Load OpenGL Functions
OpenGL Context
OpenGL Loading Library.
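Beyond those links, and since the question also asks for Linux: below is a minimal sketch (not part of the original answer) of creating a window-less, offscreen-capable OpenGL context there with EGL and a pbuffer surface. It assumes the EGL and GL development packages are installed and the program is linked with -lEGL -lGL; the FBO/RBO code above then works the same way.
// standard C/C++ header:
#include <cstdio>
// EGL/OpenGL headers:
#include <EGL/egl.h>
#include <GL/gl.h>
int main()
{
    // get and initialize the default EGL display
    EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    eglInitialize(dpy, NULL, NULL);
    // choose a config that supports an offscreen (pbuffer) surface
    const EGLint cfgAttribs[] = {
        EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
        EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8,
        EGL_DEPTH_SIZE, 16,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
        EGL_NONE
    };
    EGLConfig cfg; EGLint numCfg = 0;
    eglChooseConfig(dpy, cfgAttribs, &cfg, 1, &numCfg);
    // create a small pbuffer surface and a desktop-GL context, make it current
    const EGLint pbAttribs[] = { EGL_WIDTH, 500, EGL_HEIGHT, 500, EGL_NONE };
    EGLSurface surf = eglCreatePbufferSurface(dpy, cfg, pbAttribs);
    eglBindAPI(EGL_OPENGL_API);
    EGLContext ctx = eglCreateContext(dpy, cfg, EGL_NO_CONTEXT, NULL);
    eglMakeCurrent(dpy, surf, surf, ctx);
    // from here on regular GL calls work: render, glReadPixels, ...
    printf("GL_VERSION: %s\n", glGetString(GL_VERSION));
    // clean up
    eglMakeCurrent(dpy, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
    eglDestroyContext(dpy, ctx);
    eglDestroySurface(dpy, surf);
    eglTerminate(dpy);
    return 0;
}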

Related

Exclude points in overlapping area of two circles in OpenGL

I want to draw two circles with the same radius but exclude the overlapping area when drawing.
I want to draw or set dots on the gray area.
I implemented the mathematical aspect behind it, and here is my code:
void draw_venn(){
float radian_to_degree_theta=2 * 3.14 / 360,
r = 0.5,
distance=0.3,
theta=0.0,
theta2=0.0,
xR=0.0,
yR=0.0,
xG=0.0,
yG=0.0,
sum_radii=0,
dis=0.0;
sum_radii=r+r;
for (r = 0.5; r >=0; r-=0.001)
{
for (float degree = 0; degree < 361; degree+=0.1)
{
theta =degree*radian_to_degree_theta;
xR=r*cos(theta)+distance;
yR=r*sin(theta);
xG=r*cos(theta)-distance;
yG=r*sin(theta);
dis=sqrt(pow(xR-xG,2) + pow(yR-yG,2));
if (dis <= sum_radii)
{
set_point(xR,yR,0.1,1,0,0);
set_point(xG,yG,0.1,0,1,0);
}
}
}
}
void set_point(float x,float y,float size,float R,float G,float B){
glPointSize(size);
glBegin (GL_POINTS);
glColor3f (R, G, B);
glVertex2f(x, y);
glEnd ();
}
void draw(void)
{
glClearColor (1, 1, 1, 0);
glClear (GL_COLOR_BUFFER_BIT);
glPushMatrix();
draw_venn();
glPopMatrix ();
glFlush();
}
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE);
glutInitWindowSize(1400, 1400);
glutInitWindowPosition(700, 500);
glutCreateWindow("GL Sample");
glutDisplayFunc(draw);
glutMainLoop();
return 0;
}
and here is the result:
How can I find if a point is inside the overlapping area?
I reviewed and tested out your code. Trigonometry can get a bit tricky. Following is the "draw_venn" function with some refinements to produce an overlap effect.
void draw_venn()
{
float radian_to_degree_theta=2 * 3.141 / 360,
r = 0.5,
distance=0.3,
theta=0.0,
theta2 = 0.0,
xR=0.0,
yR=0.0,
xG=0.0,
yG=0.0,
dis=0.0;
glPointSize(1);
glColor3f(1,0,0);
glBegin(GL_POINTS);
for (r = 0.5; r >=0; r-=0.001)
{
for (float degree = 0; degree < 361; degree+=0.1)
{
theta =degree*radian_to_degree_theta;
theta2 = (180.0 - degree) * radian_to_degree_theta;
xR=r*cos(theta2)+distance;
yR=r*sin(theta2);
xG=r*cos(theta)-distance;
yG=r*sin(theta);
dis = sqrt(pow((distance - xG), 2) + pow(yG, 2));
if (dis < 0.5)
{
set_point(xR,yR,0.1,0,0,1); /* Color the overlap blue */
set_point(xG,yG,0.1,0,0,1); /* This works due to symmetry */
}
else
{
set_point(xR,yR,0.1,1,0,0); /* Set the symmetrical circle colors */
set_point(xG,yG,0.1,0,1,0);
}
}
}
glEnd();
}
Pointing out the two significant revisions, first I derive a mirror image value for "theta" and place that into variable "theta2". That is used to draw the red circle. This assures that the circle images are being built in equal but opposite directions so that the coordinates are symmetrical. Second, I revised the formula for checking if the green image coordinates fall within the red circle's outermost radius. Using the Pythagorean theorem calculation for the hypotenuse, the formula determines if the hypotenuse value is smaller than the outermost radius length (0.5). If it is smaller make that point for the green circle blue, and since the circle points are being built and colored symmetrically, also make the corresponding point for the red circle blue.
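The distance test can also be factored into a small helper, sketched below (insideCircle is a hypothetical name added for clarity; the radius and offsets match the code above): a point lies inside a circle when its distance to the centre is smaller than the radius, and inside the overlapping area when that holds for both circles.
bool insideCircle(float x, float y, float cx, float cy, float r)
{
    // compare squared distances to avoid the sqrt call
    float dx = x - cx, dy = y - cy;
    return dx * dx + dy * dy < r * r;
}
// a point (x, y) is in the overlapping area when it is inside both circles:
//   insideCircle(x, y, +distance, 0.0f, 0.5f) && insideCircle(x, y, -distance, 0.0f, 0.5f)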
The result of those revisions is a Venn Diagram showing an overlap.
I hope that helps and gives you a springboard to proceed.
Regards.
Add an 8-bit stencil buffer to your context
I do it by setting up the pixel format like this:
PIXELFORMATDESCRIPTOR pfd;
ZeroMemory( &pfd, sizeof( pfd ) ); // set the pixel format for the DC
pfd.nSize = sizeof( pfd );
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 24;
pfd.cDepthBits = 24;
pfd.cStencilBits = 8;
pfd.iLayerType = PFD_MAIN_PLANE;
SetPixelFormat(hdc,ChoosePixelFormat(hdc, &pfd),&pfd);
Enable and Clear Stencil with 0 and setup it for incrementation
Disable color and depth output
Render circles
Enable color and depth output
Set up Stencil test to not equal 2
You can also use equal to 1 in case you overlap more than just 2 objects
Render circles
Disable Stencil test
I see it like this:
//---------------------------------------------------------------------------
void glCircle(float x,float y,float r)
{
int e;
float a,da=2.0*M_PI/72.0;
glBegin(GL_TRIANGLE_FAN);
glVertex2f(x,y);
for (e=1,a=0.0;e;a+=da)
{
if (a>=2.0*M_PI) { e=0; a=0.0; }
glVertex2f(x+(r*sin(a)),y+(r*cos(a)));
}
glEnd();
}
//---------------------------------------------------------------------------
void gl_draw()
{
glClearColor(1.0,1.0,1.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
float aspect=float(xs)/float(ys); //xs,ys is screen resolution
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glScalef(1.0,aspect,1.0);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glDisable(GL_DEPTH_TEST);
glDisable(GL_TEXTURE_2D);
// turn off color,depth
glStencilMask(0xFF);
glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
glDepthMask(GL_FALSE);
// clear stencil and setup it for increment
glEnable(GL_STENCIL_TEST);
glClearStencil(0);
glClear(GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_ALWAYS,1,0xFF);
glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
// render stencil
glCircle(-0.3,0.0,0.6);
glCircle(+0.3,0.0,0.6);
// turn on color,depth
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
glDepthMask(GL_TRUE);
// render screen (where Stencil is not 2)
glStencilFunc(GL_NOTEQUAL,2,0xFF);
glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
glColor3f(1.0,0.0,0.0); glCircle(-0.3,0.0,0.6);
glColor3f(0.0,1.0,0.0); glCircle(+0.3,0.0,0.6);
glDisable(GL_STENCIL_TEST);
glFlush();
SwapBuffers(hdc);
}
//---------------------------------------------------------------------------
And output:
In case you also want to know if pixel is inside both circles you can use:
GLint a;
glReadPixels(X,ys-1-Y,1,1,GL_STENCIL_INDEX,GL_INT,&a);
if (a==2); // X,Y is inside both circles
else; // X,Y is not inside both circles
In case you insist on rendering the stuff pixel by pixel, then at least do it properly, as your current approach is horribly slow for many reasons... For example, you can do it like this:
glClearColor(1.0,1.0,1.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDisable(GL_DEPTH_TEST);
glDisable(GL_TEXTURE_2D);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glTranslatef(-1.0,-1.0,0.0);
glScalef(2.0/xs,2.0/ys,1.0);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
int x0=(4*xs)/10,y0=ys/2,r0=xs/4; // circle0
int x1=(6*xs)/10,y1=ys/2,r1=xs/4; // circle1
int x,y,xx0,xx1,yy0,yy1,rr0=r0*r0,rr1=r1*r1;
glBegin(GL_POINTS);
glColor3f(1.0,0.0,0.0);
for (x=-r0;x<=r0;x++){ xx0=x; xx0*=xx0; xx1=x+x0-x1; xx1*=xx1;
for (y=-r0;y<=r0;y++){ yy0=y; yy0*=yy0; yy1=y+y0-y1; yy1*=yy1;
if (xx0+yy0<=rr0)
if (xx1+yy1>=rr1)
glVertex2i(x0+x,y0+y); }}
glColor3f(0.0,1.0,0.0);
for (x=-r1;x<=r1;x++){ xx1=x; xx1*=xx1; xx0=x+x1-x0; xx0*=xx0;
for (y=-r1;y<=r1;y++){ yy1=y; yy1*=yy1; yy0=y+y1-y0; yy0*=yy0;
if (xx1+yy1<=rr1)
if (xx0+yy0>=rr0)
glVertex2i(x1+x,y1+y); }}
glEnd();
glFlush();
SwapBuffers(hdc);
Where xs,ys is GL screen resolution.
See related:
I have an OpenGL Tessellated Sphere and I want to cut a cylindrical hole in it
OpenGL 3D-raypicking with high poly meshes
Is there a more efficient way of texturing a circle?

I'm trying to use OpenGL with the Windows API on different threads

So basically I am using the Windows API to create an empty window, and then I use OpenGL to draw to that window from different threads. I managed to do this with just one thread, but getting and dispatching system messages so that the window is usable was slowing down the frame rate I was able to get, so I'm trying to get another thread to do that in parallel while I draw in the main thread.
To do this I have a second thread which creates an empty window and enters an infinite loop to handle the Windows message loop. Before entering the loop it passes the HWND of the empty window to the main thread so OpenGL can be initialised. To do that I use the PostThreadMessage function with the message code WM_USER and the wParam of the message to send the window handle back. Here is the code for that secondary thread:
bool t2main(DWORD parentThreadId, int x = 0, int y = 0, int w = 256, int h = 256, int pixelw = 2, int pixelh = 2, const char* windowName = "Window") {
// Basic drawing values
int sw = w, sh = h, pw = pixelw, ph = pixelh;
int ww = 0; int wh = 0;
// Windows API window handler
HWND windowHandler;
// Calculate total window dimensions
ww = sw * pw; wh = sh * ph;
// Create the window handler
WNDCLASS wc;
wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
wc.hInstance = GetModuleHandle(nullptr);
wc.lpfnWndProc = DefWindowProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.lpszMenuName = nullptr;
wc.hbrBackground = nullptr;
wc.lpszClassName = "windowclass";
RegisterClass(&wc);
DWORD dwExStyle = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE;
DWORD dwStyle = WS_CAPTION | WS_SYSMENU | WS_VISIBLE | WS_THICKFRAME;
RECT rWndRect = { 0, 0, ww, wh };
AdjustWindowRectEx(&rWndRect, dwStyle, FALSE, dwExStyle);
int width = rWndRect.right - rWndRect.left;
int height = rWndRect.bottom - rWndRect.top;
windowHandler = CreateWindowEx(dwExStyle, "windowclass", windowName, dwStyle, x, y, width, height, NULL, NULL, GetModuleHandle(nullptr), NULL);
if(windowHandler == NULL) { return false; }
PostThreadMessageA(parentThreadId, WM_USER, (WPARAM) windowHandler, 0);
for(;;) {
MSG msg;
PeekMessageA(&msg, NULL, 0, 0, PM_REMOVE);
DispatchMessageA(&msg);
}
}
This function gets called from the main entry point, which correctly receives the window handle and then tries to set up OpenGL with it. Here is the code:
int main() {
// Basic drawing values
int sw = 256, sh = 256, pw = 2, ph = 2;
int ww = 0; int wh = 0;
const char* windowName = "Window";
// Thread stuff
DWORD t1Id, t2Id;
HANDLE t1Handler, t2Handler;
// Pixel array
Pixel* pixelBuffer = nullptr;
// OpenGl device context to draw
HDC glDeviceContext;
HWND threadWindowHandler;
t1Id = GetCurrentThreadId();
std::thread t = std::thread(&t2main, t1Id, 0, 0, sw, sh, pw, ph, windowName);
t.detach();
t2Handler = t.native_handle();
t2Id = GetThreadId(t2Handler);
while(true) {
MSG msg;
PeekMessageA(&msg, NULL, WM_USER, WM_USER + 100, PM_REMOVE);
if(msg.message == WM_USER) {
threadWindowHandler = (HWND) msg.wParam;
break;
}
}
// Initialise OpenGL with the window handle that we just created
glDeviceContext = GetDC(threadWindowHandler);
PIXELFORMATDESCRIPTOR pfd = {
sizeof(PIXELFORMATDESCRIPTOR), 1,
PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PFD_MAIN_PLANE, 0, 0, 0, 0
};
int pf = ChoosePixelFormat(glDeviceContext, &pfd);
SetPixelFormat(glDeviceContext, pf, &pfd);
HGLRC glRenderContext = wglCreateContext(glDeviceContext);
wglMakeCurrent(glDeviceContext, glRenderContext);
// Create an OpenGl buffer
GLuint glBuffer;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glBuffer);
glBindTexture(GL_TEXTURE_2D, glBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// Create a pixel buffer to hold the screen data and allocate space for it
pixelBuffer = new Pixel[sw * sh];
for(int32_t i = 0; i < sw * sh; i++) {
pixelBuffer[i] = Pixel();
}
// Test a pixel
pixelBuffer[10 * sw + 10] = Pixel(255, 255, 255);
// Push the current buffer into view
glViewport(0, 0, ww, wh);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sw, sh, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelBuffer);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0f, -1.0f, 0.0f);
glEnd();
SwapBuffers(glDeviceContext);
for(;;) {}
}
To hold the pixel information I'm using this struct:
struct Pixel {
union {
uint32_t n = 0xFF000000; //Default 255 alpha
struct {
uint8_t r; uint8_t g; uint8_t b; uint8_t a;
};
};
Pixel() {
r = 0;
g = 0;
b = 0;
a = 255;
}
Pixel(uint8_t red, uint8_t green, uint8_t blue, uint8_t alpha = 255) {
r = red;
g = green;
b = blue;
a = alpha;
}
};
When I try to run this code I don't get the desired pixel output; instead I just get the empty window, as if OpenGL hadn't initialised correctly. When I use the same code but all in one thread, I get the empty window with the pixel in it. What am I doing wrong here? Is there something I need to do before I initialise OpenGL in another thread? I appreciate all kinds of feedback. Thanks in advance.
There are several issues here. Let's address them in order.
First let's recall the rules of:
OpenGL and threads
The basic rules about OpenGL with regard to windows, device context and threads are:
An OpenGL context is not associated with a particular window or device context.
You can make an OpenGL context "current" on any device context (HDC, usually associated with a window) that is compatible with the device context with which the context was originally created.
An OpenGL context can be "current" on only one thread at a time, or not be active at all.
To move an OpenGL context's "current" state from one thread to another (see the sketch after this list), you do the following:
first: un-make the context "current" on the thread it is currently used on
second: make it "current" on the thread you want it to be current on.
More than one (up to all) threads in a process can have an OpenGL context "current" at the same time.
Multiple OpenGL contexts (up to all of them) – which by rule 5 will be current in different threads – can be current with the same device context (HDC) at the same time.
There are no defined rules for drawing commands happening concurrently on different threads that are current on the same HDC. Ordering must be done by the user, by placing appropriate locks that work with OpenGL's synchronization primitives. Until the introduction of explicit, fine-grained synchronization objects into OpenGL, the only synchronization available was glFinish and the implicit synchronization points of OpenGL (e.g. glReadPixels).
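A minimal sketch of the hand-over described in rule 4 (added for illustration, not from the original answer; it assumes hdc and hglrc were already created with the usual SetPixelFormat/wglCreateContext sequence from the question):
#include <windows.h>
#include <thread>
void renderThread(HDC hdc, HGLRC hglrc)
{
    // second step: make the context current on this thread
    wglMakeCurrent(hdc, hglrc);
    // ... all OpenGL calls for this context happen here ...
    // release it again before the thread ends
    wglMakeCurrent(NULL, NULL);
}
void handOverContext(HDC hdc, HGLRC hglrc)
{
    // first step: un-current the context on the creating thread
    wglMakeCurrent(NULL, NULL);
    std::thread worker(renderThread, hdc, hglrc);
    worker.join();
}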
Misconceptions in your understanding of what OpenGL does
This comes from reading the comments in your code:
int main() {
Why is your thread function called main? main is a reserved name, exclusively to be used for the process entry function. Even if your entry point is WinMain, you must not use main as a function name.
// Pixel array
Pixel* pixelBuffer = nullptr;
It's unclear what pixelBuffer is meant for later on. You will upload it to a texture, but apparently don't set up the drawing to use a texture.
t1Id = GetCurrentThreadId();
std::thread t = std::thread(&t2main, t1Id, 0, 0, sw, sh, pw, ph, windowName);
t.detach();
t2Handler = t.native_handle();
t2Id = GetThreadId(t2Handler);
What, I don't even... What is this supposed to do in the first place? First things first: don't mix the Win32 threads API and C++ std::thread. Decide on one, and stick with it.
while(true) {
MSG msg;
PeekMessageA(&msg, NULL, WM_USER, WM_USER + 100, PM_REMOVE);
if(msg.message == WM_USER) {
threadWindowHandler = (HWND) msg.wParam;
break;
}
}
Why the hell are you passing the window handle through a thread message? This is so wrong on so many levels. Threads all live in the same address space, so you could use a queue, or global variables, or pass it as a parameter to the thread entry function, etc., etc.
Furthermore, you could just have created the OpenGL context in the main thread and then passed it over.
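One concrete alternative is sketched below (added for illustration, not part of the answer): the window thread fulfils a std::promise with the HWND and the main thread waits on the matching std::future. The "STATIC" window class is only a placeholder so the fragment compiles on its own.
#include <windows.h>
#include <future>
#include <thread>
// worker thread: creates the window and reports the handle back
void windowThread(std::promise<HWND> hwndPromise)
{
    HWND hwnd = CreateWindowExA(0, "STATIC", "", WS_POPUP,
                                0, 0, 256, 256,
                                NULL, NULL, GetModuleHandle(NULL), NULL);
    hwndPromise.set_value(hwnd); // hand the handle to the main thread
    MSG msg;
    while (GetMessageA(&msg, NULL, 0, 0) > 0) {
        TranslateMessage(&msg);
        DispatchMessageA(&msg);
    }
}
int main()
{
    std::promise<HWND> hwndPromise;
    std::future<HWND> hwndFuture = hwndPromise.get_future();
    std::thread t(windowThread, std::move(hwndPromise));
    HWND hwnd = hwndFuture.get(); // blocks until the worker has set it
    // ... GetDC(hwnd), ChoosePixelFormat, wglCreateContext, rendering ...
    t.join(); // the message loop keeps running in the worker thread
    return 0;
}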
wglMakeCurrent(glDeviceContext, glRenderContext);
// Create an OpenGl buffer
GLuint glBuffer;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glBuffer);
That doesn't create an OpenGL buffer object, it creates a texture name.
glBindTexture(GL_TEXTURE_2D, glBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// Create a pixel buffer to hold the screen data and allocate space for it
pixelBuffer[10 * sw + 10] = Pixel(255, 255, 255);
Uhh, no, you don't supply drawable buffers to OpenGL in that way. Heck, you don't even supply draw buffers to OpenGL explicitly at all (this is not D3D12, Metal or Vulkan, where you do).
// Push the current buffer into view
glViewport(0, 0, ww, wh);
Noooo. That's not what glViewport does!
glViewport is part of the transformation pipeline state and ultimately is sets the destination rectangle of where inside a drawable the clip space volume will be mapped to. It does absolutely nothing with respect to the drawable buffers.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sw, sh, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelBuffer);
I think you don't understand what a texture is for. What this call does is copy the contents of pixelBuffer into the currently bound texture. After that, OpenGL is no longer concerned with pixelBuffer at all.
glBegin(GL_QUADS);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0f, -1.0f, 0.0f);
glEnd();
Here you draw something, but never enabled the use of the texture in the first place. So all that ado about setting up the texture is for nothing.
SwapBuffers(glDeviceContext);
for(;;) {}
}
So after swapping the window buffers you make the thread spin forever. Two problems with that: there is still the main message loop over in the other thread that handles other messages for the window, including maybe WM_PAINT, and depending on whether you've set a background brush and/or how you handle WM_ERASEBKGND, whatever you just drew might instantly vanish thereafter.
And by spinning the thread you're consuming CPU time for no reason whatsoever. You could just as well end the thread.
I solved the problem primarily with the help of @datenwolf's comment. Firstly, I used a pointer to pass variables between threads, which removed the need for PostThreadMessageA, which was the main reason why I was using WinAPI threads in the first place. I also changed the OpenGL code a bit and finally got what I wanted.

OpenGL: can't get a solid color with glColor3f(1.0f, 0.0f, 0.0f), red looks washed out

In the image the red appears washed out, even though I set glColor3f(1.0f, 0.0f, 0.0f);
//command compiler g++ console tested with Visual Studio Code
//g++ GL01Hello.cpp -o GL01Hello.exe -L"C:/MinGW/freeglut/lib" -lglu32 -lopengl32 -lfreeglut -I"C:\MinGW\freeglut\include\GL"
/*
* GL01Hello.cpp: With Load Background Image and Polygon Test OpenGL/GLUT C/C++ Setup
* Tested Visual Studio Code with MinGW
* To compile with -lfreeglut -lglu32 -lopengl32 and
*/
#include <windows.h> // for MS Windows
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <iostream>
#include <stdlib.h> /* srand, rand */
#include <ctime>
#include <freeglut.h> // GLUT, include glu.h and gl.h
using namespace std;
float spin = 0.0;
GLuint texture = 0;
int w1 = 0;
int h1 = 0;
// for random color primitive polygon
//static GLubyte redc,greenc,bluec;
bool prim_polygonmode = false;
// glut_load_image
GLuint LoadTexture( const char * filename )
{
GLuint texture;
int width, height;
unsigned char * data;
FILE * file;
file = fopen( filename, "rb" );
if(!file)
std::cout<<"File not Found"<<std::endl;
if ( file == NULL ) return 0;
width = 1360;
height = 768;
data = (unsigned char *)malloc( width * height * 3 );
//int size = fseek(file,);
fread( data, width * height * 3, 1, file );
fclose( file );
for(int i = 0; i < width * height ; ++i)
{
int index = i*3;
unsigned char B,R;
B = data[index];
R = data[index+2];
data[index] = R;
data[index+2] = B;
}
glGenTextures( 1, &texture );
glBindTexture( GL_TEXTURE_2D, texture );
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );//Necessary for correct elements value 4 default
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE,GL_MODULATE );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_NEAREST );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,GL_LINEAR );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_REPEAT );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_REPEAT );
gluBuild2DMipmaps( GL_TEXTURE_2D, 3, width, height,GL_RGB, GL_UNSIGNED_BYTE, data );
free( data );
return texture;
}
/* Initialize OpenGL Graphics just n this case for colors */
void initGL() {
// Set "clearing" or background color
glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // Black and opaque
//randomnumber color by ctime library
srand(time(NULL));
//redc = rand()%255;
//greenc = rand()%255;
//bluec = rand()%255;
}
/* Called back when there is no other event to be handled */
void idle() {
spin = spin + 0.075;
if (spin > 360.0)
spin = 0;
glutPostRedisplay(); // Post a re-paint request to activate display()
}
/* Handler for window re-size event. Called back when the window first appears and
whenever the window is re-sized with its new width and height */
void reshape(GLsizei width, GLsizei height) { // GLsizei for non-negative integer
// Compute aspect ratio of the new window
w1 = width;
h1 = height;
if (height == 0) height = 1; // To prevent divide by 0
GLfloat aspect = (GLfloat)width / (GLfloat)height;
// Set the viewport to cover the new window
glViewport(0, 0, width, height);
// Set the aspect ratio of the clipping area to match the viewport
glMatrixMode(GL_PROJECTION); // To operate on the Projection matrix
glLoadIdentity();
if (width >= height) {
// aspect >= 1, set the height from -1 to 1, with larger width
gluOrtho2D(-1.0 * aspect, 1.0 * aspect, -1.0, 1.0);
} else {
// aspect < 1, set the width to -1 to 1, with larger height
gluOrtho2D(-1.0, 1.0, -1.0 / aspect, 1.0 / aspect);
}
}
void orthogonalStart()
{
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
gluOrtho2D(-w1/2, w1/2, -h1/2, h1/2);
glMatrixMode(GL_MODELVIEW);
}
void orthogonalEnd()
{
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
}
void background()
{
glBindTexture( GL_TEXTURE_2D, texture );
orthogonalStart();
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glPolygonOffset(1,1);
// texture width/height
const int iw = 1360;
const int ih = 768;
glPushMatrix();
glTranslatef( -iw/2, -ih/2, 0 );
glBegin(GL_QUADS);
glColor3f(1.0f, 1.0f, 1.0f); // reset to default white for the background texture; without this line it would get the same (random) color as the polygon
glTexCoord2i(0,0); glVertex2i(0, 0);
glTexCoord2i(1,0); glVertex2i(iw, 0);
glTexCoord2i(1,1); glVertex2i(iw, ih);
glTexCoord2i(0,1); glVertex2i(0, ih);
glEnd();
glPopMatrix();
orthogonalEnd();
}
void display() {
glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // Set background color to black and opaque
glClear(GL_COLOR_BUFFER_BIT);// Clear the color buffer (background
glEnable( GL_TEXTURE_2D );
background();
gluLookAt (0.0, 0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
// A SQUARE PARAMETERS
if (prim_polygonmode) { // draw polygon mode lines
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
} else {
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glPolygonOffset(1,1);
}
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glPushMatrix();
glRotatef(spin , 0., 0., 1.);
glTranslatef(50.0, 50.0, 0);
glTranslatef(-50.0, -50.0, 0);
glBegin(GL_QUADS); // Each set of 4 vertices form a quad
//glColor3ub(redc, greenc, bluec); // Random red green blue value
glColor3f(1.0f, 0.0f, 0.0f); // solid red
glVertex2f(-0.5f, -0.5f); // x, y default 0.5f values
glVertex2f( 0.5f, -0.5f);
glVertex2f( 0.5f, 0.5f);
glVertex2f(-0.5f, 0.5f);
glEnd();
//angle += 5.0f;
glPopMatrix();
// glFlush(); // Render now
glutSwapBuffers(); // Double buffered - swap the front and back buffers
}
/* Callback handler for special-key event */
void specialKeys(int key, int x, int y) {
switch (key) {
case GLUT_KEY_F1: // F1: Toggle wireframe and solid polygon
prim_polygonmode = !prim_polygonmode; // Toggle state
break;
}
}
/* Main function: GLUT runs as a console application starting at main() */
int main(int argc, char** argv) {
glutInit(&argc, argv); // Initialize GLUT
glutInitDisplayMode(GLUT_DOUBLE); // Enable double buffered mode
glutInitWindowSize(1360, 768); // Set the window's initial width & height
glutInitWindowPosition(0, 0);
// Position the window's initial top-left corner
glutCreateWindow("OpenGL Setup Test"); // Create a window with the given title
glutSpecialFunc(specialKeys); // Register callback handler for special-key event
glutDisplayFunc(display); // Register display callback handler for window re-paint
glutReshapeFunc(reshape);
glutIdleFunc(idle);
// GLuint texture;
texture = LoadTexture( "stars.bmp" );
initGL();
glutMainLoop();// Enter the event-processing loop
//Free our texture
glDeleteTextures( 1, &texture );
return 0;
}
This code sets a background image and has a small animation of a square.
I don't know why I can't get solid colors. With the wireframe square I only get a very faint red line where I need a bright red. Maybe some filter or buffer is causing that?
If possible, please help me.
OpenGL is a state engine. Once a state is set, it is persistent until it is changed again.
This means if 2 dimensional texturing is enabled, all the following geometry is "textured".
Note, when glVertex2f is called, the current texture coordinate is associated with the vertex coordinate. If you don't explicitly set a texture coordinate, then the last texture coordinate which was set is still the current texture coordinate and will be associated with the vertex coordinate. This may cause random-looking behavior.
If texturing is enabled, then by default the color of the texel is multiplied by the current color, because by default the texture environment mode (GL_TEXTURE_ENV_MODE) is GL_MODULATE. See glTexEnv.
That all means:
If you want to draw a geometry with a texture then enable texturing and set a "white" color:
glEnable(GL_TEXTURE_2D)
glColor3f(1.0f, 1.0f, 1.0f);
background();
If you want to draw a uniform colored geometry, then set the color and disable texturing:
glDisable(GL_TEXTURE_2D);
glColor3f(1.0f, 0.0f, 0.0f);
glBegin(GL_QUADS);
// [...]
glEnd();
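Applied to the display() function from the question, that could look roughly like the sketch below (only the relevant calls are shown; the names are taken from the question's code):
void display() {
    glClear(GL_COLOR_BUFFER_BIT);
    // textured background: texturing enabled, current color white
    glEnable(GL_TEXTURE_2D);
    glColor3f(1.0f, 1.0f, 1.0f);
    background();
    // solid red square: texturing disabled, current color red
    glDisable(GL_TEXTURE_2D);
    glColor3f(1.0f, 0.0f, 0.0f);
    glBegin(GL_QUADS);
    glVertex2f(-0.5f, -0.5f);
    glVertex2f( 0.5f, -0.5f);
    glVertex2f( 0.5f,  0.5f);
    glVertex2f(-0.5f,  0.5f);
    glEnd();
    glutSwapBuffers(); // double buffered - swap front and back buffers
}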

How to debug OpenGL code?

I have a problem with OpenGL debugging. I find that a lot of the time, OpenGL shows you it failed simply by not drawing anything. Every time the code looks fine, but it is not drawing anything in the GL window.
For example, consider the code below. I wrote it to draw a cube, but it is not drawing anything and I am unable to find the cause.
// cube_vertex_array.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include <glut.h>
static GLfloat vertex[]=
{
100.0,100.0,0.0,
400.0,100.0,0.0,
400.0,400.0,0.0,
100.0,400.0,0.0,
100.0,100.0,-300.0,
400.0,100.0,-300.0,
400.0,400.0,-300.0,
100.0,400.0,-300.0
};
static GLfloat color[]=
{
1.0,0.0,0.0,
0.0,1.0,0.0,
0.0,0.0,1.0,
1.0,1.0,0.0,
1.0,0.0,1.0,
0.0,1.0,1.0
};
static GLubyte frontIndices[] = {0,1,2,3};
static GLubyte leftIndices[] = {1,5,6,2};
static GLubyte backIndices[] = {4,7,6,5};
static GLubyte rightIndices[] = {0,3,7,4};
static GLubyte topIndices[] = {3,2,6,7};
static GLubyte bottomIndices[] = {0,4,5,1};
void init(void)
{
glClearColor(0.0,0.0,0.0,0.0); //Set default background color to black.
glClearDepth(2.0); //Set the depth level for clearing depth buffer.
glShadeModel(GL_FLAT); //Set the shading model to FLAT
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Clear the color and depth buffer.
}
void Display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Clear the color and depth buffer.
glColor3f(1.0,0.0,0.0);
//glBegin(GL_LINE_STRIP);
// glVertex3f(0.0,0.0,0.0);
// glVertex3f(200.0,100.0,0.0);
//glEnd();
glEnableClientState(GL_VERTEX_ARRAY); //Enable vertex array.
glEnableClientState(GL_COLOR_ARRAY); //Enable vertex array color.
glColorPointer(3,GL_FLOAT,0,color); //Specify the array for colors.
glVertexPointer(3,GL_FLOAT,0,vertex); //Specify the array for vertex.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,frontIndices); //Draw front face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,leftIndices); //Draw left face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,backIndices); //Draw back face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,rightIndices); //Draw right face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,topIndices); //Draw top face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,bottomIndices); //Draw bottom face.
glutSwapBuffers(); //Swap the buffers.
}
void Reshape(int w,int h)
{
glViewport(0.0,(GLsizei)w,0.0,(GLsizei)h); //Set the viewport according to new window size.
glMatrixMode(GL_PROJECTION); //Set matrix mode to projection.
glLoadIdentity(); //Replace the top matrix in the stack to the identity matrix.
gluOrtho2D(0.0,(GLdouble)w,0.0,(GLdouble)h); //Set the orthographic projection.
glMatrixMode(GL_MODELVIEW); //Set matrix mode to modelview.
}
int main(int argc, char **argv)
{
glutInit(&argc,argv); //Initialize the glut.
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); //Set display mode and also enable double buffering.
glutInitWindowSize(500,500); //Set the initial window size.
glutCreateWindow("Cube"); //Create the window and also assign name to it.
init(); //Initialize the app.
glutDisplayFunc(Display); //Register the Display function.
glutReshapeFunc(Reshape); //Register the Reshape function.
glutMainLoop(); //Start the main loop.
return 0;
}
You have put GL_UNSIGNED_BYTE as the type parameter in glDrawElements(). This will cause OpenGL to interpret the array of indices you throw in as one byte per index. You should use GL_UNSIGNED_INT here instead.
Here's the working code based on the code you provided (I did port it to Java though):
import java.nio.ByteBuffer;
import org.lwjgl.BufferUtils;
import org.lwjgl.LWJGLException;
import org.lwjgl.opengl.Display;
import org.lwjgl.opengl.DisplayMode;
import static org.lwjgl.opengl.GL11.*;
public class GLTest {
public static void main(String[] args) {
try {
Display.create();
Display.setDisplayMode(new DisplayMode(500, 500));
Display.setResizable(true);
//the same arrays as the ones you specified.
float[] vertices = new float[]{100.0f,100.0f,0.0f,
400.0f,100.0f,0.0f,
400.0f,400.0f,0.0f,
100.0f,400.0f,0.0f,
100.0f,100.0f,-300.0f,
400.0f,100.0f,-300.0f,
400.0f,400.0f,-300.0f,
100.0f,400.0f,-300.0f};
float[] color = new float[]{1,0,0,
0,1,0,
0,0,1,
1,1,0,
1,0,1,
0,1,1};
int[] frontIndices = new int[]{0, 1, 2, 3};
//LWJGL bookkeeping...
ByteBuffer vertexBuffer = BufferUtils.createByteBuffer(vertices.length * 4);
ByteBuffer colourBuffer = BufferUtils.createByteBuffer(color.length * 4);
for(int i = 0; i < vertices.length; i++) {
vertexBuffer.putFloat(vertices[i]);
}
vertexBuffer.rewind();
for(int i = 0; i < color.length; i++) {
colourBuffer.putFloat(color[i]);
}
colourBuffer.rewind();
ByteBuffer indexBuffer = BufferUtils.createByteBuffer(4 * frontIndices.length);
for(int i = 0; i < frontIndices.length; i++) {
indexBuffer.putInt(frontIndices[i]);
}
indexBuffer.rewind();
//back to your code
glClearColor(1,1,1,1);
glShadeModel(GL_SMOOTH);
while(!Display.isCloseRequested()) {
glViewport(0, 0, Display.getWidth(), Display.getHeight());
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0,Display.getWidth(), 0, Display.getHeight(), -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(3, GL_FLOAT, 0, colourBuffer);
glVertexPointer(3, GL_FLOAT, 0, vertexBuffer);
glDrawElements(GL_QUADS, 4, GL_UNSIGNED_INT, indexBuffer);
Display.update();
Display.sync(60);
}
} catch (LWJGLException e) {
e.printStackTrace();
}
}
}
Which results in:
Use tools like glTrace/glIntercept (to look at the OpenGL call trace) and gDEBugger (to visualize textures, shaders, OGL state, etc.).
There is a list of OpenGL debugging tools here: https://www.opengl.org/wiki/Debugging_Tools
Also, your code is using the old fixed-function pipeline, which is considered deprecated since OpenGL 3.3, so I would recommend either not putting the tag "opengl-3" on your questions, or using an OpenGL 3.3 core context and learning "modern" OpenGL (which is more powerful and more difficult to learn, but makes you understand how the GPU works).
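A low-tech complement to those tools is to poll glGetError after suspicious calls; below is a minimal sketch (checkGLError is a hypothetical helper name, not from the question):
#include <GL/gl.h>
#include <cstdio>
static void checkGLError(const char* where)
{
    // glGetError returns (and clears) one error flag per call, so loop until clean
    for (GLenum err; (err = glGetError()) != GL_NO_ERROR; )
        fprintf(stderr, "GL error 0x%04X after %s\n", err, where);
}
// usage, e.g. inside Display():
//   glDrawElements(GL_QUADS, 4, GL_UNSIGNED_BYTE, frontIndices);
//   checkGLError("glDrawElements");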

OpenCL not Updating OpenGL VBO

I'm taking the first step into OpenCL coding. I have a framework that I know can at least take an array from the CPU, do an operation in OpenCL, then read back the array (with the right answer). I'm currently trying to improve this by adding a displaced mesh as found in this OpenCL example (slides 18-23; only significant improvement is I changed the VBO to a float3 instead of a float4).
I have set up a shared context as per earlier in those slides and this resource. I tested the VBO with CPU input data (so I know it draws correctly). Also, I create the context before the VBO (as motivated by this thread). Finally, I tried reworking the kernel into the following [edited]:
__kernel void sine_wave(__global float3* pos, int width, int height, float time) {
uint x = get_global_id(0); uint y = get_global_id(1);
pos[y*width+x] = (float3)(1.0f,1.0f,1.0f);
}
Yet, no matter what I do, I cannot get the OpenCL program to update anything. There are no errors, nothing, yet the VBO remains the same as the input data. If I do not specify input data, the points all render at (0,0,0). I can't figure out what could cause this.
Ideas? Thanks,
Ian
PS #1: current system is NVIDIA GTX 580M, on Windows 7 x64, though the code written is intended to be portable.
PS #2: I can provide code if no one has any clues . . .
Well, I figured it out. After further hours of searching, I downloaded NVIDIA's GPU computing toolkit, which appears to be where the linked demo derives from. I then reduced their code down immensely to the following ~220 line source (may it help ye future coders):
#pragma comment(lib,"Opengl32.lib")
#pragma comment(lib,"glu32.lib")
#pragma comment(lib,"OpenCL.lib")
#pragma comment(lib,"glew32.lib")
#pragma comment(lib,"glut32.lib")
// OpenGL Graphics Includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <OpenGL/OpenGL.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#ifdef UNIX
#include <GL/glx.h>
#endif
#endif
#include <CL/opencl.h>
// Rendering window vars
const unsigned int window_width = 512;
const unsigned int window_height = 512;
const unsigned int mesh_width = 256;
const unsigned int mesh_height = 256;
// OpenCL vars
cl_context cxGPUContext;
cl_device_id* cdDevices;
cl_command_queue cqCommandQueue;
cl_kernel ckKernel;
cl_mem vbo_cl;
cl_program cpProgram;
size_t szGlobalWorkSize[] = {mesh_width, mesh_height};
// vbo variables
GLuint vbo;
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -3.0;
void mouse(int button, int state, int x, int y) {
if (state == GLUT_DOWN) {
mouse_buttons |= 1<<button;
} else if (state == GLUT_UP) {
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
}
void motion(int x, int y) {
float dx, dy;
dx = (float)(x - mouse_old_x);
dy = (float)(y - mouse_old_y);
if (mouse_buttons & 1) {
rotate_x += dy * 0.2f;
rotate_y += dx * 0.2f;
} else if (mouse_buttons & 4) {
translate_z += dy * 0.01f;
}
mouse_old_x = x;
mouse_old_y = y;
}
void DisplayGL(void) {
static float anim = 0.0f;
// run OpenCL kernel to generate vertex positions
glFinish();
clEnqueueAcquireGLObjects(cqCommandQueue, 1, &vbo_cl, 0,0,0);
clSetKernelArg(ckKernel, 3, sizeof(float), &anim);
clEnqueueNDRangeKernel(cqCommandQueue, ckKernel, 2, NULL, szGlobalWorkSize, NULL, 0,0,0 );
clEnqueueReleaseGLObjects(cqCommandQueue, 1, &vbo_cl, 0,0,0);
clFinish(cqCommandQueue);
// set view matrix
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 1.0, 0.0);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1.0, 0.0, 0.0);
glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height);
glDisableClientState(GL_VERTEX_ARRAY);
// flip backbuffer to screen
glutSwapBuffers();
anim += 0.01f;
}
void timerEvent(int value) {
glutPostRedisplay();
glutTimerFunc(10, timerEvent,0);
}
int main(int argc, char** argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition (glutGet(GLUT_SCREEN_WIDTH)/2 - window_width/2, glutGet(GLUT_SCREEN_HEIGHT)/2 - window_height/2);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("OpenCL/GL Interop (VBO)");
glutDisplayFunc(DisplayGL);
glutMouseFunc(mouse);
glutMotionFunc(motion);
glutTimerFunc(10, timerEvent,0);
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
glViewport(0, 0, window_width, window_height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//Get the NVIDIA platform
cl_platform_id cpPlatform;
clGetPlatformIDs(1,&cpPlatform,NULL);
// Get the number of GPU devices available to the platform
cl_uint uiDevCount;
clGetDeviceIDs(cpPlatform, CL_DEVICE_TYPE_GPU, 0, NULL, &uiDevCount);
// Create the device list
cdDevices = new cl_device_id [uiDevCount];
clGetDeviceIDs(cpPlatform, CL_DEVICE_TYPE_GPU, uiDevCount, cdDevices, NULL);
// Define OS-specific context properties and create the OpenCL context
#if defined (__APPLE__)
CGLContextObj kCGLContext = CGLGetCurrentContext();
CGLShareGroupObj kCGLShareGroup = CGLGetShareGroup(kCGLContext);
cl_context_properties props[] =
{
CL_CONTEXT_PROPERTY_USE_CGL_SHAREGROUP_APPLE, (cl_context_properties)kCGLShareGroup,
0
};
cxGPUContext = clCreateContext(props, 0,0, NULL, NULL, &ciErrNum);
#else
#ifdef UNIX
cl_context_properties props[] =
{
CL_GL_CONTEXT_KHR, (cl_context_properties)glXGetCurrentContext(),
CL_GLX_DISPLAY_KHR, (cl_context_properties)glXGetCurrentDisplay(),
CL_CONTEXT_PLATFORM, (cl_context_properties)cpPlatform,
0
};
cxGPUContext = clCreateContext(props, 1, &cdDevices[uiDeviceUsed], NULL, NULL, &ciErrNum);
#else // Win32
cl_context_properties props[] =
{
CL_GL_CONTEXT_KHR, (cl_context_properties)wglGetCurrentContext(),
CL_WGL_HDC_KHR, (cl_context_properties)wglGetCurrentDC(),
CL_CONTEXT_PLATFORM, (cl_context_properties)cpPlatform,
0
};
cxGPUContext = clCreateContext(props, 1, &cdDevices[0], NULL, NULL, NULL);
#endif
#endif
// create a command-queue
cqCommandQueue = clCreateCommandQueue(cxGPUContext, cdDevices[0], 0, NULL);
const char* cSourceCL = "__kernel void sine_wave(__global float4* pos, unsigned int width, unsigned int height, float time)\n"
"{\n"
" unsigned int x = get_global_id(0);\n"
" unsigned int y = get_global_id(1);\n"
"\n"
" // calculate uv coordinates\n"
" float u = x / (float) width;\n"
" float v = y / (float) height;\n"
" u = u*2.0f - 1.0f;\n"
" v = v*2.0f - 1.0f;\n"
"\n"
" // calculate simple sine wave pattern\n"
" float freq = 4.0f;\n"
" float w = sin(u*freq + time) * cos(v*freq + time) * 0.5f;\n"
"\n"
" // write output vertex\n"
" pos[y*width+x] = (float4)(u, w, v, 1.0f);\n"
"}\n";
cpProgram = clCreateProgramWithSource(cxGPUContext, 1, (const char **) &cSourceCL, NULL, NULL);
clBuildProgram(cpProgram, 0, NULL, "-cl-fast-relaxed-math", NULL, NULL);
// create the kernel
ckKernel = clCreateKernel(cpProgram, "sine_wave", NULL);
// create VBO (if using standard GL or CL-GL interop), otherwise create Cl buffer
unsigned int size = mesh_width * mesh_height * 4 * sizeof(float);
glGenBuffers(1,&vbo);
glBindBuffer(GL_ARRAY_BUFFER,vbo);
// initialize buffer object
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
// create OpenCL buffer from GL VBO
vbo_cl = clCreateFromGLBuffer(cxGPUContext, CL_MEM_WRITE_ONLY, vbo, NULL);
// set the args values
clSetKernelArg(ckKernel, 0, sizeof(cl_mem), (void *) &vbo_cl);
clSetKernelArg(ckKernel, 1, sizeof(unsigned int), &mesh_width);
clSetKernelArg(ckKernel, 2, sizeof(unsigned int), &mesh_height);
glutMainLoop();
}
After comparison with my original code, I (eventually) found the key difference.
Right:
clEnqueueNDRangeKernel(context->command_queue, kernel->kernel, 2, NULL, global,NULL, 0,0,0 );
Wrong:
clEnqueueNDRangeKernel(context->command_queue, kernel->kernel, 2, NULL, global,local, 0,0,0 );
It turns out that the grid size I was using, 10x10, was smaller than the examples I had seen elsewhere, which told me to use 16x16 for "local". Because "global" is the grid size, "global" was smaller than "local".
For some reason this didn't cause any errors, though at this point I honestly can't say I understand these variables' purposes completely.
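A defensive variant of that call is sketched below (an addition for illustration; it reuses cqCommandQueue, ckKernel and the mesh sizes from the listing above and needs <cstdio> for the error print): either pass NULL as the local size and let the driver choose, or make sure every global dimension is a non-zero multiple of the local one, and always check the return code.
size_t global[2] = { mesh_width, mesh_height };
size_t local[2]  = { 16, 16 };
// use the explicit local size only if it evenly divides the global size
bool localOk = (local[0] != 0) && (local[1] != 0) &&
               (global[0] % local[0] == 0) && (global[1] % local[1] == 0);
const size_t* localArg = NULL;          // NULL lets the driver pick a size
if (localOk)
    localArg = local;
cl_int err = clEnqueueNDRangeKernel(cqCommandQueue, ckKernel, 2, NULL,
                                    global, localArg, 0, NULL, NULL);
if (err != CL_SUCCESS)
    fprintf(stderr, "clEnqueueNDRangeKernel failed: %d\n", err);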
Ian