So basically I am using the Windows API to create an empty window, and then I use OpenGL to draw to that window from different threads. I managed to do this with a single thread, but getting and dispatching system messages so that the window stays usable was lowering the frame rate I could get, so I'm trying to have another thread do that in parallel while I draw in the main thread.
To do this I have a second thread which creates an empty window and enters an infinite loop to run the Windows message loop. Before entering the loop it passes the HWND of the empty window to the main thread so OpenGL can be initialised there. To do that I use the PostThreadMessage function with the message code WM_USER, sending the window handle back in the wParam of the message. Here is the code of that secondary thread:
bool t2main(DWORD parentThreadId, int x = 0, int y = 0, int w = 256, int h = 256, int pixelw = 2, int pixelh = 2, const char* windowName = "Window") {
// Basic drawing values
int sw = w, sh = h, pw = pixelw, ph = pixelh;
int ww = 0; int wh = 0;
// Windows API window handler
HWND windowHandler;
// Calculate total window dimensions
ww = sw * pw; wh = sh * ph;
// Create the window handler
WNDCLASS wc;
wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
wc.hInstance = GetModuleHandle(nullptr);
wc.lpfnWndProc = DefWindowProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.lpszMenuName = nullptr;
wc.hbrBackground = nullptr;
wc.lpszClassName = "windowclass";
RegisterClass(&wc);
DWORD dwExStyle = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE;
DWORD dwStyle = WS_CAPTION | WS_SYSMENU | WS_VISIBLE | WS_THICKFRAME;
RECT rWndRect = { 0, 0, ww, wh };
AdjustWindowRectEx(&rWndRect, dwStyle, FALSE, dwExStyle);
int width = rWndRect.right - rWndRect.left;
int height = rWndRect.bottom - rWndRect.top;
windowHandler = CreateWindowEx(dwExStyle, "windowclass", windowName, dwStyle, x, y, width, height, NULL, NULL, GetModuleHandle(nullptr), NULL);
if(windowHandler == NULL) { return false; }
PostThreadMessageA(parentThreadId, WM_USER, (WPARAM) windowHandler, 0);
for(;;) {
MSG msg;
PeekMessageA(&msg, NULL, 0, 0, PM_REMOVE);
DispatchMessageA(&msg);
}
}
This function gets called from the main entry point, which correctly receives the window handle and then tries to set up OpenGL with it. Here is the code:
int main() {
// Basic drawing values
int sw = 256, sh = 256, pw = 2, ph = 2;
int ww = 0; int wh = 0;
const char* windowName = "Window";
// Thread stuff
DWORD t1Id, t2Id;
HANDLE t1Handler, t2Handler;
// Pixel array
Pixel* pixelBuffer = nullptr;
// OpenGl device context to draw
HDC glDeviceContext;
HWND threadWindowHandler;
t1Id = GetCurrentThreadId();
std::thread t = std::thread(&t2main, t1Id, 0, 0, sw, sh, pw, ph, windowName);
t.detach();
t2Handler = t.native_handle();
t2Id = GetThreadId(t2Handler);
while(true) {
MSG msg;
PeekMessageA(&msg, NULL, WM_USER, WM_USER + 100, PM_REMOVE);
if(msg.message == WM_USER) {
threadWindowHandler = (HWND) msg.wParam;
break;
}
}
// Initialise OpenGL with the window handle that we just created
glDeviceContext = GetDC(threadWindowHandler);
PIXELFORMATDESCRIPTOR pfd = {
sizeof(PIXELFORMATDESCRIPTOR), 1,
PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PFD_MAIN_PLANE, 0, 0, 0, 0
};
int pf = ChoosePixelFormat(glDeviceContext, &pfd);
SetPixelFormat(glDeviceContext, pf, &pfd);
HGLRC glRenderContext = wglCreateContext(glDeviceContext);
wglMakeCurrent(glDeviceContext, glRenderContext);
// Create an OpenGl buffer
GLuint glBuffer;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glBuffer);
glBindTexture(GL_TEXTURE_2D, glBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// Create a pixel buffer to hold the screen data and allocate space for it
pixelBuffer = new Pixel[sw * sh];
for(int32_t i = 0; i < sw * sh; i++) {
pixelBuffer[i] = Pixel();
}
// Test a pixel
pixelBuffer[10 * sw + 10] = Pixel(255, 255, 255);
// Push the current buffer into view
glViewport(0, 0, ww, wh);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sw, sh, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelBuffer);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0f, -1.0f, 0.0f);
glEnd();
SwapBuffers(glDeviceContext);
for(;;) {}
}
To hold the pixel information I'm using this struct:
struct Pixel {
union {
uint32_t n = 0xFF000000; //Default 255 alpha
struct {
uint8_t r; uint8_t g; uint8_t b; uint8_t a;
};
};
Pixel() {
r = 0;
g = 0;
b = 0;
a = 255;
}
Pixel(uint8_t red, uint8_t green, uint8_t blue, uint8_t alpha = 255) {
r = red;
g = green;
b = blue;
a = alpha;
}
};
When I try to run this code I don't get the desired pixel output; instead I just get the empty window, as if OpenGL hadn't initialised correctly. When I use the same code but all in one thread, I get the empty window with the pixel in it. What am I doing wrong here? Is there something I need to do before I initialise OpenGL in another thread? I appreciate all kinds of feedback. Thanks in advance.
There are several issues here. Let's address them in order.
First let's recall the rules of:
OpenGL and threads
The basic rules about OpenGL with regard to windows, device context and threads are:
An OpenGL context is not associated with a particular window or device context.
You can make an OpenGL context "current" on any device context (HDC, usually associated with a window) that is compatible with the device context with which the context was originally created.
An OpenGL context can be "current" on only one thread at a time, or not be active at all.
To move an OpenGL context's "current state" from one thread to another you do the following (as sketched in the code after this list):
first: unmake the context "current" on the thread it is currently used on
second: make it "current" on the thread you want it to be current on
More than one (including all) threads in a process can have an OpenGL context "current" at the same time.
Multiple OpenGL contexts (including all) – which by rule 5 will be current in different threads – can be current with the same device context (HDC) at the same time.
There are no defined rules for drawing commands happening concurrently on different threads but current on the same HDC. Ordering must be done by the user, by placing appropriate locks that work together with OpenGL's synchronization primitives. Until the introduction of explicit, fine-grained synchronization objects into OpenGL, the only synchronization available were glFinish and the implicit synchronization points of OpenGL (e.g. glReadPixels).
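For illustration, here is a minimal sketch (not taken from the question's code) of handing one context between two threads; hdc and hglrc are assumed to have been created beforehand, as in the question's main():
#include <windows.h>
#include <thread>
void handOver(HDC hdc, HGLRC hglrc) {
    // Step 1: the thread that currently owns the context releases it.
    wglMakeCurrent(NULL, NULL);
    // Step 2: another thread makes it current and may now issue OpenGL calls.
    std::thread worker([hdc, hglrc] {
        wglMakeCurrent(hdc, hglrc);
        // ... OpenGL drawing happens on this thread now ...
        wglMakeCurrent(NULL, NULL);
    });
    worker.join();
}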
Misconceptions in your understanding of what OpenGL does
This comes from reading the comments in your code:
int main() {
Why is your thread function called main? main is a reserved name, to be used exclusively for the process entry function. Even if your entry point is WinMain you must not use main as a function name.
// Pixel array
Pixel* pixelBuffer = nullptr;
It's unclear what pixelBuffer is meant for later on. You upload it into a texture, but apparently don't set up the drawing to use that texture.
t1Id = GetCurrentThreadId();
std::thread t = std::thread(&t2main, t1Id, 0, 0, sw, sh, pw, ph, windowName);
t.detach();
t2Handler = t.native_handle();
t2Id = GetThreadId(t2Handler);
What, I don't even. What is this supposed to do in the first place? First things first: don't mix the Win32 threads API and C++ std::thread. Decide on one and stick with it.
while(true) {
MSG msg;
PeekMessageA(&msg, NULL, WM_USER, WM_USER + 100, PM_REMOVE);
if(msg.message == WM_USER) {
threadWindowHandler = (HWND) msg.wParam;
break;
}
}
Why the hell are you passing the window handle through a thread message? This is so wrong on so many levels. Threads all live in the same address space, so you could use a queue, or global variables, or pass it as a parameter to the thread entry function, etc., etc.
Furthermore, you could just have created the OpenGL context in the main thread and then passed it over.
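As a minimal sketch of that suggestion (not the original code), the HWND can be handed to the main thread with a std::promise/std::future pair instead of PostThreadMessage; the CreateWindowEx call itself is elided here:
#include <windows.h>
#include <future>
#include <thread>
int main() {
    std::promise<HWND> hwndPromise;
    std::future<HWND> hwndFuture = hwndPromise.get_future();
    std::thread messageThread([&hwndPromise] {
        HWND hwnd = nullptr; // CreateWindowEx(...) would go here, as in t2main
        hwndPromise.set_value(hwnd);   // publish the handle to the main thread
        // ... run the message loop on this thread ...
    });
    HWND threadWindowHandler = hwndFuture.get(); // blocks until the window exists
    // ... set up the pixel format and create the GL context with threadWindowHandler ...
    messageThread.join();
    return 0;
}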
wglMakeCurrent(glDeviceContext, glRenderContext);
// Create an OpenGl buffer
GLuint glBuffer;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glBuffer);
That doesn't create an OpenGL buffer object, it creates a texture name.
glBindTexture(GL_TEXTURE_2D, glBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// Create a pixel buffer to hold the screen data and allocate space for it
pixelBuffer = new Pixel[sw * sh];
// Test a pixel
pixelBuffer[10 * sw + 10] = Pixel(255, 255, 255);
Uhh, no, you don't supply drawable buffers to OpenGL in that way. Heck, you don't even supply draw buffers to OpenGL explicitly at all (this is not D3D12, Metal or Vulkan, where you do).
// Push the current buffer into view
glViewport(0, 0, ww, wh);
Noooo. That's not what glViewport does!
glViewport is part of the transformation pipeline state and ultimately sets the destination rectangle where, inside a drawable, the clip space volume will be mapped to. It does absolutely nothing with respect to the drawable's buffers.
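For illustration only (not part of the question's code), a viewport call merely selects the target rectangle inside the drawable:
glViewport(0, 0, 400, 300); // map clip space to the lower-left quarter of an 800x600 window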
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sw, sh, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelBuffer);
I think you don't understand what a texture is for. What this call does is copy the contents of pixelBuffer into the currently bound texture. After that OpenGL is no longer concerned with pixelBuffer at all.
glBegin(GL_QUADS);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0f, -1.0f, 0.0f);
glEnd();
Here you draw something, but never enabled the use of the texture in the first place. So all that ado about setting up the texture is for nothing.
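A sketch (assuming the fixed-function pipeline and the texture name from the question) of what would make the texture actually affect the quad at draw time:
glEnable(GL_TEXTURE_2D);                                     // texturing active for this draw
glBindTexture(GL_TEXTURE_2D, glBuffer);                      // bind the texture to sample from
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); // rather than GL_DECAL
glBegin(GL_QUADS);
/* glTexCoord2f / glVertex3f calls as in the question */
glEnd();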
SwapBuffers(glDeviceContext);
for(;;) {}
}
So after swapping the window buffers you make the thread spin forever. Two problems with that: there is still the message loop over in the other thread that handles other messages for the window, including maybe WM_PAINT, and depending on whether you've set a background brush and/or how you handle WM_ERASEBKGND, whatever you just drew might instantly vanish thereafter.
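A minimal sketch of suppressing the background erase, assuming a custom window procedure instead of the question's DefWindowProc:
#include <windows.h>
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) {
    switch (msg) {
    case WM_ERASEBKGND:
        return 1; // report "background erased" and do nothing, so the GL frame survives
    default:
        return DefWindowProc(hwnd, msg, wParam, lParam);
    }
}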
And by spinning the thread you're consuming CPU time for no reason whatsoever. You could just as well end the thread.
I solved the problem primarily with the help of @datenwolf's answer. Firstly, I used pointers to shared variables to pass data between threads, which removed the need for PostThreadMessageA, which was the main reason why I was using WinAPI threads in the first place. I also changed the OpenGL code a bit and finally got what I wanted.
Related
I have just started learning D3D and my code was working fine until I added the D3D shader compiler stuff to it.
I am using the tutorial on DirectXTutorials. If I just copy-paste the code from there into a new project, the program compiles fine.
However, I have put my code in different classes, unlike the tutorial. When I try to compile, it gives me the error: Syntax Error: "TextMetrica" (Compiling Direct3DRenderer.cpp).
Here is the Direct3DRenderer file:
#include "Window.h"
#include "Direct3DRenderer.h"
#include "Vertex.h"
Renderer::Renderer(HWND hw)
{
OutputDebugString("Direct3D Initializing\n");
DXGI_SWAP_CHAIN_DESC scd;
ZeroMemory(&scd, sizeof(DXGI_SWAP_CHAIN_DESC)); // ZERO OUT SCD
scd.BufferCount = 1; // HOW MANY BACKBUFFERS WE WANT
scd.OutputWindow = hw; // HANDLE TO THE OUTPUT WINDOW
scd.Windowed = true; // SHOULD WINDOW BE IN WINDOWED MODE BY DEFAULT
scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // BUFFER FORMAT
scd.BufferDesc.Width = 800; // BUFFER WIDTH
scd.BufferDesc.Height = 600; // BUFFER HEIGHT
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; // USE SWAP CHAIN AS OUTPUT TARGET
scd.SampleDesc.Count = 4; // MSAA COUNT
scd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; // FLAGS
if (D3D11CreateDeviceAndSwapChain(
NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
NULL,
NULL,
NULL,
D3D11_SDK_VERSION,
&scd,
&swapchain,
&dev,
NULL,
&context
) == SEVERITY_SUCCESS)
{
OutputDebugString("SUCCESS\n");
// Get The Address of BackBuffer
ID3D11Texture2D* pbuffer;
swapchain->GetBuffer(0, _uuidof(ID3D11Texture2D), (LPVOID*)& pbuffer);
// Create a Render Target COM Object from the buffer
dev->CreateRenderTargetView(pbuffer, NULL, &RenderTarget);
pbuffer->Release();
// Set Our RenderTarget as the back buffer
context->OMSetRenderTargets(1, &RenderTarget, NULL);
// Create Our Viewport
viewport.Height = 800;
viewport.Width = 600;
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
context->RSSetViewports(1, &viewport);
InitPipeline();
InitGraphics();
}
else
{
OutputDebugString("ERROR\n");
}
}
Renderer::~Renderer()
{
OutputDebugString("Direct3D Cleanup Phase Started.\n");
swapchain->SetFullscreenState(FALSE, NULL);
swapchain->Release();
context->Release();
RenderTarget->Release();
VS->Release();
PS->Release();
dev->Release();
OutputDebugString("Direct3D Cleanup Phase Completed.\n");
}
void Renderer::InitPipeline()
{
// Compile Shaders from file
D3DX11CompileFromFile("shaders.shader", 0, 0, "VShader", "vs_4_0", 0, 0, 0, &compiled_vs, 0, 0);
D3DX11CompileFromFile("shaders.shader", 0, 0, "PShader", "ps_4_0", 0, 0, 0, &compiled_ps, 0, 0);
// Convert Compiled Shaders to COM Shader Objects
dev->CreateVertexShader(compiled_vs->GetBufferPointer(), compiled_vs->GetBufferSize(), NULL, &VS);
dev->CreatePixelShader(compiled_ps->GetBufferPointer(), compiled_ps->GetBufferSize(), NULL, &PS);
// Sets the shaders to the device / Activates the shader
context->VSSetShader(VS, 0, 0);
context->PSSetShader(PS, 0, 0);
// Create the Input Layout
D3D11_INPUT_ELEMENT_DESC VertexElementDesc[] = {
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0}
};
dev->CreateInputLayout(VertexElementDesc, 2, compiled_vs->GetBufferPointer(), compiled_vs->GetBufferSize(), &InputLayout);
context->IASetInputLayout(InputLayout);
}
void Renderer::InitGraphics() {
// Create Buffer so we can duplicate data from system memory to graphics memory
ZeroMemory(&VBufferDesc, sizeof(VBufferDesc));
VBufferDesc.ByteWidth = sizeof(Vertex) * 3;
VBufferDesc.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
VBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
VBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
dev->CreateBuffer(&VBufferDesc, NULL, &VBuffer);
Vertex OurVertices[] =
{
{0.0f, 0.5f, 0.0f, D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f)},
{0.45f, -0.5, 0.0f, D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f)},
{-0.45f, -0.5f, 0.0f, D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f)}
};
// we need to map to avoid issues
D3D11_MAPPED_SUBRESOURCE mapRes;
context->Map(VBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &mapRes);
memcpy(mapRes.pData, OurVertices, sizeof(OurVertices));
context->Unmap(VBuffer, NULL);
}
void Renderer::RenderFrame()
{
context->ClearRenderTargetView(RenderTarget, D3DXCOLOR(0.2, 0.4, 0.6, 1.0));
// We can do the rendering here
UINT stride = sizeof(Vertex);
UINT offset = 0;
context->IASetVertexBuffers(0, 1, &VBuffer, &stride, &offset);
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
context->Draw(3, 0);
// Swap Buffer
swapchain->Present(0, 0);
}
The TEXTMETRICA identifier error actually points into D3DX10Core.h. I have peeked at the definition and that file seems to be giving me the error; this identifier's definition should be in a GDI header.
I have added the include and lib paths to the DirectX (June 2010) SDK and also tried specifying d3dx10.lib, d3dx11.lib, d3d11.lib in the project's additional dependencies for all configurations. I am new, so I don't know what I am doing wrong. If any more code is required, please comment about it.
Keep in mind that most of the Internet tutorials for DirectX 11 are a bit outdated. In particular, you don't actually need the legacy DirectX SDK. See this blog post, this one, and this one.
If using VS 2015/2017/2019 and you still want to use the legacy DirectX SDK, then you have to set up the include/lib paths in a particular way or you'll get problems. The details are on Microsoft Docs.
You are welcome to use these older tutorials with some caveats, but you should also take a look at DirectX Tool Kit for some more 'modern' tutorials.
UPDATE: One other option for using the legacy D3DX9, D3DX10, and D3DX11 utility libraries is to use the Microsoft.DXSDK.D3DX NuGet package. This removes many of the quirks around mixing it with the modern Windows SDK and Visual C++ toolsets. It also includes the redistributable binaries with a simple side-by-side license instead of having to use legacy DXSETUP. That said, the binaries themselves are still quite ancient, have known bugs, and are no longer supported, so YMMV. See this blog post.
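For what it's worth, here is a hedged sketch (not the tutorial's code) of compiling the same shader file with D3DCompileFromFile from d3dcompiler.h, which ships with the modern Windows SDK, instead of the legacy D3DX11CompileFromFile; the file name and entry points follow the question's InitPipeline():
#include <windows.h>
#include <d3dcompiler.h>
#pragma comment(lib, "d3dcompiler.lib")
bool CompileShaders(ID3DBlob** vsBlob, ID3DBlob** psBlob)
{
    ID3DBlob* errors = nullptr;
    HRESULT hr = D3DCompileFromFile(L"shaders.shader", nullptr, nullptr,
                                    "VShader", "vs_4_0", 0, 0, vsBlob, &errors);
    if (FAILED(hr)) {
        if (errors) OutputDebugStringA((const char*)errors->GetBufferPointer());
        return false;
    }
    hr = D3DCompileFromFile(L"shaders.shader", nullptr, nullptr,
                            "PShader", "ps_4_0", 0, 0, psBlob, &errors);
    if (FAILED(hr)) {
        if (errors) OutputDebugStringA((const char*)errors->GetBufferPointer());
        return false;
    }
    return true;
}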
I have an application that creates a 3D model and exports an image from that. I use this example to do it:
#include <windows.h>
#include <GL\GL.h>
#include <GL\glu.h>
#include <GL\glut.h>
#include <opencv2\highgui.hpp>
GLfloat light_diffuse[] = { 1.0, 0.0, 0.0, 1.0 }; /* Red diffuse light. */
GLfloat light_position[] = { 1.0, 1.0, 1.0, 0.0 }; /* Infinite light location. */
GLfloat n[6][3] = { /* Normals for the 6 faces of a cube. */
{ -1.0, 0.0, 0.0 },{ 0.0, 1.0, 0.0 },{ 1.0, 0.0, 0.0 },
{ 0.0, -1.0, 0.0 },{ 0.0, 0.0, 1.0 },{ 0.0, 0.0, -1.0 } };
GLint faces[6][4] = { /* Vertex indices for the 6 faces of a cube. */
{ 0, 1, 2, 3 },{ 3, 2, 6, 7 },{ 7, 6, 5, 4 },
{ 4, 5, 1, 0 },{ 5, 6, 2, 1 },{ 7, 4, 0, 3 } };
GLfloat v[8][3]; /* Will be filled in with X,Y,Z vertexes. */
void drawBox(void)
{
int i;
for (i = 0; i < 6; i++) {
glBegin(GL_QUADS);
glNormal3fv(&n[i][0]);
glVertex3fv(&v[faces[i][0]][0]);
glVertex3fv(&v[faces[i][1]][0]);
glVertex3fv(&v[faces[i][2]][0]);
glVertex3fv(&v[faces[i][3]][0]);
glEnd();
}
}
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
drawBox();
glFlush();
}
void init(void)
{
/* Setup cube vertex data. */
v[0][0] = v[1][0] = v[2][0] = v[3][0] = -1;
v[4][0] = v[5][0] = v[6][0] = v[7][0] = 1;
v[0][1] = v[1][1] = v[4][1] = v[5][1] = -1;
v[2][1] = v[3][1] = v[6][1] = v[7][1] = 1;
v[0][2] = v[3][2] = v[4][2] = v[7][2] = 1;
v[1][2] = v[2][2] = v[5][2] = v[6][2] = -1;
/* Enable a single OpenGL light. */
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glEnable(GL_LIGHT0);
glEnable(GL_LIGHTING);
/* Use depth buffering for hidden surface elimination. */
glEnable(GL_DEPTH_TEST);
/* Setup the view of the cube. */
glMatrixMode(GL_PROJECTION);
gluPerspective( /* field of view in degree */ 40.0,
/* aspect ratio */ 1.0,
/* Z near */ 1.0, /* Z far */ 10.0);
glMatrixMode(GL_MODELVIEW);
gluLookAt(0.0, 0.0, 5.0, /* eye is at (0,0,5) */
0.0, 0.0, 0.0, /* center is at (0,0,0) */
0.0, 1.0, 0.); /* up is in positive Y direction */
/* Adjust cube position to be asthetic angle. */
glTranslatef(0.0, 0.0, -1.0);
glRotatef(60, 1.0, 0.0, 0.0);
glRotatef(-20, 0.0, 0.0, 1.0);
}
int main(int argc, char **argv)
{
int width = 500, height = 500;
/********* i want to remove this section ************/
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutCreateWindow("red 3D lighted cube");
/********* i want to remove this section ************/
init();
display();
BYTE* result = new BYTE[3 * width *height];
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, result);
cv::Mat img(width, height, CV_8UC3);
img.data = result;
cv::flip(img, img, 0);
cv::imwrite("D:\\result_off.jpg", img);
return 0; /* ANSI C requires main to return int. */
}
It works correctly, but when I run this program it creates a window, shows it, and then removes it.
I've tried to remove the glut* functions and run my program, but then it didn't export anything. I googled and found that I should use a framebuffer, but I couldn't find any example.
How can I make my program not show any window when rendering my 3D model?
Note: I want to run this program in Windows and Linux.
I just had a look at the source code I wrote for Windows. As it was a study for production code (and hence uses other parts of our production code) I cannot provide it as is. What I present here is a stripped version which should show how it works:
// standard C/C++ header:
#include <iostream>
// Windows header:
#include <Windows.h>
using namespace std;
int main(int argc, char **argv)
{
if (argc < 3) {
cerr << "USAGE: " << argv[0]
<< " FILE [FILES...] IMG_FILE" << endl;
return -1;
}
// Import Scene Graph
// excluded: initialize importers
// excluded: import 3d files
#ifdef _WIN32
// Window Setup
// set window properties
enum { Width = 1024, Height = 768 };
WNDCLASSEX wndClass; memset(&wndClass, 0, sizeof wndClass);
wndClass.cbSize = sizeof(WNDCLASSEX);
wndClass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC | CS_DBLCLKS;
wndClass.lpfnWndProc = &DefWindowProc;
wndClass.cbClsExtra = 0;
wndClass.cbWndExtra = 0;
wndClass.hInstance = 0;
wndClass.hIcon = 0;
wndClass.hCursor = LoadCursor(0, IDC_ARROW);
wndClass.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
wndClass.lpszMenuName = 0;
wndClass.lpszClassName = "WndClass";
wndClass.hIconSm = 0;
RegisterClassEx(&wndClass);
// style the window and remove the caption bar (WS_POPUP)
DWORD style = WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_POPUP;
// Create the window. Position and size it.
HWND hwnd = CreateWindowEx(0,
"WndClass",
"",
style,
CW_USEDEFAULT, CW_USEDEFAULT, Width, Height,
0, 0, 0, 0);
HDC hdc = GetDC(hwnd);
// Windows OpenGL Setup
PIXELFORMATDESCRIPTOR pfd; memset(&pfd, 0, sizeof pfd);
pfd.nSize = sizeof(pfd);
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cDepthBits = 16;
pfd.cStencilBits = 8;
pfd.iLayerType = PFD_MAIN_PLANE;
// get the best available match of pixel format for the device context
int iPixelFormat = ChoosePixelFormat(hdc, &pfd);
// make that the pixel format of the device context
SetPixelFormat(hdc, iPixelFormat, &pfd);
// create the context
HGLRC hGLRC = wglCreateContext(hdc);
wglMakeCurrent(hdc, hGLRC);
#endif // _WIN32
// OpenGL Rendering Setup
/* excluded: init our private OpenGL binding as
* the Microsoft API for OpenGL is stuck <= OpenGL 2.0
*/
// create Render Buffer Object (RBO) for colors
GLuint rboColor = 0;
glGenRenderbuffers(1, &rboColor);
glBindRenderbuffer(GL_RENDERBUFFER, rboColor);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, Width, Height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
// create Render Buffer Object (RBO) for depth
GLuint rboDepth = 0;
glGenRenderbuffers(1, &rboDepth);
glBindRenderbuffer(GL_RENDERBUFFER, rboDepth);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, Width, Height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
// create Frame Buffer Object (FBO)
GLuint fbo = 0;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
// attach RBO to FBO
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER, rboColor);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER, rboDepth);
// GL Rendering Setup
// excluded: prepare our GL renderer
glViewport(0, 0, Width, Height);
glClearColor(0.525f, 0.733f, 0.851f, 1.0f);
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
/* compute projection matrix from
* - field of view (property fov)
* - aspect ratio of view
* - near/far clip distance (properties dNear and dFar).
*/
const DegreeD fov(30.0);
const double dNear = 0.1, dFar = 100.0;
const double ar = (float)Width / Height;
const double d = ::tan(fov / 2.0) * 2.0 * dNear;
// excluded: construct a projection matrix for perspective view
// excluded: determine bounding sphere of 3D scene
// excluded: compute camera and view matrix from the bounding sphere of scene
// excluded: OpenGL rendering of 3d scene
// read image from render buffer
// excluded: prepare image object to store read-back
//Image::Object img(4, Image::BottomToTop);
//img.set(Width, Height, Image::RGB24);
//const size_t bytesPerLine = (3 * Width * 4 + 3) / 4;
//glReadPixels(0, 0, Width, Height, GL_RGB, GL_UNSIGNED_BYTE, img.getData());
// store image
const string filePath = argv[argc - 1];
// excluded: export image in a supported image file format
// clean-up
// excluded: clean-up of 3D scene (incl. OpenGL rendering add-ons)
glDeleteFramebuffers(1, &fbo);
glDeleteRenderbuffers(1, &rboColor);
glDeleteRenderbuffers(1, &rboDepth);
#ifdef _WIN32
wglMakeCurrent(NULL, NULL);
wglDeleteContext(hGLRC);
#endif // _WIN32
// done
return 0;
}
I didn't check whether this compiles as is. It is stripped out of code which compiles and runs on Windows 10 on my side.
A Note about OpenGL and Windows:
I did the GL binding by myself because the Microsoft Windows OpenGL API does not support OpenGL 3.0 or higher. (I could've used a library like glfw instead.) This means I have to assign function addresses to function pointers (to correct function prototypes) so that I can call OpenGL functions properly using C function calls.
The availability of the functions is guaranteed if I have appropriate H/W and the appropriate drivers installed. (There are ways to check whether the driver provides certain functions.)
If such bound function call fails (e.g. with a segmentation fault) possible reasons could be:
The signature of the called function is wrong. (I used headers downloaded from khronos.org to guarantee correct prototypes. Hopefully, the driver provider did as well.)
The function does not exist in the driver. (I use functions which are part of the OpenGL standard which is supported by the installed driver. The driver supports OpenGL 4.x but I need only OpenGL 3.x (at least until now).)
The function pointers have to be initialized before I use them. (I have written an initialization which is not exposed in the code. This is where I placed the comment /* excluded: init our private OpenGL binding as the Microsoft API for OpenGL is stuck <= OpenGL 2.0 */.)
To illustrate this, some code examples:
In my OpenGL init function, I do:
glGenFramebuffers
= (PFNGLGENFRAMEBUFFERSPROC)wglGetProcAddress(
"glGenFramebuffers");
and the header provides:
extern PFNGLGENFRAMEBUFFERSPROC glGenFramebuffers;
PFNGLGENFRAMEBUFFERSPROC is provided by glext.h, which I downloaded from khronos.org:
typedef void (APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers);
wglGetProcAddress() is provided by the Microsoft Windows API.
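As an aside (not shown in the original code), the returned pointer can be checked before use:
glGenFramebuffers
    = (PFNGLGENFRAMEBUFFERSPROC)wglGetProcAddress("glGenFramebuffers");
if (glGenFramebuffers == NULL) {
    // the current context/driver does not expose this function: fail gracefully here
}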
A Note about OpenGL and Linux:
If the H/W and the installed driver support the desired OpenGL standard, functions can be used as usual by
including the necessary headers (e.g. #include <GL/gl.h>)
linking the necessary libraries (e.g. -lGL -lGLU).
derhass commented:
There is absolutely no guarantee that GL 3.x functions are exported by whatever libGL.so one is using, and even if they are exported, there is no guarantee that the function is supported (i.e. mesa uses the same frontend lib for all driver backends, but each driver may only support a subset of the functions). You have to use the extension mechanism on both platforms.
I'm not able to provide a simple recommendation on how to handle this, nor do I have valuable practical experience with it. So I want to provide at least these links (from khronos.org) I found by a Google search:
Load OpenGL Functions
OpenGL Context
OpenGL Loading Library.
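For completeness, a hedged sketch of the Linux/GLX side, mirroring the wglGetProcAddress example above; it assumes <GL/glext.h> from khronos.org for the function-pointer type:
#include <GL/glx.h>
#include <GL/glext.h>
PFNGLGENFRAMEBUFFERSPROC myGenFramebuffers =
    (PFNGLGENFRAMEBUFFERSPROC)glXGetProcAddress(
        (const GLubyte*)"glGenFramebuffers");
// myGenFramebuffers may still be NULL or unsupported; check the extension string, as derhass notes.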
I have a WinForms application with a panel (500x500 pixels) that I want to render something in. At this point I am just trying to fill it in with a specific color. I want to use OpenGL/CUDA interop to do this.
I got the panel configured to be the region to render stuff in, however when I run my code, the panel just gets filled with the glClear(..) color, and nothing assigned by the kernel is displayed. It sort of worked this morning (inconsistently), and in my attempt to sort out the SwapBuffers() mess, I think I screwed it up.
Here is the pixel format initialization for OpenGL. It seems to work fine, I have the two buffers as I expected, and the context is correct:
static PIXELFORMATDESCRIPTOR pfd=
{
sizeof(PIXELFORMATDESCRIPTOR), // Size Of This Pixel Format Descriptor
1, // Version Number
PFD_DRAW_TO_WINDOW | // Format Must Support Window
PFD_SUPPORT_OPENGL | // Format Must Support OpenGL
PFD_DOUBLEBUFFER, // Must Support Double Buffering
PFD_TYPE_RGBA, // Request An RGBA Format
16, // Select Our Color Depth
0, 0, 0, 0, 0, 0, // Color Bits Ignored
0, // No Alpha Buffer
0, // Shift Bit Ignored
0, // No Accumulation Buffer
0, 0, 0, 0, // Accumulation Bits Ignored
16, // 16Bit Z-Buffer (Depth Buffer)
0, // No Stencil Buffer
0, // No Auxiliary Buffer
PFD_MAIN_PLANE, // Main Drawing Layer
0, // Reserved
0, 0, 0 // Layer Masks Ignored
};
GLint iPixelFormat;
// get the device context's best, available pixel format match
if((iPixelFormat = ChoosePixelFormat(hdc, &pfd)) == 0)
{
MessageBox::Show("ChoosePixelFormat Failed");
return 0;
}
// make that match the device context's current pixel format
if(SetPixelFormat(hdc, iPixelFormat, &pfd) == FALSE)
{
MessageBox::Show("SetPixelFormat Failed");
return 0;
}
if((m_hglrc = wglCreateContext(m_hDC)) == NULL)
{
MessageBox::Show("wglCreateContext Failed");
return 0;
}
if((wglMakeCurrent(m_hDC, m_hglrc)) == NULL)
{
MessageBox::Show("wglMakeCurrent Failed");
return 0;
}
After this is done, I set up the ViewPort as such:
glViewport(0,0,iWidth,iHeight); // Reset The Current Viewport
glMatrixMode(GL_MODELVIEW); // Select The Modelview Matrix
glLoadIdentity(); // Reset The Modelview Matrix
glEnable(GL_DEPTH_TEST);
Then I set up the clear color and do a clear:
glClearColor(1.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT);
Now I set up the CUDA/OpenGL interop:
cudaDeviceProp prop; int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 1; prop.minor = 0;
checkCudaErrors(cudaChooseDevice(&dev, &prop));
checkCudaErrors(cudaGLSetGLDevice(dev));
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
GLuint bufferID;
cudaGraphicsResource * resourceID;
glGenBuffers(1, &bufferID);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferID);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, fWidth*fHeight*4, NULL, GL_DYNAMIC_DRAW_ARB);
checkCudaErrors(cudaGraphicsGLRegisterBuffer( &resourceID, bufferID, cudaGraphicsMapFlagsNone ));
Now I try to call my kernel (which just paints each pixel a specific color) and have that displayed.
uchar4* devPtr;
size_t size;
// First clear the back buffer:
glClearColor(1.0f, 0.5f, 0.0f, 0.0f); // orange
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
checkCudaErrors(cudaGraphicsMapResources(1, &resourceID, NULL));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resourceID));
animate(devPtr); // This will call the kernel and do a sync (see later)
checkCudaErrors(cudaGraphicsUnmapResources(1, &resourceID, NULL));
// Swap buffers to bring back buffer forward:
SwapBuffers(m_hDC);
At this point I expect to see the kernel colors on the screen, but no! I see orange, which is the clear color that I just set.
Here is the call to the kernel:
void animate(uchar4* dispPtr)
{
checkCudaErrors(cudaDeviceSynchronize());
animKernel<<<blocks, threads>>>(dispPtr, envdim);;
checkCudaErrors(cudaDeviceSynchronize());
}
Here envdim is just the dimensions (so 500x500). The kernel itself:
__global__ void animKernel(uchar4 *optr, dim3 matdim)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * matdim.x;
if (x < matdim.x && y < matdim.y)
{
// BLACK:
optr[offset].x = 0; optr[offset].y = 0; optr[offset].z = 0;
}
}
Things I've done:
The value returned by cudaGraphicsResourceGetMappedPointer's size is 1000000, which corresponds to the 500x500 matrix of uchar4, so that's good.
Each kernel printed the value and location that it was writing to, and that seemed ok.
Played with the alpha value for the clear color, but that doesn't seem to do anything (yet?)
Ran the animate() function several times. Don't know why I thought that would help, but I tried it...
So I guess I'm missing something, but I'm going kind of crazy looking for it. Any advice? Help?
It's another one of those questions I answer myself! Hmph, as I figured, it was a one line issue. The problem resides in the rendering call itself.
The configuration is fine, the one issue I have with the code above is:
I never called glDrawPixels(), which is necessary in order for the OpenGL driver to copy the shared buffer (GL_PIXEL_UNPACK_BUFFER_ARB) source to the display buffer. The correct rendering sequence is then:
uchar4* devPtr;
size_t size;
// First clear the back buffer:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
checkCudaErrors(cudaGraphicsMapResources(1, &resourceID, NULL));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resourceID));
animate(devPtr); // This will call the kernel and do a sync (see later)
checkCudaErrors(cudaGraphicsUnmapResources(1, &resourceID, NULL));
// This is necessary to copy the shared buffer to display
glDrawPixels(fWidth, fHeight, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// Swap buffers to bring back buffer forward:
SwapBuffers(m_hDC);
I'd like to thank the Acade-- uh, CUDA By Example, once again for helping me. Even though the example code from the book used GLUT (which was completely useless for this...), the book referenced normal gl functions.
I'm trying to learn OpenGL but haven't yet got the hang of it: I hit a problem at the first hurdle, where I try to display a bright red square but the image comes out as a maroon-coloured square. (I apologize, but I cannot post pictures due to not having enough reputation :( )
I've been using the SOIL library (http://www.lonesock.net/soil.html) to make the task of loading textures simpler, and I am fairly sure that this is where the problem lies.
I understand the most obvious answer is to not use SOIL, and to learn raw OGL first before I try using extensions, and I do intend to do this. However, I would still like this problem solved, for peace of mind.
My personal assumption is that I have probably enabled some sort of shading somewhere, or there is some quirk of OGL or SOIL that forces the shade of the texture to change; however, I am not experienced enough to solve this.
Below is what I believe to be the relevant code.
void displayBackground()
{
GetTexture("resources/red.png");
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex2f(0, 0);
glTexCoord2f(480, 0); glVertex2f( 480, 0);
glTexCoord2f(480, 480); glVertex2f( 480, 480);
glTexCoord2f(0, 480); glVertex2f(0, 480);
glEnd();
glDisable(GL_TEXTURE_2D);
}
And below is the SOIL-specific code which as far as I can tell should load a solid red texture into the active OGL texture
GLuint GetTexture(std::string Filename)
{
GLuint tex_ID;
tex_ID = SOIL_load_OGL_texture(
Filename.c_str(),
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_POWER_OF_TWO
| SOIL_FLAG_MIPMAPS
| SOIL_FLAG_COMPRESS_TO_DXT
| SOIL_FLAG_DDS_LOAD_DIRECT
);
if( tex_ID > 0 )
{
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, tex_ID );
return tex_ID;
}
else
return 0;
}
Thank you in advance for any insight into where I have possibly gone wrong.
@Nazar554
I'm assuming this is what you mean by the view port? Sorry, I'm aware this is very basic OGL stuff and I probably sound rather stupid, but you've got to start somewhere right? :P
/** OpenGL Initial Setup**/
//pixel format descriptor to describe pixel layout of a given surface
PIXELFORMATDESCRIPTOR pfd;
std::memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR));
pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW |
PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER_DONTCARE;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cDepthBits = 16;
pfd.iLayerType = PFD_MAIN_PLANE;
HDC hdc = GetDC(hwnd); //gets device context of hwnd. Device context is a set of graphics objects that define how to draw to the given device
int format = ChoosePixelFormat(hdc, &pfd); //chooses best pixel format for device context given the pfd to be used
SetPixelFormat(hdc, format, &pfd);
HGLRC hglrc;
hglrc = wglCreateContext(hdc); //creates OGL rendering context suitable for drawing on the device specified by hdc
wglMakeCurrent(hdc, hglrc); //makes hglrc the thread's current context. subsequent OGL calls made on hdc
glClearColor(0.0f, 0.0f, 0.0f, 0.0f); // Red, Green, Blue, Alpha. (Additive color) Does not need to be updated every cycle
glOrtho(0, 900, 600, 1.0, -1.0, 1.0); //sets co-ordinates system
Put glOrtho before glClearColor. Also, you need to select the projection matrix before calling glOrtho.
Use this:
glMatrixMode(GL_PROJECTION); // select projection matrix
glLoadIdentity(); // clear it
glOrtho(0, w, h, 0, 0, 1); // compute projection matrix, and multiply identity matrix by it
// w, h is your window size if you are doing 2D
glMatrixMode(GL_MODELVIEW); // select model matrix
Also, if you are studying OpenGL it is better to begin with modern versions (3.3+, or 2.1 without the old stuff), not 1.2. They have a lot of differences and it will be hard to unlearn everything you studied before. For beginners, freeglut or GLFW is simpler and more portable than pure Win32.
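As a minimal sketch (assuming GLFW 3.x is installed and linked; this is not code from the question), window and context creation looks like this:
#include <GLFW/glfw3.h>
int main() {
    if (!glfwInit()) return -1;
    GLFWwindow* window = glfwCreateWindow(900, 600, "Demo", nullptr, nullptr);
    if (!window) { glfwTerminate(); return -1; }
    glfwMakeContextCurrent(window);
    while (!glfwWindowShouldClose(window)) {
        glClear(GL_COLOR_BUFFER_BIT);
        // ... draw here ...
        glfwSwapBuffers(window);
        glfwPollEvents();
    }
    glfwTerminate();
    return 0;
}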
I've been attempting to render text onto an OpenGL window using SDL and the SDL_TTF library on Windows XP, VS2010.
Versions:
SDL version 1.2.14
SDL TTF devel 1.2.10
OpenGL (the version is at least 2-3 years old).
I have successfully created an OpenGL window using SDL / SDL_image and can render lines / polygons onto it with no problems.
However, moving on to text, it appears that there is some flaw in my current program; I am getting the following result when trying this code here.
For those not willing to visit pastebin, here are only the crucial code segments:
void drawText(char * text) {
glLoadIdentity();
SDL_Color clrFg = {0,0,255,0}; // set colour to blue (or 'red' for BGRA)
SDL_Surface *sText = TTF_RenderUTF8_Blended( fntCourier, text, clrFg );
GLuint * texture = create_texture(sText);
glBindTexture(GL_TEXTURE_2D, *texture);
// draw a polygon and map the texture to it, may be the source of error
glBegin(GL_QUADS); {
glTexCoord2i(0, 0); glVertex3f(0, 0, 0);
glTexCoord2i(1, 0); glVertex3f(0 + sText->w, 0, 0);
glTexCoord2i(1, 1); glVertex3f(0 + sText->w, 0 + sText->h, 0);
glTexCoord2i(0, 1); glVertex3f(0, 0 + sText->h, 0);
} glEnd();
// free the surface and texture, removing this code has no effect
SDL_FreeSurface( sText );
glDeleteTextures( 1, texture );
}
segment 2:
// create GLTexture out of SDL_Surface
GLuint * create_texture(SDL_Surface *surface) {
GLuint texture = 0;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
// The SDL_Surface appears to have BGR_A formatting, however this ends up with a
// white rectangle no matter which colour i set in the previous code.
int Mode = GL_RGB;
if(surface->format->BytesPerPixel == 4) {
Mode = GL_RGBA;
}
glTexImage2D(GL_TEXTURE_2D, 0, Mode, surface->w, surface->h, 0, Mode,
GL_UNSIGNED_BYTE, surface->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
return &texture;
}
Is there an obvious bit of code I am missing?
Thank you for any help on this subject.
I've been trying to learn openGL and SDL for 3 days now, so please forgive any misinformation on my part.
EDIT:
I notice that using
TTF_RenderUTF8_Shaded
TTF_RenderUTF8_Solid
throw a null pointer exception, meaning that there is an error within the actual text rendering function (I suspect). I do not know why this means TTF_RenderUTF8_Blended returns a red square, but I suspect all my troubles hinge on this.
I think the problem is in the glEnable(GL_TEXTURE_2D) and glDisable(GL_TEXTURE_2D) functions, which must be called every time the text is painted on the screen. And maybe the color conversion between the SDL and GL surface is not right either.
I have combined create_texture and drawText into a single function that displays the text properly. That's the code:
void drawText(char * text, TTF_Font* tmpfont) {
SDL_Rect area;
SDL_Color clrFg = {0,0,255,0};
SDL_Surface *sText = SDL_DisplayFormatAlpha(TTF_RenderUTF8_Blended( tmpfont, text, clrFg ));
area.x = 0;area.y = 0;area.w = sText->w;area.h = sText->h;
SDL_Surface* temp = SDL_CreateRGBSurface(SDL_HWSURFACE|SDL_SRCALPHA,sText->w,sText->h,32,0x000000ff,0x0000ff00,0x00ff0000,0x000000ff);
SDL_BlitSurface(sText, &area, temp, NULL);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sText->w, sText->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, temp->pixels);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS); {
glTexCoord2d(0, 0); glVertex3f(0, 0, 0);
glTexCoord2d(1, 0); glVertex3f(0 + sText->w, 0, 0);
glTexCoord2d(1, 1); glVertex3f(0 + sText->w, 0 + sText->h, 0);
glTexCoord2d(0, 1); glVertex3f(0, 0 + sText->h, 0);
} glEnd();
glDisable(GL_TEXTURE_2D);
SDL_FreeSurface( sText );
SDL_FreeSurface( temp );
}
screenshot
I'm initializing OpenGL as follows:
int Init(){
glClearColor( 0.1, 0.2, 0.2, 1);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho( 0, 600, 300, 0, -1, 1 );
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
if( glGetError() != GL_NO_ERROR ){
return false;
}
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_COLOR, GL_ONE_MINUS_SRC_ALPHA);
}
I think you should just add glEnable(GL_BLEND), because the code for the text surface says TTF_RenderUTF8_Blended( fntCourier, text, clrFg ) and you have to enable the blending abilities of OpenGL.
EDIT
Okay, I finally took the time to put your code through a compiler. Most importantly, compiled with -Werror so that warnings turn into errors:
GLuint * create_texture(SDL_Surface *surface) {
GLuint texture = 0;
/*...*/
return &texture;
}
I didn't see it at first, because that's something like C coder's 101 and is quite unexpected: you must not return pointers to local variables! Once the function goes out of scope, the returned pointer will point to nonsense. Why do you return a pointer at all? Just return an integer:
GLuint create_texture(SDL_Surface *surface) {
GLuint texture = 0;
/*...*/
return texture;
}
Because of this you're also not going to delete the texture afterwards. You upload it to OpenGL, but then lose the reference to it.
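A sketch of the corrected usage at the call site, keeping the returned name so it can be deleted:
GLuint tex = create_texture(sText);
glBindTexture(GL_TEXTURE_2D, tex);
/* ... draw the textured quad ... */
glDeleteTextures(1, &tex);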
Your code misses a glEnable(GL_TEXTURE_2D), which is why you can't see any effect of the texture. However, your use of textures is suboptimal. The way you did it, you recreate a whole new texture each time you're about to draw that text. If that happens in an animation loop, you'll
run out of texture memory rather soon
slow it down significantly
(1) can be addressed by not generating a new texture name on each redraw
(2) can be addressed by uploading new texture data only when the text changes, and by not using glTexImage2D but glTexSubImage2D (of course, if the dimensions of the texture change, it must be glTexImage2D) – see the sketch after this list
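A sketch of that idea, assuming the surface pointer from create_texture and keeping the question's pixel format handling:
static GLuint textTex = 0;
if (textTex == 0) {
    // first time: create the texture name and allocate storage
    glGenTextures(1, &textTex);
    glBindTexture(GL_TEXTURE_2D, textTex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, surface->w, surface->h, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, surface->pixels);
} else {
    // text changed, same size: just update the existing storage
    glBindTexture(GL_TEXTURE_2D, textTex);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, surface->w, surface->h,
                    GL_RGBA, GL_UNSIGNED_BYTE, surface->pixels);
}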
EDIT, found another possible issue, but first fix your pointer issue.
You should make sure that you're using the GL_REPLACE or GL_MODULATE texture environment mode. If using GL_DECAL or GL_BLEND you end up with red text on a red quad.
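For example (sketch only):
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); // or GL_REPLACE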
The function in my previous post was leaking memory and the program was crashing after some time...
I improved this by separating the texture loading and displaying:
The first function must be called before the SDL loop. It loads a text string into memory. Every string loaded must have a different txtNum parameter:
GLuint texture[100];
SDL_Rect area[100];
void Load_string(char * text, SDL_Color clr, int txtNum, const char* file, int ptsize){
TTF_Font* tmpfont;
tmpfont = TTF_OpenFont(file, ptsize);
SDL_Surface *sText = SDL_DisplayFormatAlpha(TTF_RenderUTF8_Solid( tmpfont, text, clr ));
area[txtNum].x = 0;area[txtNum].y = 0;area[txtNum].w = sText->w;area[txtNum].h = sText->h;
glGenTextures(1, &texture[txtNum]);
glBindTexture(GL_TEXTURE_2D, texture[txtNum]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, sText->w, sText->h, 0, GL_BGRA, GL_UNSIGNED_BYTE, sText->pixels);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
SDL_FreeSurface( sText );
TTF_CloseFont(tmpfont);
}
The second one displays the string and must be called in the SDL loop:
void drawText(float coords[3], int txtNum) {
glBindTexture(GL_TEXTURE_2D, texture[txtNum]);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS); {
glTexCoord2f(0, 0); glVertex3f(coords[0], coords[1], coords[2]);
glTexCoord2f(1, 0); glVertex3f(coords[0] + area[txtNum].w, coords[1], coords[2]);
glTexCoord2f(1, 1); glVertex3f(coords[0] + area[txtNum].w, coords[1] + area[txtNum].h, coords[2]);
glTexCoord2f(0, 1); glVertex3f(coords[0], coords[1] + area[txtNum].h, coords[2]);
} glEnd();
glDisable(GL_TEXTURE_2D);
}