How to draw bitmap as OpenGL texture in C++? - c++

I have a bitmap, and its handle (Win32 HBITMAP). Any suggestion of how to draw this bitmap on an OpenGL quad (with scaling and pulling the 4 corners of the bitmap to fit the 4 vertexes of the quad)?

You need to retrieve the data contained in the HBITMAP; see http://msdn.microsoft.com/en-us/library/dd144879(v=vs.85).aspx. Then you can upload the DIB data to OpenGL using glTexImage2D or glTexSubImage2D.
Once the texture has been created you can apply it as usual: enable texturing and give each corner of the quad a texture coordinate.
EDIT due to comment
This (untested!) code should do the trick
GLuint load_bitmap_to_texture(
    HDC device_context,
    HBITMAP bitmap_handle,
    bool flip_image) /* untested */
{
    const int BytesPerPixel = sizeof(DWORD);
    const int BitsPerPixel  = BytesPerPixel * 8;

    /* GetBitmapDimensionEx only reports values set with SetBitmapDimensionEx,
     * so query the pixel size of the bitmap through GetObject instead. */
    BITMAP bitmap_desc;
    if( !GetObject(bitmap_handle, sizeof(bitmap_desc), &bitmap_desc) )
        return 0;

    SIZE bitmap_size;
    bitmap_size.cx = bitmap_desc.bmWidth;
    bitmap_size.cy = bitmap_desc.bmHeight;

    size_t bitmap_buffer_size = bitmap_size.cx * bitmap_size.cy * BytesPerPixel;

#ifdef USE_DWORD
    DWORD *bitmap_buffer = (DWORD*)malloc(bitmap_buffer_size);
#else
    void *bitmap_buffer = malloc(bitmap_buffer_size);
#endif
    if( !bitmap_buffer )
        return 0;

    BITMAPINFO bitmap_info;
    memset(&bitmap_info, 0, sizeof(bitmap_info));
    bitmap_info.bmiHeader.biSize        = sizeof(bitmap_info.bmiHeader);
    bitmap_info.bmiHeader.biWidth       = bitmap_size.cx;
    bitmap_info.bmiHeader.biHeight      = bitmap_size.cy;
    bitmap_info.bmiHeader.biPlanes      = 1;
    bitmap_info.bmiHeader.biBitCount    = BitsPerPixel;
    bitmap_info.bmiHeader.biCompression = BI_RGB;
    if( flip_image ) /* this tells Windows where to set the origin (top or bottom) */
        bitmap_info.bmiHeader.biHeight *= -1;

    if( !GetDIBits(device_context,
                   bitmap_handle,
                   0, bitmap_size.cy,
                   bitmap_buffer,
                   &bitmap_info,
                   DIB_RGB_COLORS /* irrelevant, but GetDIBits expects a valid value */ )
    ) {
        free(bitmap_buffer);
        return 0;
    }

    GLuint texture_name;
    glGenTextures(1, &texture_name);
    glBindTexture(GL_TEXTURE_2D, texture_name);

    glPixelStorei(GL_UNPACK_SWAP_BYTES, GL_FALSE);
    glPixelStorei(GL_UNPACK_LSB_FIRST, GL_TRUE);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
    glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
                 bitmap_size.cx, bitmap_size.cy, 0,
                 GL_RGBA,
#ifdef USE_DWORD
                 GL_UNSIGNED_INT_8_8_8_8,
#else
                 GL_UNSIGNED_BYTE,
#endif
                 bitmap_buffer);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    free(bitmap_buffer);

    return texture_name;
}
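To draw with the returned texture, a fixed-function sketch (not from the original answer; the HDC/HBITMAP variables and the quad coordinates are placeholders) could look like this:
GLuint tex = load_bitmap_to_texture(device_context, bitmap_handle, true);

glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex);

glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f); /* bottom left  */
    glTexCoord2f(1.0f, 0.0f); glVertex2f( 1.0f, -1.0f); /* bottom right */
    glTexCoord2f(1.0f, 1.0f); glVertex2f( 1.0f,  1.0f); /* top right    */
    glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f,  1.0f); /* top left     */
glEnd();

glDisable(GL_TEXTURE_2D);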

Related

OpenGL transparency doing weird things

I am trying to render a texture with an alpha channel in it.
This is what I used for texture loading:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
I enabled GL_BLEND just before I render the texture: glEnable(GL_BLEND);
I also did this at the beginning of the code(the initialization): glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
This is the result (it should be a transparent texture of a first-person hand):
But when I load my texture like this (no alpha channel):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
This is the result:
Does anyone know what can cause this, or do I have to give more code?
Sorry for bad English, thanks in advance.
EDIT:
My texture loading code:
GLuint Texture::loadTexture(const char * imagepath) {
printf("Reading image %s\n", imagepath);
// Data read from the header of the BMP file
unsigned char header[54];
unsigned int dataPos;
unsigned int imageSize;
unsigned int width, height;
// Actual RGB data
unsigned char * data;
// Open the file
FILE * file = fopen(imagepath, "rb");
if (!file) { printf("%s could not be opened. \n", imagepath); getchar(); exit(0); }
// Read the header, i.e. the 54 first bytes
// If less than 54 bytes are read, problem
if (fread(header, 1, 54, file) != 54) {
printf("Not a correct BMP file\n");
exit(0);
}
// A BMP file always begins with "BM"
if (header[0] != 'B' || header[1] != 'M') {
printf("Not a correct BMP file\n");
exit(0);
}
// Make sure this is a 24bpp file
if (*(int*)&(header[0x1E]) != 0) { printf("Not a correct BMP file\n");}
if (*(int*)&(header[0x1C]) != 24) { printf("Not a correct BMP file\n");}
// Read the information about the image
dataPos = *(int*)&(header[0x0A]);
imageSize = *(int*)&(header[0x22]);
width = *(int*)&(header[0x12]);
height = *(int*)&(header[0x16]);
// Some BMP files are misformatted, guess missing information
if (imageSize == 0) imageSize = width*height * 3; // 3 : one byte for each Red, Green and Blue component
if (dataPos == 0) dataPos = 54; // The BMP header is done that way
// Create a buffer
data = new unsigned char[imageSize];
// Read the actual data from the file into the buffer
fread(data, 1, imageSize, file);
// Everything is in memory now, the file can be closed
fclose(file);
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, textureID);
if (imagepath == "hand.bmp") {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
}else {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
delete[] data;
return textureID;
}
As you can see, it's not code I wrote myself; I got it from opengl-tutorial.org.
My first comment stated:
The repeating, offset pattern looks like the data is treated as having a larger offset, when in reality it has smaller (or opposite).
And that was before I actually noticed what you did. Yes, this is precisely that. You can't treat 4-bytes-per-pixel data as 3-bytes-per-pixel data. The alpha channel gets interpreted as colour and that's why it all offsets this way.
If you want to disregard the alpha channel, you need to strip it off when loading so that it ends up having 3 bytes for each pixel value in the OpenGL texture memory. (That's what @RetoKoradi's answer is proposing, namely creating an RGB texture from RGBA data.)
If it isn't actually supposed to look so blue-ish, maybe it's not actually in BGR layout?
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
                                                          ^
                                                          \--- change to GL_RGBA as well
My wild guess is that human skin would have more red than blue light reflected by it.
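For illustration, a minimal CPU-side sketch of stripping the alpha channel (a hypothetical helper, not part of the original code) so the buffer really ends up with 3 bytes per pixel:
/* Hypothetical helper: converts a tightly packed 4-bytes-per-pixel buffer
 * (RGBA or BGRA) into a 3-bytes-per-pixel buffer (RGB or BGR) by dropping
 * the fourth channel of every pixel. */
void strip_alpha(const unsigned char *src, unsigned char *dst, size_t pixel_count)
{
    for (size_t i = 0; i < pixel_count; ++i) {
        dst[3 * i + 0] = src[4 * i + 0];
        dst[3 * i + 1] = src[4 * i + 1];
        dst[3 * i + 2] = src[4 * i + 2];
    }
}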
It looks like you misunderstood how the arguments of glTexImage2D() work:
The 3rd argument (internalformat) defines what format you want to use for the data stored in the texture.
The 7th and 8th argument (format and type) define the format of the data you pass into the call as the last argument.
Based on this, if the format of the data you're passing as the last argument is BGRA, and you want to create an RGB texture from it, the correct call is:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
Note that the 7th argument is now GL_BGRA, matching your input data, while the 3rd argument is GL_RGB, specifying that you want to use an RGB texture.
Seems you chose the wrong texture pixel alignment. To find the right one, try experimenting with the values 1, 2, and 4 for glPixelStorei with GL_UNPACK_ALIGNMENT.
Specification:
void glPixelStorei( GLenum pname,
GLint param);
pname Specifies the symbolic name of the parameter to be set. One value affects the packing of pixel data into memory: GL_PACK_ALIGNMENT. The other affects the unpacking of pixel data from memory: GL_UNPACK_ALIGNMENT.
param Specifies the value that pname is set to.
glPixelStorei sets pixel storage modes that affect the operation of subsequent glReadPixels as well as the unpacking of texture patterns (see glTexImage2D and glTexSubImage2D).
pname is a symbolic constant indicating the parameter to be set, and param is the new value. One storage parameter affects how pixel data is returned to client memory:
GL_PACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
The other storage parameter affects how pixel data is read from client memory:
GL_UNPACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
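For example, setting the unpack alignment right before the upload (a minimal sketch using the variables from the question's loader):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // 1 always works for tightly packed rows; try 2 or 4 for padded data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);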
The BMP format does not support transparency, at least not in the 3 most common versions (only GL_BGR mode and its masked modifications work). Use PNG, DDS, TIFF, or TGA (the simplest) instead.
Secondly, your total image data size computation is wrong:
imageSize = width*height * 3; // 3 : one byte for each Red, Green and Blue component
The right formula is:
imageSize = 4 * ((width * bitsPerPel + 31) / 32) * height;
where bitsPerPel is the picture's bits per pixel (8, 16, or 24).
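For example, a 24-bpp image that is 3 pixels wide has each row padded to a 4-byte boundary:
rowSize   = 4 * ((3 * 24 + 31) / 32) = 4 * 3 = 12 bytes // 9 bytes of pixel data + 3 bytes of padding
imageSize = rowSize * height                            // not 9 * height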
Here is the code of a function used to load simple TGA files with transparency support:
// Define targa header.
#pragma pack(1)
typedef struct
{
GLbyte identsize; // Size of ID field that follows header (0)
GLbyte colorMapType; // 0 = None, 1 = paletted
GLbyte imageType; // 0 = none, 1 = indexed, 2 = rgb, 3 = grey, +8=rle
unsigned short colorMapStart; // First colour map entry
unsigned short colorMapLength; // Number of colors
unsigned char colorMapBits; // bits per palette entry
unsigned short xstart; // image x origin
unsigned short ystart; // image y origin
unsigned short width; // width in pixels
unsigned short height; // height in pixels
GLbyte bits; // bits per pixel (8 16, 24, 32)
GLbyte descriptor; // image descriptor
} TGAHEADER;
#pragma pack(8)
GLbyte *gltLoadTGA(const char *szFileName, GLint *iWidth, GLint *iHeight, GLint *iComponents, GLenum *eFormat)
{
FILE *pFile; // File pointer
TGAHEADER tgaHeader; // TGA file header
unsigned long lImageSize; // Size in bytes of image
short sDepth; // Pixel depth;
GLbyte *pBits = NULL; // Pointer to bits
// Default/Failed values
*iWidth = 0;
*iHeight = 0;
*eFormat = GL_BGR_EXT;
*iComponents = GL_RGB8;
// Attempt to open the file
pFile = fopen(szFileName, "rb");
if(pFile == NULL)
return NULL;
// Read in header (binary)
fread(&tgaHeader, 18/* sizeof(TGAHEADER)*/, 1, pFile);
// Do byte swap for big vs little endian
#ifdef __APPLE__
BYTE_SWAP(tgaHeader.colorMapStart);
BYTE_SWAP(tgaHeader.colorMapLength);
BYTE_SWAP(tgaHeader.xstart);
BYTE_SWAP(tgaHeader.ystart);
BYTE_SWAP(tgaHeader.width);
BYTE_SWAP(tgaHeader.height);
#endif
// Get width, height, and depth of texture
*iWidth = tgaHeader.width;
*iHeight = tgaHeader.height;
sDepth = tgaHeader.bits / 8;
// Put some validity checks here. Very simply, I only understand
// or care about 8, 24, or 32 bit targa's.
if(tgaHeader.bits != 8 && tgaHeader.bits != 24 && tgaHeader.bits != 32)
{
fclose(pFile);
return NULL;
}
// Calculate size of image buffer
lImageSize = tgaHeader.width * tgaHeader.height * sDepth;
// Allocate memory and check for success
pBits = new GLbyte[lImageSize];
if(pBits == NULL)
return NULL;
// Read in the bits
// Check for read error. This should catch RLE or other
// weird formats that I don't want to recognize
if(fread(pBits, lImageSize, 1, pFile) != 1)
{
delete [] pBits; // allocated with new[], so use delete[] rather than free()
fclose(pFile);
return NULL;
}
// Set OpenGL format expected
switch(sDepth)
{
case 3: // Most likely case
*eFormat = GL_BGR_EXT;
*iComponents = GL_RGB8;
break;
case 4:
*eFormat = GL_BGRA_EXT;
*iComponents = GL_RGBA8;
break;
case 1:
*eFormat = GL_LUMINANCE;
*iComponents = GL_LUMINANCE8;
break;
};
// Done with File
fclose(pFile);
// Return pointer to image data
return pBits;
}
iWidth and iHeight return the texture dimensions, eFormat and iComponents the external and internal image formats, and the function's return value is a pointer to the texture data.
So your function must look like:
GLuint Texture::loadTexture(const char * imagepath) {
printf("Reading image %s\n", imagepath);
// Image info filled in by the TGA loader
int width, height;
int component;
GLenum eFormat;
// Actual image data
GLbyte * data = gltLoadTGA(imagepath, &width, &height, &component, &eFormat);
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, textureID);
if (!strcmp(imagepath,"hand.tga")) { // important because we comparing strings not pointers
glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
}else {
glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
delete[] data;
return textureID;
}

C++ OpenGL Texture not loading

void OGLRectangle::LoadTexture(const char* filename)
{
unsigned int texture;
int width, height;
BYTE * data;
FILE * file;
file = fopen(filename, "rb");
width = 1920;
height = 1080;
data = new BYTE[height * width * 3];
fread(data, width * height * 3, 1, file);
fclose(file);
glGenTextures(1.0, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
tex = texture;
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D, 0, 2, width, height,0, GL_RGB, GL_UNSIGNED_BYTE, data);
delete [] data;
}
I have this code to render in an image, the method is called with:
LoadTexture("C:\\Users\Rhys\Documents\Hills.bmp");
The file exists.
Then I'm trying to render it to the OpenGL window using:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex);
glBegin(GL_QUADS);
glTexCoord2d(0.0, 0.0); glVertex2d(0.0, 0.0);
glTexCoord2d(1.0, 0.0); glVertex2d(100.0, 0.0);
glTexCoord2d(1.0, 1.0); glVertex2d(100.0, 100.0);
glTexCoord2d(0.0, 1.0); glVertex2d(0.0, 100.0);
glEnd();
glDisable(GL_TEXTURE_2D);
However, all I'm getting on screen is a darkish blue box, with no texture rendered in it.
I have searched for tutorials on how to do this, and even asked my lecturer, but I still cannot seem to find out why it's not working.
Any help will be greatly appreciated.
The .bmp file loading must be a little different.
This code simply loads a BMP file into memory (m_pcbData) without compression or indexed color support.
bool CBMPImage::LoadFromFile(const CString& FileName)
{
BITMAPINFOHEADER BitmapInfo;
ZeroMemory(&BitmapInfo, sizeof(BITMAPINFOHEADER));
BITMAPFILEHEADER BitmapFile;
ZeroMemory(&BitmapFile, sizeof(BITMAPFILEHEADER));
std::ifstream FileStream(FileName, std::ios::binary | std::ios::in);
if (!FileStream.good())
return false;
// Read bitmap file info
FileStream.read(reinterpret_cast<char*>(&BitmapFile), sizeof(BITMAPFILEHEADER));
// Read bitmap info
FileStream.read(reinterpret_cast<char*>(&BitmapInfo), sizeof(BITMAPINFOHEADER));
// Proper bitmap file supports only 1 plane
if (BitmapInfo.biPlanes != 1)
return false;
m_cbAlphaBits = 0;
m_cbRedBits = 0;
m_cbGreenBits = 0;
m_cbBlueBits = 0;
// Retrieves bits per pixel info
m_cbBitsPerPel = (BMPbyte)BitmapInfo.biBitCount;
// Width and height of image
m_nWidth = BitmapInfo.biWidth;
m_nHeight = BitmapInfo.biHeight;
// Compute bitmap file size
m_nSize = 4 * ((m_nWidth * m_cbBitsPerPel + 31) / 32) * m_nHeight;
// Less important info
m_nPixelWidthPerMeter = BitmapInfo.biXPelsPerMeter;
m_nPixelHeightPerMeter = BitmapInfo.biYPelsPerMeter;
// Indexes info not important in our case
m_nClrCount = BitmapInfo.biClrUsed;
m_nClrImportant = BitmapInfo.biClrImportant;
// COMPRESSION MUST BE BI_RGB
m_Compression = (BMPCompression)BitmapInfo.biCompression;
delete [] m_pcbData;
m_pcbData = NULL;
// Allocate proper data size
m_pcbData = new BMPbyte[m_nSize];
// Read actual image data, considering offset of file header
FileStream.seekg(BitmapFile.bfOffBits);
FileStream.read(reinterpret_cast<char*>(m_pcbData), m_nSize);
FileStream.close();
return true;
}
Then load the BMP texture data into OpenGL:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, Image.GetWidth(), Image.GetHeight(), 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, (GLvoid*)Image.GetImageData());
GL_BGR_EXT is important because BMP stores image data in reverse (blue-first) byte order.
Secondly, you must specify your material color as white because of the texture environment you use (GL_TEXTURE_ENV_MODE set to GL_MODULATE).
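For example (a minimal fixed-function sketch, issued before drawing the textured geometry):
glColor3f(1.0f, 1.0f, 1.0f); // with GL_MODULATE, texel * white = texel
// or, if lighting is enabled, set the material instead:
GLfloat white[] = { 1.0f, 1.0f, 1.0f, 1.0f };
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, white);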
And as @RetoKoradi mentioned, you must arrange for mipmaps to be generated, either by calling glGenerateMipmap after uploading the texture image or by setting GL_GENERATE_MIPMAP before it:
glGenerateMipmap(GL_TEXTURE_2D);
or
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
Plus, since you used non-power-of-two texture dimensions (width = 1920; height = 1080), it may not work on older hardware.
You're setting the attribute to sample with mipmaps:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
You should only set that if your texture actually has mipmaps. To generate mipmaps, you can call:
glGenerateMipmap(GL_TEXTURE_2D);
after the glTexImage2D() call. Or you can simply set the sampler attribute to not use mipmaps:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
As has already been pointed out: If your image file is indeed a BMP, and not just a raw image file, your image loading code will also need work.
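For instance, a minimal sketch of the loading part (assuming an uncompressed, bottom-up 24-bpp BMP; variable names mirror the question) that reads the header instead of treating the file as raw pixels:
unsigned char header[54];
if (fread(header, 1, 54, file) != 54) { fclose(file); return; }
int dataPos = *(int*)&header[0x0A];            // offset to the pixel data
int width   = *(int*)&header[0x12];
int height  = *(int*)&header[0x16];
if (dataPos == 0) dataPos = 54;
int rowSize = 4 * ((width * 24 + 31) / 32);    // BMP rows are padded to 4 bytes
BYTE *data = new BYTE[rowSize * height];
fseek(file, dataPos, SEEK_SET);
fread(data, 1, rowSize * height, file);
fclose(file);
// upload with GL_BGR_EXT (BMP stores blue first) and GL_UNPACK_ALIGNMENT of 4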

How to egl offscreen render to an image on linux?

I'm trying to use egl to do offscreen rendering to an image.
My code doesn't generate any errors. The EGL part seems to be correct, and the FBO is also complete, but when I read pixels using glReadPixels, I always get a black image (I cleared the entire scene with red, so the image should be red too).
I can't figure out what's wrong.
Also, I noticed that glRenderbufferStorage only seems to support 16-bit color depth; GL_RGBA8 is considered an invalid parameter for this function. Isn't 16 bit a bit low for a serious OpenGL application?
My environment is Ubuntu 14.10 with Mesa and Intel graphics.
#include <QCoreApplication>
#include <QDebug>
#include <QImage>
#include <GLES2/gl2.h>
#include <EGL/egl.h>
int main(int argc, char *argv[])
{
#define CONTEXT_ES20
#ifdef CONTEXT_ES20
EGLint ai32ContextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_NONE };
#endif
// Step 1 - Get the default display.
EGLDisplay eglDisplay = eglGetDisplay((EGLNativeDisplayType)0);
// Step 2 - Initialize EGL.
eglInitialize(eglDisplay, 0, 0);
#ifdef CONTEXT_ES20
// Step 3 - Make OpenGL ES the current API.
eglBindAPI(EGL_OPENGL_ES_API);
// Step 4 - Specify the required configuration attributes.
EGLint pi32ConfigAttribs[5];
pi32ConfigAttribs[0] = EGL_SURFACE_TYPE;
pi32ConfigAttribs[1] = EGL_WINDOW_BIT;
pi32ConfigAttribs[2] = EGL_RENDERABLE_TYPE;
pi32ConfigAttribs[3] = EGL_OPENGL_ES2_BIT;
pi32ConfigAttribs[4] = EGL_NONE;
#else
EGLint pi32ConfigAttribs[3];
pi32ConfigAttribs[0] = EGL_SURFACE_TYPE;
pi32ConfigAttribs[1] = EGL_WINDOW_BIT;
pi32ConfigAttribs[2] = EGL_NONE;
#endif
// Step 5 - Find a config that matches all requirements.
int iConfigs;
EGLConfig eglConfig;
eglChooseConfig(eglDisplay, pi32ConfigAttribs, &eglConfig, 1,
&iConfigs);
if (iConfigs != 1) {
printf("Error: eglChooseConfig(): config not found.\n");
exit(-1);
}
// Step 6 - Create a surface to draw to.
EGLSurface eglSurface;
eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig,
(EGLNativeWindowType)NULL, NULL);
// Step 7 - Create a context.
EGLContext eglContext;
#ifdef CONTEXT_ES20
eglContext = eglCreateContext(eglDisplay, eglConfig, NULL,
ai32ContextAttribs);
#else
eglContext = eglCreateContext(eglDisplay, eglConfig, NULL, NULL);
#endif
// Step 8 - Bind the context to the current thread
eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext);
GLuint fboId = 0;
GLuint renderBufferWidth = 1280;
GLuint renderBufferHeight = 720;
// create a framebuffer object
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
// create a texture object
/* GLuint textureId;
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//GL_LINEAR_MIPMAP_LINEAR
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_HINT, GL_TRUE); // automatic mipmap
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, renderBufferWidth, renderBufferHeight, 0,
GL_RGB, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
// attach the texture to FBO color attachment point
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, textureId, 0);
*/
qDebug() << glGetError();
GLuint renderBuffer;
glGenRenderbuffers(1, &renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer);
qDebug() << glGetError();
glRenderbufferStorage(GL_RENDERBUFFER,
GL_RGB565,
renderBufferWidth,
renderBufferHeight);
qDebug() << glGetError();
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER,
renderBuffer);
qDebug() << glGetError();
GLuint depthRenderbuffer;
glGenRenderbuffers(1, &depthRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, renderBufferWidth, renderBufferHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer);
// check FBO status
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE) {
printf("Problem with OpenGL framebuffer after specifying color render buffer: \n%x\n", status);
} else {
printf("FBO creation succedded\n");
}
glClearColor(1.0,0.0,0.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
qDebug() << eglSwapBuffers( eglDisplay, eglSurface);
int size = 4 * renderBufferHeight * renderBufferWidth;
printf("print size");
printf("size %d", size);
qDebug() << size;
unsigned char *data2 = new unsigned char[size];
glReadPixels(0,0,renderBufferWidth,renderBufferHeight,GL_RGB, GL_RGB565, data2);
QImage image(data2, renderBufferWidth, renderBufferHeight,renderBufferWidth*2, QImage::Format_RGB16);
image.save("result.png");
qDebug() << "done";
QCoreApplication a(argc, argv);
return a.exec();
}
OpenGL ES 2.0 has a very limited number of formats/types that are supported for glReadPixels(). The ones you are trying to use are not guaranteed to be supported:
glReadPixels(0 ,0, renderBufferWidth, renderBufferHeight,
GL_RGB, GL_RGB565, data2);
Only two format/type combinations are supported:
- GL_RGBA/GL_UNSIGNED_BYTE.
- An implementation-dependent combination.
The format and type of the implementation-dependent combination can be queried with:
GLint format = 0, type = 0;
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format);
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &type);
This can give you one of the following combinations:
- GL_RGB/GL_UNSIGNED_BYTE.
- GL_RGB/GL_UNSIGNED_SHORT_5_6_5.
- GL_RGBA/GL_UNSIGNED_SHORT_4_4_4_4.
- GL_RGBA/GL_UNSIGNED_SHORT_5_5_5_1.
- GL_ALPHA/GL_UNSIGNED_BYTE.
So the combination you tried to use could be supported by an implementation, if it returns the corresponding values from the glGetIntegerv() calls above. However, there was a subtle but important error in the arguments of your glReadPixels() call even if it is supported: GL_RGB565 is an internal format (as used for renderbuffer storage), not a pixel transfer type, and the 6th argument of glReadPixels() expects a type. The call would have to be:
glReadPixels(0 ,0, renderBufferWidth, renderBufferHeight,
GL_RGB, GL_UNSIGNED_SHORT_5_6_5, data2);
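As a follow-up sketch (assuming the implementation reports the GL_RGB/GL_UNSIGNED_SHORT_5_6_5 combination), the readback can then be paired with QImage::Format_RGB16, which uses the same 5-6-5 packing on little-endian hosts; note that glReadPixels returns rows bottom-up:
unsigned char *data2 = new unsigned char[renderBufferWidth * renderBufferHeight * 2];
glReadPixels(0, 0, renderBufferWidth, renderBufferHeight,
             GL_RGB, GL_UNSIGNED_SHORT_5_6_5, data2);
QImage image(data2, renderBufferWidth, renderBufferHeight,
             renderBufferWidth * 2, QImage::Format_RGB16);
image.mirrored().save("result.png"); // flip vertically before saving
delete[] data2;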

Something wrong with converting SDL surface to GL texture

I can't find my mistake: why has the text texture not been created? When using the texture instead of the text I get nothing, or a black background with colored points. Please help.
GLuint texture;
SDL_Surface *text = NULL;
TTF_Font *font = NULL;
SDL_Color color = {0, 0, 0};
font = TTF_OpenFont("../test.ttf", 20);
text = TTF_RenderText_Solid(font, "Hello, SDL !!!", color);
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, text->w, text->h, 0, GL_RGB, GL_UNSIGNED_BYTE, text->pixels);
SDL_FreeSurface(text);
One thing you could add is to specify texture filters, e.g.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
A few things you have to check first:
- Is the font loaded properly? Check if "font == NULL"; maybe your font path is wrong.
- Is the shader (if you use a shader) set up properly?
My guess is that you set the wrong pixel format type in glTexImage2D, because random color dots appear on your texture.
Below is my code that loads an image via SDL_image for OpenGL use. I think it would be a good start to figure out what step you missed or forgot.
BTW, this code is not perfect. There are more than four pixel format types (such as indexed color), and I only handle some of them.
/*
* object_, originalWidth_ and originalHeight_ are private variables in
* this class, don't panic.
*/
void
Texture::Load(string filePath, GLint minMagFilter, GLint wrapMode)
{
SDL_Surface* image;
GLenum textureFormat;
GLint bpp; //Byte Per Pixel
/* Load image file */
image = IMG_Load(filePath.c_str());
if (image == nullptr) {
string msg("IMG error: ");
msg += IMG_GetError();
throw runtime_error(msg.c_str());
}
/* Find out pixel format type */
bpp = image->format->BytesPerPixel;
if (bpp == 4) {
if (image->format->Rmask == 0x000000ff)
textureFormat = GL_RGBA;
else
textureFormat = GL_BGRA;
} else if (bpp == 3) {
if (image->format->Rmask == 0x000000ff)
textureFormat = GL_RGB;
else
textureFormat = GL_BGR;
} else {
string msg("IMG error: Unknow pixel format, bpp = ");
msg += bpp;
throw runtime_error(msg.c_str());
}
/* Store width and height */
originalWidth_ = image->w;
originalHeight_ = image->h;
/* Make OpenGL texture */
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &object_);
glBindTexture(GL_TEXTURE_2D, object_);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, minMagFilter);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, minMagFilter);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrapMode);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrapMode);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
glTexImage2D(
GL_TEXTURE_2D, // texture type
0, // level
bpp, // internal format
image->w, // width
image->h, // height
0, // border
textureFormat, // format(in this texture?)
GL_UNSIGNED_BYTE, // data type
image->pixels // pointer to data
);
/* Clean these mess up */
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
SDL_FreeSurface(image);
}
For more information, you should check out the SDL wiki or dig into its source code to fully understand the architecture of SDL_Surface.
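Since TTF_RenderText_Solid returns an 8-bit palettized surface, one option (a sketch assuming SDL2, where SDL_ConvertSurfaceFormat and SDL_PIXELFORMAT_RGBA32 are available in 2.0.5 and later) is to normalize the surface to a known 32-bit layout before uploading it:
SDL_Surface *text = TTF_RenderText_Solid(font, "Hello, SDL !!!", color);
SDL_Surface *rgba = SDL_ConvertSurfaceFormat(text, SDL_PIXELFORMAT_RGBA32, 0);
SDL_FreeSurface(text);
if (rgba) {
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    // assumes rgba->pitch == rgba->w * 4; otherwise set GL_UNPACK_ROW_LENGTH accordingly
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, rgba->w, rgba->h, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, rgba->pixels);
    SDL_FreeSurface(rgba);
}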

OpenGL Texturing, no error but grey

Trying to colour terrain points based on texture colour (currently hard-coded to vec2(0.5, 0.5) for test purposes, which should be light blue), but all the points are grey. glGetError returns 0 throughout the whole process. I think I might be doing the render process wrong or have a problem with my shaders(?)
Vertex Shader:
void main(){
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment Shader:
uniform sampler2D myTextureSampler;
void main(){
gl_FragColor = texture2D(myTextureSampler, vec2(0.5, 0.5));
}
Terrain Class:
class Terrain
{
public:
Terrain(GLuint pProgram, char* pHeightmap, char* pTexture){
if(!LoadTerrain(pHeightmap))
{
OutputDebugString("Loading terrain failed.\n");
}
if(!LoadTexture(pTexture))
{
OutputDebugString("Loading terrain texture failed.\n");
}
mProgram = pProgram;
mTextureLocation = glGetUniformLocation(pProgram, "myTextureSampler");
};
~Terrain(){};
void Draw()
{
glEnableClientState(GL_VERTEX_ARRAY); // Uncommenting this causes me to see nothing at all
glBindBuffer(GL_ARRAY_BUFFER, mVBO);
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnable( GL_TEXTURE_2D );
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, mBMP);
glProgramUniform1i(mProgram, mTextureLocation, 0);
GLenum a = glGetError();
glPointSize(5.0f);
glDrawArrays(GL_POINTS, 0, mNumberPoints);
a = glGetError();
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisable( GL_TEXTURE_2D );
glDisableClientState(GL_VERTEX_ARRAY);
}
private:
GLuint mVBO, mBMP, mUV, mTextureLocation, mProgram;
int mWidth;
int mHeight;
int mNumberPoints;
bool LoadTerrain(char* pFile)
{
/* Definitely no problem here - Vertex data is fine and rendering nice and dandy */
}
// TEXTURES MUST BE POWER OF TWO!!
bool LoadTexture(char *pFile)
{
unsigned char header[54]; // Each BMP file begins with a 54-byte header
unsigned int dataPos; // Position in the file where the actual data begins
unsigned int width, height;
unsigned int imageSize;
unsigned char * data;
FILE * file = fopen(pFile, "rb");
if(!file)
return false;
if(fread(header, 1, 54, file) != 54)
{
fclose(file);
return false;
}
if ( header[0]!='B' || header[1]!='M' )
{
fclose(file);
return false;
}
// Read ints from the byte array
dataPos = *(int*)&(header[0x0A]);
imageSize = *(int*)&(header[0x22]);
width = *(int*)&(header[0x12]);
height = *(int*)&(header[0x16]);
// Some BMP files are misformatted, guess missing information
if (imageSize==0) imageSize=width*height*3; // 3 : one byte for each Red, Green and Blue component
if (dataPos==0) dataPos=54; // The BMP header is done that way
// Create a buffer
data = new unsigned char [imageSize];
// Read the actual data from the file into the buffer
fread(data,1,imageSize,file);
//Everything is in memory now, the file can be closed
fclose(file);
// Create one OpenGL texture
glGenTextures(1, &mBMP);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, mBMP);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
delete [] data;
data = 0;
return true;
}
};
Answering my own question in case anyone has a similar problem:
I had tested this with multiple images, but it turns out there's a bug in my graphics application of choice, which has been exporting 8-bit bitmaps even though I explicitly told it to export 24-bit bitmaps. So basically, reverting back to MS Paint solved my problem. Three cheers for MS Paint.
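For reference, a small guard (a sketch reusing the 54-byte header the loader in this thread already reads) that would have caught the 8-bit export early:
unsigned short bitsPerPixel = *(unsigned short*)&header[0x1C]; // biBitCount
unsigned int   compression  = *(unsigned int*)&header[0x1E];   // biCompression
if (bitsPerPixel != 24 || compression != 0 /* BI_RGB */)
{
    fclose(file);
    return false; // this loader only handles uncompressed 24-bpp bitmaps
}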