Something wrong with converting SDL surface to GL texture - C++

I can't find my mistake: why is the text not being created? When I use the texture instead of the text I get nothing, or a black background with colored dots. Please help.
GLuint texture;
SDL_Surface *text = NULL;
TTF_Font *font = NULL;
SDL_Color color = {0, 0, 0};
font = TTF_OpenFont("../test.ttf", 20);
text = TTF_RenderText_Solid(font, "Hello, SDL !!!", color);
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, text->w, text->h, 0, GL_RGB, GL_UNSIGNED_BYTE, text->pixels);
SDL_FreeSurface(text);

One thing you should add is the texture filters. Without an explicit GL_TEXTURE_MIN_FILTER, the default (GL_NEAREST_MIPMAP_LINEAR) requires mipmap levels that were never created, which leaves the texture incomplete so it samples as black. E.g.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
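Beyond the filters, a likely root cause in the snippet above is the surface format: TTF_RenderText_Solid returns an 8-bit palettized surface, so uploading its pixels as GL_RGB reads garbage. A minimal sketch, assuming SDL2 (where SDL_ConvertSurfaceFormat is available), that converts the surface to a known 32-bit layout before uploading:

text = TTF_RenderText_Solid(font, "Hello, SDL !!!", color);
if (text == NULL) {
    fprintf(stderr, "TTF_RenderText_Solid: %s\n", TTF_GetError());
    return;
}
// ABGR8888 lays bytes out as R,G,B,A on little-endian machines,
// which matches GL_RGBA / GL_UNSIGNED_BYTE.
SDL_Surface *rgba = SDL_ConvertSurfaceFormat(text, SDL_PIXELFORMAT_ABGR8888, 0);
SDL_FreeSurface(text);
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // non-mipmap filter
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, rgba->w, rgba->h, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, rgba->pixels);
SDL_FreeSurface(rgba);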

A few things to check first:
Is the font loaded properly? Check whether font == NULL; maybe your font path is wrong. A quick check could look like the snippet below.
Is the shader (if you use one) set up properly?
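For example (a minimal sketch; TTF_GetError reports why the call failed):

font = TTF_OpenFont("../test.ttf", 20);
if (font == NULL) {
    // Most likely a wrong path; TTF_GetError() explains the failure.
    fprintf(stderr, "TTF_OpenFont: %s\n", TTF_GetError());
    return;
}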
My guess is that you set the wrong pixel format type in glTexImage2D, because random color dots appear on your texture.
Below is my code that loads an image via SDL_image for OpenGL use. I think it's a good starting point to figure out which step you missed or forgot.
BTW, this code is not perfect. There are more pixel format types than the four handled here (indexed color, for instance), and I only handle some of them.
/*
 * object_, originalWidth_ and originalHeight_ are private variables in
 * this class, don't panic.
 */
void
Texture::Load(string filePath, GLint minMagFilter, GLint wrapMode)
{
    SDL_Surface* image;
    GLenum textureFormat;
    GLint bpp; // Bytes per pixel

    /* Load image file */
    image = IMG_Load(filePath.c_str());
    if (image == nullptr) {
        string msg("IMG error: ");
        msg += IMG_GetError();
        throw runtime_error(msg.c_str());
    }

    /* Find out the pixel format type */
    bpp = image->format->BytesPerPixel;
    if (bpp == 4) {
        if (image->format->Rmask == 0x000000ff)
            textureFormat = GL_RGBA;
        else
            textureFormat = GL_BGRA;
    } else if (bpp == 3) {
        if (image->format->Rmask == 0x000000ff)
            textureFormat = GL_RGB;
        else
            textureFormat = GL_BGR;
    } else {
        string msg("IMG error: Unknown pixel format, bpp = ");
        msg += to_string(bpp); // convert to digits, not a raw char
        throw runtime_error(msg.c_str());
    }

    /* Store width and height */
    originalWidth_ = image->w;
    originalHeight_ = image->h;

    /* Make the OpenGL texture */
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &object_);
    glBindTexture(GL_TEXTURE_2D, object_);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, minMagFilter);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, minMagFilter);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrapMode);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrapMode);
    glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE); // legacy (pre-GL 3.0) mipmap generation
    glTexImage2D(
        GL_TEXTURE_2D,    // texture type
        0,                // level
        bpp,              // internal format (legacy style: component count)
        image->w,         // width
        image->h,         // height
        0,                // border
        textureFormat,    // format of the source pixels
        GL_UNSIGNED_BYTE, // data type
        image->pixels     // pointer to the pixel data
    );

    /* Clean this mess up */
    glBindTexture(GL_TEXTURE_2D, 0);
    glDisable(GL_TEXTURE_2D);
    SDL_FreeSurface(image);
}
For more information, you should check out the SDL wiki or dig into its source code to fully understand the architecture of SDL_Surface.
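As a hypothetical usage sketch (the variable name and file path are assumptions; only Texture::Load matches the code above):

Texture grass;
try {
    grass.Load("textures/grass.png", GL_LINEAR, GL_REPEAT);
} catch (const std::runtime_error& e) {
    std::cerr << e.what() << std::endl;
}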

Related

Assimp load fbx texture failed, POSSIBLE ISSUE: unit 0 GLD_TEXTURE_INDEX_2D is unloadable and bound to sampler type (Float)

I am using this code to load an FBX (note: specific to FBX), but the textures fail to load:
for (unsigned int i = 0; i < mat->GetTextureCount(type); i++) {
    aiString str;
    mat->GetTexture(type, i, &str);
    if (auto texture_inside = scene->GetEmbeddedTexture(str.C_Str())) {
        unsigned char *image_data = nullptr;
        int width, height, nrComponents;
        if (texture_inside->mHeight == 0) {
            image_data = stbi_load_from_memory(
                reinterpret_cast<unsigned char *>(texture_inside->pcData),
                texture_inside->mWidth, &width, &height, &nrComponents, 0);
        } else {
            image_data = stbi_load_from_memory(
                reinterpret_cast<unsigned char *>(texture_inside->pcData),
                texture_inside->mWidth * texture_inside->mHeight, &width, &height,
                &nrComponents, 0);
        }
        if (image_data) {
            GLenum format;
            if (nrComponents == 1)
                format = GL_RED;
            else if (nrComponents == 3)
                format = GL_RGB;
            else if (nrComponents == 4)
                format = GL_RGBA;
            unsigned int t_id;
            glGenTextures(1, &t_id);
            glBindTexture(GL_TEXTURE_2D, t_id);
            glTexImage2D(GL_TEXTURE_2D, 0, format, texture_inside->mWidth,
                         texture_inside->mHeight, 0, format, GL_UNSIGNED_BYTE,
                         image_data);
            glGenerateMipmap(GL_TEXTURE_2D);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                            GL_LINEAR_MIPMAP_LINEAR);
            glBindTexture(GL_TEXTURE_2D, 0);
            delete image_data;
            AnimTexture texture;
            texture.id = t_id;
            texture.type_name = typeName;
            texture.file_path = str.C_Str();
            textures.push_back(texture);
        }
        LOG(INFO) << "loading texture from embeded: " << str.C_Str();
    }
}
Then I get an error message like this:
UNSUPPORTED (log once): POSSIBLE ISSUE: unit 0 GLD_TEXTURE_INDEX_2D is unloadable and bound to sampler type (Float) - using zero texture because texture unloadable
My question is:
How do I load an FBX embedded texture in a correct, workable way?
What did I miss here that could have caused the errors above?
Currently I only get a wrong, dark black texture.
This is a common question in the Assimp project. You can find an example of how to load embedded textures here: How to deal with embedded textures
In short:
Get the data from the embedded texture
Decode it with an image converter (see the sketch below)
Put it into your texture on the GPU
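A minimal sketch of the decode step, assuming stb_image is the image converter (as in the question's code). The key detail the question's code misses: when mHeight == 0, pcData is a compressed blob of mWidth bytes, and the sizes passed to glTexImage2D must be the decoded width/height returned by stbi, not mWidth/mHeight:

const aiTexture *tex = scene->GetEmbeddedTexture(str.C_Str());
if (tex && tex->mHeight == 0) {
    int width = 0, height = 0, channels = 0;
    unsigned char *pixels = stbi_load_from_memory(
        reinterpret_cast<const unsigned char *>(tex->pcData),
        static_cast<int>(tex->mWidth), &width, &height, &channels, 0);
    if (pixels) {
        GLenum format = (channels == 4) ? GL_RGBA
                      : (channels == 3) ? GL_RGB : GL_RED;
        GLuint id = 0;
        glGenTextures(1, &id);
        glBindTexture(GL_TEXTURE_2D, id);
        // Use the *decoded* size, not the mWidth/mHeight of the blob.
        glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0,
                     format, GL_UNSIGNED_BYTE, pixels);
        glGenerateMipmap(GL_TEXTURE_2D);
        stbi_image_free(pixels); // stbi allocates with malloc; don't delete it
    }
}

If mHeight != 0, pcData is already an uncompressed aiTexel (BGRA) array and needs no stbi call at all.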

Surface poorly filled with sdl_ttf

I'm trying to make an OpenGL game in C++ and to implement a text system;
to do this I'm using SDL_ttf.
I already used SDL_ttf in another project, but with another API, so I wrote the same code, but here it fails to fill the surface's pixel data.
Here is my code:
void Text2Texture::setText(const char * text, size_t fontIndex) {
    SDL_Color c = {255, 255, 0, 255};
    SDL_Surface * surface;
    surface = TTF_RenderUTF8_Blended(loadedFonts_[fontIndex], text, c);
    if (surface == nullptr) {
        fprintf(stderr, "Error TTF_RenderText\n");
        return;
    }
    GLenum texture_format;
    GLint colors = surface->format->BytesPerPixel;
    if (colors == 4) { // alpha
        if (surface->format->Rmask == 0x000000ff)
            texture_format = GL_RGBA;
        else
            texture_format = GL_BGRA_EXT;
    } else { // no alpha
        if (surface->format->Rmask == 0x000000ff)
            texture_format = GL_RGB;
        else
            texture_format = GL_BGR_EXT;
    }
    glBindTexture(GL_TEXTURE_2D, textureId_);
    glTexImage2D(GL_TEXTURE_2D, 0, colors, surface->w, surface->h, 0, texture_format, GL_UNSIGNED_BYTE, surface->pixels);
    /// This line tells me the pixel data is 8 bits, which isn't good?
    std::cout << "pixel size : " << sizeof(surface->pixels) << std::endl;
    /// This line gives me the correct result
    fprintf(stderr, "texture size : %d %d\n", surface->w, surface->h);
    glBindTexture(GL_TEXTURE_2D, 0);
}
As you can see in the comment, the pixels pointer in the surface reports a size of 8 bits, which seems way too low for a texture. I don't know why it does that.
In the end, the texture data appears to be filled entirely with 0 (resulting in a black quad with very basic shaders).
In this project I'm using GLFW to create an OpenGL context, so I'm not using SDL itself and did not initialize it.
However, I did initialize SDL_ttf; here is everything I do before calling setText:
std::vector<TTF_Font *> Text2Texture::loadedFonts_;

void Text2Texture::init() {
    if (TTF_Init() == -1) {
        fprintf(stderr, "TTF_Init: %s\n", TTF_GetError());
    }
}

int Text2Texture::loadFont(std::string const& fontPath) {
    loadedFonts_.emplace_back();
    loadedFonts_.back() = TTF_OpenFont(fontPath.data(), 32);
    if (loadedFonts_.back() == nullptr) {
        fprintf(stderr, "TTF_OpenFont: %s \n", TTF_GetError());
        loadedFonts_.pop_back();
        return -1;
    }
    return ((int)loadedFonts_.size() - 1);
}

/// The constructor initializes the texture:
Text2Texture::Text2Texture() {
    glGenTextures(1, &textureId_);
    glBindTexture(GL_TEXTURE_2D, textureId_);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
My class has a static part; here is its body:
class Text2Texture {
public:
    Text2Texture();
    void setText(const char * text, size_t fontIndex = 0);
    unsigned int getId() const;
    // Static part
    static void init();
    static void quit();
    static int loadFont(std::string const& fontPath);
private:
    unsigned int textureId_;
    // Static part
    static std::vector<TTF_Font *> loadedFonts_;
};
I initialize SDL_ttf and load fonts with the static methods, then I create class instances to create specific textures.
If you can find my mistake I would be pleased to read your answer.
(By the way, I'm not really sure using SDL_ttf is the right approach; if you have a better idea I would take that too, but I would like to solve this problem first.)
The format and type parameters of glTexImage2D specify how a single pixel is encoded.
When the font texture is created, each pixel is encoded in a single byte. This means your texture consists of a single color channel and each pixel takes 1 byte.
I'm very sure that colors = surface->format->BytesPerPixel is 1.
Note that it is sufficient to encode the glyph in one color channel, because a glyph's information fits in a single byte.
By default, OpenGL assumes that the start of each row of an image is aligned to 4 bytes. This is because the GL_UNPACK_ALIGNMENT parameter is 4 by default. Since the image has one (red) color channel and is tightly packed, the start of a row is possibly misaligned.
Change the GL_UNPACK_ALIGNMENT parameter to 1 before specifying the two-dimensional texture image (glTexImage2D).
Since the texture has only one (red) color channel, the green and blue channels will be 0 and the alpha channel will be 1 when the texture is looked up. But you can make the green, blue, and even alpha channels read from the red channel, too.
This can be achieved by setting the texture swizzle parameters GL_TEXTURE_SWIZZLE_G, GL_TEXTURE_SWIZZLE_B, and GL_TEXTURE_SWIZZLE_A. See glTexParameter.
Further, note that the texture parameters are stored in the texture object. glTexParameter changes the texture object that is currently bound to the specified target of the current texture unit, so it is sufficient to set the parameters once when the texture image is created.
In comparison, glPixelStore changes global state and may have to be reset to its default value after specifying the texture image (if later calls to glTexImage2D rely on it).
The specification of the 2-dimensional texture image and setting the parameters may look as follows:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, surface->w, surface->h, 0,
             GL_RED, GL_UNSIGNED_BYTE, surface->pixels);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, GL_RED);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_RED);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_RED);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

C++ OpenGL Texture not loading

void OGLRectangle::LoadTexture(const char* filename)
{
    unsigned int texture;
    int width, height;
    BYTE * data;
    FILE * file;
    file = fopen(filename, "rb");
    width = 1920;
    height = 1080;
    data = new BYTE[height * width * 3];
    fread(data, width * height * 3, 1, file);
    fclose(file);
    glGenTextures(1.0, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    tex = texture;
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexImage2D(GL_TEXTURE_2D, 0, 2, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
    delete [] data;
}
I have this code to load an image; the method is called with:
LoadTexture("C:\\Users\Rhys\Documents\Hills.bmp");
The file exists.
Then I'm trying to render it to the OpenGL window using:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex);
glBegin(GL_QUADS);
glTexCoord2d(0.0, 0.0); glVertex2d(0.0, 0.0);
glTexCoord2d(1.0, 0.0); glVertex2d(100.0, 0.0);
glTexCoord2d(1.0, 1.0); glVertex2d(100.0, 100.0);
glTexCoord2d(0.0, 1.0); glVertex2d(0.0, 100.0);
glEnd();
glDisable(GL_TEXTURE_2D);
However, all I'm getting on screen is a darkish blue box, with no texture rendered in it.
I have searched for tutorials on how to do this and even asked my lecturer, but I still cannot figure out why it's not working.
Any help will be greatly appreciated.
Loading .bmp files needs slightly different handling.
This code simply loads a BMP file into memory (m_pcbData), without compression or indexed-color support.
bool CBMPImage::LoadFromFile(const CString& FileName)
{
    BITMAPINFOHEADER BitmapInfo;
    ZeroMemory(&BitmapInfo, sizeof(BITMAPINFOHEADER));
    BITMAPFILEHEADER BitmapFile;
    ZeroMemory(&BitmapFile, sizeof(BITMAPFILEHEADER));

    std::ifstream FileStream(FileName, std::ios::binary | std::ios::in);
    if (!FileStream.good())
        return false;
    // Read bitmap file info
    FileStream.read(reinterpret_cast<char*>(&BitmapFile), sizeof(BITMAPFILEHEADER));
    // Read bitmap info
    FileStream.read(reinterpret_cast<char*>(&BitmapInfo), sizeof(BITMAPINFOHEADER));
    // A proper bitmap file supports only 1 plane
    if (BitmapInfo.biPlanes != 1)
        return false;
    m_cbAlphaBits = 0;
    m_cbRedBits = 0;
    m_cbGreenBits = 0;
    m_cbBlueBits = 0;
    // Retrieve bits-per-pixel info
    m_cbBitsPerPel = (BMPbyte)BitmapInfo.biBitCount;
    // Width and height of the image
    m_nWidth = BitmapInfo.biWidth;
    m_nHeight = BitmapInfo.biHeight;
    // Compute bitmap data size (rows are padded to 4-byte boundaries)
    m_nSize = 4 * ((m_nWidth * m_cbBitsPerPel + 31) / 32) * m_nHeight;
    // Less important info
    m_nPixelWidthPerMeter = BitmapInfo.biXPelsPerMeter;
    m_nPixelHeightPerMeter = BitmapInfo.biYPelsPerMeter;
    // Index info, not important in our case
    m_nClrCount = BitmapInfo.biClrUsed;
    m_nClrImportant = BitmapInfo.biClrImportant;
    // COMPRESSION MUST BE BI_RGB
    m_Compression = (BMPCompression)BitmapInfo.biCompression;

    delete [] m_pcbData;
    m_pcbData = NULL;
    // Allocate the proper data size
    m_pcbData = new BMPbyte[m_nSize];
    // Read the actual image data, considering the offset in the file header
    FileStream.seekg(BitmapFile.bfOffBits);
    FileStream.read(reinterpret_cast<char*>(m_pcbData), m_nSize);
    FileStream.close();
    return true;
}
Then load the BMP texture data into OpenGL:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, Image.GetWidth(), Image.GetHeight(), 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, (GLvoid*)Image.GetImageData());
GL_BGR_EXT is important because BMP stores image data in reversed byte order.
Second, you must set your material color to white, because of the texture environment in use (GL_TEXTURE_ENV_MODE, GL_MODULATE).
And, as @Reto Koradi mentioned, you must request mipmap generation for the texture image using one of these calls:
glGenerateMipmap(GL_TEXTURE_2D);
or
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
Also, since you used non-power-of-two texture dimensions (width = 1920; height = 1080;), it may not work on hardware that requires power-of-two textures.
You're setting the attribute to sample with mipmaps:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
You should only set that if your texture actually has mipmaps. To generate mipmaps, you can call:
glGenerateMipmap(GL_TEXTURE_2D);
after the glTexImage2D() call. Or you can simply set the sampler attribute to not use mipmaps:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
As has already been pointed out: If your image file is indeed a BMP, and not just a raw image file, your image loading code will also need work.

OpenGL Texturing, no error but grey

I'm trying to colour terrain points based on a texture colour (currently hard-coded to vec2(0.5, 0.5) for test purposes, which should be light blue), but all the points are grey. glGetError returns 0 throughout the whole process. I think I might be doing the render process wrong or have a problem with my shaders(?)
Vertex Shader:
void main() {
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment Shader:
uniform sampler2D myTextureSampler;
void main() {
    gl_FragColor = texture2D(myTextureSampler, vec2(0.5, 0.5));
}
Terrain Class:
class Terrain
{
public:
    Terrain(GLuint pProgram, char* pHeightmap, char* pTexture) {
        if (!LoadTerrain(pHeightmap))
        {
            OutputDebugString("Loading terrain failed.\n");
        }
        if (!LoadTexture(pTexture))
        {
            OutputDebugString("Loading terrain texture failed.\n");
        }
        mProgram = pProgram;
        mTextureLocation = glGetUniformLocation(pProgram, "myTextureSampler");
    };

    ~Terrain() {};

    void Draw()
    {
        glEnableClientState(GL_VERTEX_ARRAY); // Uncommenting this causes me to see nothing at all
        glBindBuffer(GL_ARRAY_BUFFER, mVBO);
        glVertexPointer(3, GL_FLOAT, 0, 0);

        glEnable(GL_TEXTURE_2D);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, mBMP);
        glProgramUniform1i(mProgram, mTextureLocation, 0);

        GLenum a = glGetError();
        glPointSize(5.0f);
        glDrawArrays(GL_POINTS, 0, mNumberPoints);
        a = glGetError();

        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glDisable(GL_TEXTURE_2D);
        glDisableClientState(GL_VERTEX_ARRAY);
    }

private:
    GLuint mVBO, mBMP, mUV, mTextureLocation, mProgram;
    int mWidth;
    int mHeight;
    int mNumberPoints;

    bool LoadTerrain(char* pFile)
    {
        /* Definitely no problem here - vertex data is fine and renders nice and dandy */
    }

    // TEXTURES MUST BE POWER OF TWO!!
    bool LoadTexture(char *pFile)
    {
        unsigned char header[54]; // Each BMP file begins with a 54-byte header
        unsigned int dataPos;     // Position in the file where the actual data begins
        unsigned int width, height;
        unsigned int imageSize;
        unsigned char * data;

        FILE * file = fopen(pFile, "rb");
        if (!file)
            return false;
        if (fread(header, 1, 54, file) != 54)
        {
            fclose(file);
            return false;
        }
        if (header[0] != 'B' || header[1] != 'M')
        {
            fclose(file);
            return false;
        }
        // Read ints from the byte array
        dataPos = *(int*)&(header[0x0A]);
        imageSize = *(int*)&(header[0x22]);
        width = *(int*)&(header[0x12]);
        height = *(int*)&(header[0x16]);
        // Some BMP files are misformatted; guess the missing information
        if (imageSize == 0) imageSize = width * height * 3; // 3 : one byte for each Red, Green and Blue component
        if (dataPos == 0) dataPos = 54; // The BMP header is done that way
        // Create a buffer
        data = new unsigned char[imageSize];
        // Read the actual data from the file into the buffer
        fread(data, 1, imageSize, file);
        // Everything is in memory now, the file can be closed
        fclose(file);

        // Create one OpenGL texture
        glGenTextures(1, &mBMP);
        // "Bind" the newly created texture: all future texture functions will modify this texture
        glBindTexture(GL_TEXTURE_2D, mBMP);
        // Give the image to OpenGL
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
        delete [] data;
        data = 0;
        return true;
    }
};
Answering my own question in case anyone has a similar problem:
I had tested this with multiple images, but it turns out there's a bug in my graphics application of choice, which has been exporting 8-bit bitmaps even though I explicitly told it to export 24-bit bitmaps. So basically, reverting to MS Paint solved my problem. Three cheers for MS Paint.
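If you want to catch this in code instead of in the image editor, here is a small sketch (assuming the 54-byte header layout used by the loader above): biBitCount is a 16-bit little-endian field at byte offset 0x1C of the file, so you can reject anything that isn't 24-bit before uploading.

// Sketch: detect an 8-bit BMP before treating its pixels as 24-bit BGR.
bool is_24bit_bmp(const unsigned char header[54])
{
    // biBitCount: 16-bit little-endian field at byte offset 0x1C
    unsigned short bitCount =
        static_cast<unsigned short>(header[0x1C] | (header[0x1D] << 8));
    return bitCount == 24;
}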

How to draw bitmap as OpenGL texture in C++?

I have a bitmap and its handle (a Win32 HBITMAP). Any suggestion of how to draw this bitmap on an OpenGL quad (with scaling, pulling the 4 corners of the bitmap to the 4 vertices of the quad)?
You need to retrieve the data contained in the HBITMAP; see http://msdn.microsoft.com/en-us/library/dd144879(v=vs.85).aspx. Then you can upload the DIB data to OpenGL using glTexImage2D or glTexSubImage2D.
Once the texture is created, you can apply it as usual (enable texturing, give each corner of the quad a texture coordinate).
EDIT due to comment
This (untested!) code should do the trick
GLuint load_bitmap_to_texture(
    HDC device_context,
    HBITMAP bitmap_handle,
    bool flip_image) /* untested */
{
    const int BytesPerPixel = sizeof(DWORD);

    SIZE bitmap_size;
    if( !GetBitmapDimensionEx(bitmap_handle, &bitmap_size) )
        return 0;

    size_t bitmap_buffer_size = bitmap_size.cx * bitmap_size.cy * BytesPerPixel;

#ifdef USE_DWORD
    DWORD *bitmap_buffer = static_cast<DWORD*>(malloc(bitmap_buffer_size));
#else
    void *bitmap_buffer = malloc(bitmap_buffer_size);
#endif
    if( !bitmap_buffer )
        return 0;

    BITMAPINFO bitmap_info;
    memset(&bitmap_info, 0, sizeof(bitmap_info));
    bitmap_info.bmiHeader.biSize = sizeof(bitmap_info.bmiHeader);
    bitmap_info.bmiHeader.biWidth = bitmap_size.cx;
    bitmap_info.bmiHeader.biHeight = bitmap_size.cy;
    bitmap_info.bmiHeader.biPlanes = 1;
    bitmap_info.bmiHeader.biBitCount = BytesPerPixel * 8; /* bits per pixel */
    bitmap_info.bmiHeader.biCompression = BI_RGB;
    if( flip_image ) /* this tells Windows where to set the origin (top or bottom) */
        bitmap_info.bmiHeader.biHeight *= -1;

    if( !GetDIBits(device_context,
                   bitmap_handle,
                   0, bitmap_size.cy,
                   bitmap_buffer,
                   &bitmap_info,
                   DIB_RGB_COLORS /* irrelevant, but GetDIBits expects a valid value */ )
    ) {
        free(bitmap_buffer);
        return 0;
    }

    GLuint texture_name;
    glGenTextures(1, &texture_name);
    glBindTexture(GL_TEXTURE_2D, texture_name);

    glPixelStorei(GL_UNPACK_SWAP_BYTES, GL_FALSE);
    glPixelStorei(GL_UNPACK_LSB_FIRST, GL_TRUE);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
    glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
                 bitmap_size.cx, bitmap_size.cy, 0,
                 GL_RGBA,
#ifdef USE_DWORD
                 GL_UNSIGNED_INT_8_8_8_8,
#else
                 GL_UNSIGNED_BYTE,
#endif
                 bitmap_buffer);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    free(bitmap_buffer);
    return texture_name;
}