My question: how do I make the color 255,200,255 transparent in OpenGL? (By transparent I mean removing/hiding every pixel of color 255,200,255, or whatever works...)
My texture loading functions are from this tutorial: http://nehe.gamedev.net/data/lessons/lesson.asp?lesson=33
Note that I don't have alpha channels to work with; I have a set of pre-made images in which the custom color (255,200,255) marks the pixels that must be made transparent/removed.
Some of the .tga loading functions from my program:
typedef struct
{
GLubyte * imageData; // Image Data (Up To 32 Bits)
GLuint bpp; // Image Color Depth In Bits Per Pixel
GLuint width; // Image Width
GLuint height; // Image Height
GLuint texID; // Texture ID Used To Select A Texture
GLuint type; // Image Type (GL_RGB, GL_RGBA)
} Texture;
Texture AllTextures[1000];
bool LoadUncompressedTGA(Texture * texture, char * filename, FILE * fTGA) // Load an uncompressed TGA (note, much of this code is based on NeHe's
{ // TGA Loading code nehe.gamedev.net)
if(fread(tga.header, sizeof(tga.header), 1, fTGA) == 0) // Read TGA header
{
MessageBox(NULL, "Could not read info header", "ERROR", MB_OK); // Display error
if(fTGA != NULL) // if file is still open
{
fclose(fTGA); // Close it
}
return false; // Return failed
}
texture->width = tga.header[1] * 256 + tga.header[0]; // Determine The TGA Width (highbyte*256+lowbyte)
texture->height = tga.header[3] * 256 + tga.header[2]; // Determine The TGA Height (highbyte*256+lowbyte)
texture->bpp = tga.header[4]; // Determine the bits per pixel
tga.Width = texture->width; // Copy width into local structure
tga.Height = texture->height; // Copy height into local structure
tga.Bpp = texture->bpp; // Copy BPP into local structure
if((texture->width <= 0) || (texture->height <= 0) || ((texture->bpp != 24) && (texture->bpp !=32))) // Make sure all information is valid
{
MessageBox(NULL, "Invalid texture information", "ERROR", MB_OK); // Display Error
if(fTGA != NULL) // Check if file is still open
{
fclose(fTGA); // If so, close it
}
return false; // Return failed
}
if(texture->bpp == 24) // If the BPP of the image is 24...
{
texture->type = GL_RGB; // Set image type to GL_RGB
}
else // Else if it's 32 BPP
{
texture->type = GL_RGBA; // Set image type to GL_RGBA
}
tga.bytesPerPixel = (tga.Bpp / 8); // Compute the number of BYTES per pixel
tga.imageSize = (tga.bytesPerPixel * tga.Width * tga.Height); // Compute the total amount of memory needed to store the data
texture->imageData = (GLubyte *)malloc(tga.imageSize); // Allocate that much memory
if(texture->imageData == NULL) // If no space was allocated
{
MessageBox(NULL, "Could not allocate memory for image", "ERROR", MB_OK); // Display Error
fclose(fTGA); // Close the file
return false; // Return failed
}
if(fread(texture->imageData, 1, tga.imageSize, fTGA) != tga.imageSize) // Attempt to read image data
{
MessageBox(NULL, "Could not read image data", "ERROR", MB_OK); // Display Error
if(texture->imageData != NULL) // If imagedata has data in it
{
free(texture->imageData); // Delete data from memory
}
fclose(fTGA); // Close file
return false; // Return failed
}
// Byte swapping (BGR -> RGB); credited to Steve Thomas in NeHe's code. A plain swap
// avoids the undefined behaviour of chaining ^= within a single statement.
for(GLuint cswap = 0; cswap < tga.imageSize; cswap += tga.bytesPerPixel)
{
GLubyte temp = texture->imageData[cswap];
texture->imageData[cswap] = texture->imageData[cswap+2];
texture->imageData[cswap+2] = temp;
}
fclose(fTGA); // Close file
return true; // Return success
}
void LoadMyTextureTGA(int id,char* texturename)
{
//texturename ex: "Data/Uncompressed.tga"
if(LoadTGA(&AllTextures[id], texturename))
{
//success
glGenTextures(1, &AllTextures[id].texID); // Create The Texture ( CHANGE )
glBindTexture(GL_TEXTURE_2D, AllTextures[id].texID);
glTexImage2D(GL_TEXTURE_2D, 0, 3, AllTextures[id].width, AllTextures[id].height, 0, GL_RGB, GL_UNSIGNED_BYTE, AllTextures[id].imageData);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
if (AllTextures[id].imageData) // If Texture Image Exists ( CHANGE )
{
free(AllTextures[id].imageData); // Free The Texture Image Memory ( CHANGE )
}
}
else
{
MessageBoxA(0,"Textures Loading Fail! Game will close now","Game Problem",0);
exit(1);
}
}
If texture->bpp == 24 instead of 32 (which means there is no built-in alpha channel), you have to generate a 32-bit OpenGL texture yourself: copy the RGB bytes across and set the alpha value of each texel to 0 if the TGA pixel is 255,200,255, and to 255 otherwise.
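A minimal sketch of that expansion, assuming the byte-swapped 24-bit RGB data the loader above produces (the function name and buffer handling are illustrative, not part of the original code):
GLubyte* MakeRGBAWithColorKey(const GLubyte* rgb, int width, int height)
{
    // Allocate a 4-bytes-per-pixel buffer for the RGBA copy
    GLubyte* rgba = (GLubyte*)malloc((size_t)width * height * 4);
    if (rgba == NULL)
        return NULL;
    for (int i = 0; i < width * height; ++i)
    {
        GLubyte r = rgb[i * 3 + 0];
        GLubyte g = rgb[i * 3 + 1];
        GLubyte b = rgb[i * 3 + 2];
        rgba[i * 4 + 0] = r;
        rgba[i * 4 + 1] = g;
        rgba[i * 4 + 2] = b;
        // Key colour 255,200,255 becomes fully transparent, everything else opaque
        rgba[i * 4 + 3] = (r == 255 && g == 200 && b == 255) ? 0 : 255;
    }
    return rgba;
}
You would then upload the new buffer with glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgba) and render with blending (or alpha testing) enabled.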
Well, if you're using textures, then I'm assuming you already have a fragment shader.
It's pretty easy actually.
First in the main program turn on alpha blending:
glEnable (GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
Then in the fragment shader add this code:
vec3 chromaKeyColor = texture(myTextureSampler,UV.xy).xyz;
float alpha;
if ((chromaKeyColor.x <= 0.01) && (chromaKeyColor.y <= 0.01) && (chromaKeyColor.z <= 0.01)){
alpha = 0.;
}
else
{
alpha = 1.0;
}
color = vec4(chromaKeyColor, alpha);
The above code uses black as the chroma key (within a threshold of 0.01). You can choose another colour by comparing against its normalized RGB value instead; for 255,200,255 that is roughly vec3(1.0, 0.784, 1.0). If the texel is not the chroma key colour, the alpha is set to full (1.0).
OpenGL and color-keying can be a little tricky. Using 1-bit alpha is probably easier, but I've seen SDL used for color-keying like you describe. This thread might help you out.
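If you do try the SDL route, here is a minimal SDL2 sketch of surface colour-keying, assuming you already have a loaded surface and a renderer (both names here are placeholders for your own setup code):
#include <SDL.h>

// Mark 255,200,255 as the transparent colour key on a surface,
// then build a texture from it.
SDL_Texture* TextureWithColorKey(SDL_Renderer* renderer, SDL_Surface* surface)
{
    SDL_SetColorKey(surface, SDL_TRUE, SDL_MapRGB(surface->format, 255, 200, 255));
    return SDL_CreateTextureFromSurface(renderer, surface);
}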
I want to make a red texture image buffer. Would anyone help me do it the right way? I have tried the following, but I could not copy the buffer into an ID3D11Texture2D:
std::vector<BYTE> redTexture(w*h*4);
const auto stride = w * 4;
BYTE* buf = redTexture.data();
for (int i = 0; i < h; ++i)
{
const auto redValue = Gdiplus::Color::Red;
memcpy(buf, &redValue, stride);
buf += stride;
}
If the DXGI_FORMAT of your texture is R8G8B8A8_UNORM, you can do it like this:
std::vector<BYTE> redTexture(w*h*4);
for(size_t i=0; i<redTexture.size(); i++)
{
if(i%4==0) // R byte
{
redTexture[i]=255;
}
else if(i%4==3) // A byte
{
redTexture[i]=255;
}
else // G and B bytes
{
redTexture[i]=0;
}
}
Since each pixel consists of a 4-byte RGBA value, you should set the first (R) and last (A) bytes to 255 and the other bytes (G and B) to 0 to get an opaque red color.
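For completeness, here is a sketch of one common way the filled buffer can then be handed to Direct3D 11; this part is an assumption on my side, and 'device', 'w' and 'h' are whatever you already have in your code:
// Create an immutable ID3D11Texture2D initialised from redTexture.
D3D11_TEXTURE2D_DESC desc = {};
desc.Width = w;
desc.Height = h;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_IMMUTABLE;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;

D3D11_SUBRESOURCE_DATA initData = {};
initData.pSysMem = redTexture.data();
initData.SysMemPitch = w * 4; // bytes per row

ID3D11Texture2D* pTexture = nullptr;
HRESULT hr = device->CreateTexture2D(&desc, &initData, &pTexture);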
Thanks #HMD. I have solved the issue by doing the following:
CImage m_cImage;
// create a test image
m_cImage.Create(w, -h, 8 * 4); // 8 bpp * 4 channel
auto hdc = m_cImage.GetDC();
Gdiplus::Graphics graphics(hdc);
// Create a SolidBrush object.
Gdiplus::SolidBrush redBrush(Gdiplus::Color::Red);
// Fill the rectangle.
Gdiplus::Status status = graphics.FillRectangle(&redBrush, 0, 0, w, h);
TRY_CONDITION(status == Gdiplus::Status::Ok);
....
// Then saved the m_cImage.GetBits() to bmp file using Gdiplus::Bitmap
// and my expected texture is found
I have been following a tutorial on how to use ffmpeg and SDL to make a simple video player with no audio (yet). While looking through the tutorial I realized it was out of date and many of the functions it used, for both ffmpeg and SDL, were deprecated. So I searched for an up-to-date solution and found a Stack Overflow answer that filled in what the tutorial was missing.
However, it uses YUV420, which is of lower quality. I want to implement YUV444, and after studying chroma subsampling for a bit and looking at the different YUV formats, I am confused as to how to implement it. From what I understand, YUV420 carries a quarter of the chroma information that YUV444 does. YUV444 means every pixel has its own chroma sample and as such is more detailed, while YUV420 means pixels are grouped together and share a chroma sample, and is therefore less detailed.
And from what I understand, the different YUV formats (420, 422, 444) differ in how they arrange and subsample Y, U, and V. All of this is a bit overwhelming because I haven't done much with codecs, conversions, etc. Any help would be much appreciated, and if additional info is needed please let me know before downvoting.
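To put rough numbers on that difference, here is a small sketch of the per-plane byte counts for the two planar formats (8 bits per sample; w and h are the frame dimensions, and the variable names are mine):
// YUV420P: chroma is subsampled 2x2; YUV444P: chroma is full resolution.
size_t ySize420 = (size_t)w * h;     // Y plane
size_t cSize420 = (size_t)w * h / 4; // each of the U and V planes
size_t ySize444 = (size_t)w * h;     // Y plane
size_t cSize444 = (size_t)w * h;     // each of the U and V planes (no subsampling)
// Totals: 1.5 bytes per pixel for 4:2:0 versus 3 bytes per pixel for 4:4:4.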
Here is the code from the answer I mentioned concerning the conversion to YUV420:
texture = SDL_CreateTexture(
renderer,
SDL_PIXELFORMAT_YV12,
SDL_TEXTUREACCESS_STREAMING,
pCodecCtx->width,
pCodecCtx->height
);
if (!texture) {
fprintf(stderr, "SDL: could not create texture - exiting\n");
exit(1);
}
// initialize SWS context for software scaling
sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL);
// set up YV12 pixel array (12 bits per pixel)
yPlaneSz = pCodecCtx->width * pCodecCtx->height;
uvPlaneSz = pCodecCtx->width * pCodecCtx->height / 4;
yPlane = (Uint8*)malloc(yPlaneSz);
uPlane = (Uint8*)malloc(uvPlaneSz);
vPlane = (Uint8*)malloc(uvPlaneSz);
if (!yPlane || !uPlane || !vPlane) {
fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
exit(1);
}
uvPitch = pCodecCtx->width / 2;
while (av_read_frame(pFormatCtx, &packet) >= 0) {
// Is this a packet from the video stream?
if (packet.stream_index == videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished) {
AVPicture pict;
pict.data[0] = yPlane;
pict.data[1] = uPlane;
pict.data[2] = vPlane;
pict.linesize[0] = pCodecCtx->width;
pict.linesize[1] = uvPitch;
pict.linesize[2] = uvPitch;
// Convert the image into YUV format that SDL uses
sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pict.data,
pict.linesize);
SDL_UpdateYUVTexture(
texture,
NULL,
yPlane,
pCodecCtx->width,
uPlane,
uvPitch,
vPlane,
uvPitch
);
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
SDL_PollEvent(&event);
switch (event.type) {
case SDL_QUIT:
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
exit(0);
break;
default:
break;
}
}
// Free the YUV frame
av_frame_free(&pFrame);
free(yPlane);
free(uPlane);
free(vPlane);
// Close the codec
avcodec_close(pCodecCtx);
avcodec_close(pCodecCtxOrig);
// Close the video file
avformat_close_input(&pFormatCtx);
EDIT:
After more research I learned that YUV420 is stored with all of the Y bytes first, followed by the U and V bytes, as illustrated by a diagram I found (source: wikimedia.org).
However, I also learned that a packed YUV444 layout stores the bytes in the order U, Y, V, repeating for every pixel.
So I tried changing some things around in the code:
// I changed SDL_PIXELFORMAT_YV12 to SDL_PIXELFORMAT_UYVY
// as to reflect the order of YUV444
texture = SDL_CreateTexture(
renderer,
SDL_PIXELFORMAT_UYVY,
SDL_TEXTUREACCESS_STREAMING,
pCodecCtx->width,
pCodecCtx->height
);
if (!texture) {
fprintf(stderr, "SDL: could not create texture - exiting\n");
exit(1);
}
// Changed AV_PIX_FMT_YUV420P to AV_PIX_FMT_YUV444P
// for rather obvious reasons
sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_YUV444P,
SWS_BILINEAR,
NULL,
NULL,
NULL);
// Since there are as many Y, U and V bytes as pixels, I just
// made yPlaneSz and uvPlaneSz equal to the number of pixels
yPlaneSz = pCodecCtx->width * pCodecCtx->height;
uvPlaneSz = pCodecCtx->width * pCodecCtx->height;
yPlane = (Uint8*)malloc(yPlaneSz);
uPlane = (Uint8*)malloc(uvPlaneSz);
vPlane = (Uint8*)malloc(uvPlaneSz);
if (!yPlane || !uPlane || !vPlane) {
fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
exit(1);
}
uvPitch = pCodecCtx->width * 2;
while (av_read_frame(pFormatCtx, &packet) >= 0) {
// Is this a packet from the video stream?
if (packet.stream_index == videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// Rearranged the order of the planes to reflect UYV order
// then set linesize to the number of Y, U and V bytes
// per row
if (frameFinished) {
AVPicture pict;
pict.data[0] = uPlane;
pict.data[1] = yPlane;
pict.data[2] = vPlane;
pict.linesize[0] = pCodecCtx->width;
pict.linesize[1] = pCodecCtx->width;
pict.linesize[2] = pCodecCtx->width;
// Convert the image into YUV format that SDL uses
sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pict.data,
pict.linesize);
SDL_UpdateYUVTexture(
texture,
NULL,
yPlane,
1,
uPlane,
uvPitch,
vPlane,
uvPitch
);
//.................................................
But now I get an access violation at the call to SDL_UpdateYUVTexture... I'm honestly not sure what's wrong. I think it may have to do with setting AVPicture pict's data and linesize members improperly, but I'm not positive.
After many hours of scouring the web for possible answers, I stumbled upon this post in which someone was asking about YUV444 support in packed or planar mode. The only packed YUV444 format I've found is AYUV.
The answer they got was a list of all the currently supported formats, which did not include AYUV. Therefore SDL does not support YUV444.
The only solution is to use a different library that supports AYUV / YUV444.
We want to create an SDL surface by loading an image with SDL_image and, if the dimensions exceed a limit, resize the surface.
The reason we need to do this is that on Raspbian SDL throws an error creating a texture from the surface ('Texture dimensions are limited to 2048x2048'). Whilst that's a very large image, we don't want users to be concerned about image size; we want to resize it for them. Although we haven't encountered this limit on Windows, we're trying to develop the solution on Windows and are having issues resizing the surface.
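(For reference, the actual limit can be queried from the renderer at runtime instead of being hard-coded; a small SDL2 sketch of that:)
// Query the renderer's texture size limit instead of assuming 2048x2048.
SDL_RendererInfo info;
if (SDL_GetRendererInfo(pRenderer, &info) == 0) {
    printf("Max texture size: %dx%d\n",
           info.max_texture_width, info.max_texture_height);
}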
Looking for a solution, we found similar questions:
2008: not SDL2, custom blitting
2010: use SDL_gfx
2008: can't be done, use SDL_gfx
2015: use SDL_BlitScaled
2015: use SDL_RenderCopyEx
Is a custom blitter or SDL_gfx necessary with current SDL2 (those answers pre-date SDL2's 2013 release)? SDL_RenderCopyEx doesn't help, as you still need to create the texture, which is where our problem occurs.
So we tried some of the available blitting functions, like SDL_BlitScaled. Below is a simple program to render a 2500x2500 PNG with no scaling:
#include <SDL.h>
#include <SDL_image.h>
#include <sstream>
#include <string>
SDL_Texture * get_texture(
SDL_Renderer * pRenderer,
std::string image_filename) {
SDL_Texture * result = NULL;
SDL_Surface * pSurface = IMG_Load(image_filename.c_str());
if (pSurface == NULL) {
printf("Error image load: %s\n", IMG_GetError());
}
else {
SDL_Texture * pTexture = SDL_CreateTextureFromSurface(pRenderer, pSurface);
if (pTexture == NULL) {
printf("Error image load: %s\n", SDL_GetError());
}
else {
SDL_SetTextureBlendMode(
pTexture,
SDL_BLENDMODE_BLEND);
result = pTexture;
}
SDL_FreeSurface(pSurface);
pSurface = NULL;
}
return result;
}
int main(int argc, char* args[]) {
SDL_Window * pWindow = NULL;
SDL_Renderer * pRenderer = NULL;
// set up
SDL_Init(SDL_INIT_VIDEO);
SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "1");
SDL_Rect screenDimensions;
screenDimensions.x = 0;
screenDimensions.y = 0;
screenDimensions.w = 640;
screenDimensions.h = 480;
pWindow = SDL_CreateWindow("Resize Test",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
screenDimensions.w,
screenDimensions.h,
SDL_WINDOW_SHOWN);
pRenderer = SDL_CreateRenderer(pWindow,
-1,
SDL_RENDERER_ACCELERATED);
IMG_Init(IMG_INIT_PNG);
// render
SDL_SetRenderDrawColor(
pRenderer,
0,
0,
0,
0);
SDL_RenderClear(pRenderer);
SDL_Texture * pTexture = get_texture(
pRenderer,
"2500x2500.png");
if (pTexture != NULL) {
SDL_RenderCopy(
pRenderer,
pTexture,
NULL,
&screenDimensions);
SDL_DestroyTexture(pTexture);
pTexture = NULL;
}
SDL_RenderPresent(pRenderer);
// wait for quit
bool quit = false;
while (!quit)
{
// poll input for quit
SDL_Event inputEvent;
while (SDL_PollEvent(&inputEvent) != 0) {
if ((inputEvent.type == SDL_KEYDOWN) &&
(inputEvent.key.keysym.sym == SDLK_q)) { // 'q' quits
quit = true;
}
}
}
IMG_Quit();
SDL_DestroyRenderer(pRenderer);
pRenderer = NULL;
SDL_DestroyWindow(pWindow);
pWindow = NULL;
return 0;
}
Changing the get_texture function so it identifies a limit and tries to create a new surface:
SDL_Texture * get_texture(
SDL_Renderer * pRenderer,
std::string image_filename) {
SDL_Texture * result = NULL;
SDL_Surface * pSurface = IMG_Load(image_filename.c_str());
if (pSurface == NULL) {
printf("Error image load: %s\n", IMG_GetError());
}
else {
const int limit = 1024;
int width = pSurface->w;
int height = pSurface->h;
if ((width > limit) ||
(height > limit)) {
SDL_Rect sourceDimensions;
sourceDimensions.x = 0;
sourceDimensions.y = 0;
sourceDimensions.w = width;
sourceDimensions.h = height;
float scale = (float)limit / (float)width;
float scaleH = (float)limit / (float)height;
if (scaleH < scale) {
scale = scaleH;
}
SDL_Rect targetDimensions;
targetDimensions.x = 0;
targetDimensions.y = 0;
targetDimensions.w = (int)(width * scale);
targetDimensions.h = (int)(height * scale);
SDL_Surface *pScaleSurface = SDL_CreateRGBSurface(
pSurface->flags,
targetDimensions.w,
targetDimensions.h,
pSurface->format->BitsPerPixel,
pSurface->format->Rmask,
pSurface->format->Gmask,
pSurface->format->Bmask,
pSurface->format->Amask);
if (SDL_BlitScaled(pSurface, NULL, pScaleSurface, &targetDimensions) < 0) {
printf("Error did not scale surface: %s\n", SDL_GetError());
SDL_FreeSurface(pScaleSurface);
pScaleSurface = NULL;
}
else {
SDL_FreeSurface(pSurface);
pSurface = pScaleSurface;
width = pSurface->w;
height = pSurface->h;
}
}
SDL_Texture * pTexture = SDL_CreateTextureFromSurface(pRenderer, pSurface);
if (pTexture == NULL) {
printf("Error image load: %s\n", SDL_GetError());
}
else {
SDL_SetTextureBlendMode(
pTexture,
SDL_BLENDMODE_BLEND);
result = pTexture;
}
SDL_FreeSurface(pSurface);
pSurface = NULL;
}
return result;
}
SDL_BlitScaled fails with the error 'Blit combination not supported'; other variations give a similar error:
SDL_BlitScaled(pSurface, NULL, pScaleSurface, NULL)
SDL_BlitScaled(pSurface, &sourceDimensions, pScaleSurface, &targetDimensions)
SDL_LowerBlitScaled(pSurface, &sourceDimensions, pScaleSurface, &targetDimensions) // from the wiki this is the call SDL_BlitScaled makes internally
Then we tried a non-scaled blit... which didn't throw an error but just showed white (not the clear colour or a colour in the image).
SDL_BlitSurface(pSurface, &targetDimensions, pScaleSurface, &targetDimensions)
With that blitting function not working, we then tried the same image as a bitmap (just exporting the .png as .bmp), still loading the file with SDL_image, and both of those functions work, with SDL_BlitScaled scaling as expected 😐
Not sure what's going wrong here (we expect and need support for major image file formats like .png) or whether this is even the recommended approach; any help appreciated!
TL;DR: The comment from #kelter pointed me in the right direction and another Stack Overflow question gave me a solution: it works if you first Blit to a 32bpp surface and then BlitScaled to another 32bpp surface. That worked for 8 and 24 bit depth PNGs; 32 bit ones were invisible until, as yet another Stack Overflow question suggested, I filled the destination surface before blitting.
An updated get_texture function:
SDL_Texture * get_texture(
SDL_Renderer * pRenderer,
std::string image_filename) {
SDL_Texture * result = NULL;
SDL_Surface * pSurface = IMG_Load(image_filename.c_str());
if (pSurface == NULL) {
printf("Error image load: %s\n", IMG_GetError());
}
else {
const int limit = 1024;
int width = pSurface->w;
int height = pSurface->h;
if ((width > limit) ||
(height > limit)) {
SDL_Rect sourceDimensions;
sourceDimensions.x = 0;
sourceDimensions.y = 0;
sourceDimensions.w = width;
sourceDimensions.h = height;
float scale = (float)limit / (float)width;
float scaleH = (float)limit / (float)height;
if (scaleH < scale) {
scale = scaleH;
}
SDL_Rect targetDimensions;
targetDimensions.x = 0;
targetDimensions.y = 0;
targetDimensions.w = (int)(width * scale);
targetDimensions.h = (int)(height * scale);
// create a 32 bits per pixel surface to Blit the image to first, before BlitScaled
// https://stackoverflow.com/questions/33850453/sdl2-blit-scaled-from-a-palettized-8bpp-surface-gives-error-blit-combination/33944312
SDL_Surface *p32BPPSurface = SDL_CreateRGBSurface(
pSurface->flags,
sourceDimensions.w,
sourceDimensions.h,
32,
pSurface->format->Rmask,
pSurface->format->Gmask,
pSurface->format->Bmask,
pSurface->format->Amask);
if (SDL_BlitSurface(pSurface, NULL, p32BPPSurface, NULL) < 0) {
printf("Error did not blit surface: %s\n", SDL_GetError());
}
else {
// create another 32 bits per pixel surface at the desired scale
SDL_Surface *pScaleSurface = SDL_CreateRGBSurface(
p32BPPSurface->flags,
targetDimensions.w,
targetDimensions.h,
p32BPPSurface->format->BitsPerPixel,
p32BPPSurface->format->Rmask,
p32BPPSurface->format->Gmask,
p32BPPSurface->format->Bmask,
p32BPPSurface->format->Amask);
// 32 bpp surfaces (loaded from the original file) wouldn't scale down with BlitScaled; the suggestion was to fill the target surface first
// 8 and 24 bit depth pngs did not require this
// https://stackoverflow.com/questions/20587999/sdl-blitscaled-doesnt-work
SDL_FillRect(pScaleSurface, &targetDimensions, SDL_MapRGBA(pScaleSurface->format, 255, 0, 0, 255));
if (SDL_BlitScaled(p32BPPSurface, NULL, pScaleSurface, NULL) < 0) {
printf("Error did not scale surface: %s\n", SDL_GetError());
SDL_FreeSurface(pScaleSurface);
pScaleSurface = NULL;
}
else {
SDL_FreeSurface(pSurface);
pSurface = pScaleSurface;
width = pSurface->w;
height = pSurface->h;
}
}
SDL_FreeSurface(p32BPPSurface);
p32BPPSurface = NULL;
}
SDL_Texture * pTexture = SDL_CreateTextureFromSurface(pRenderer, pSurface);
if (pTexture == NULL) {
printf("Error image load: %s\n", SDL_GetError());
}
else {
SDL_SetTextureBlendMode(
pTexture,
SDL_BLENDMODE_BLEND);
result = pTexture;
}
SDL_FreeSurface(pSurface);
pSurface = NULL;
}
return result;
}
The comment from #kelter had me look more closely at the surface pixel formats: bitmaps were working at 24bpp, while PNGs were being loaded at 8bpp and not working. Trying to change the target surface to 24 or 32 bpp didn't help. We had generated the PNG with auto-detected bit depth; setting it to 8 or 24 and performing the BlitScaled on a surface with the same bits-per-pixel worked, although it didn't work for 32. Googling the blit conversion error led to the question and answer from #Petruza.
Update: I was a bit quick writing up this answer; the original solution handled BMPs and 8 and 24 bit PNGs, but 32 bit PNGs weren't rendering. #Retired Ninja's answer to another question about SDL_BlitScaled suggested filling the surface before calling the function, and that sorts it. There's another question related to setting alpha on new surfaces that may be relevant (particularly if you need transparency), but filling with a solid colour is enough for me... for now.
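As an aside (not what I used above, just an alternative worth noting): SDL2 also has SDL_ConvertSurfaceFormat, which can replace the manual CreateRGBSurface-plus-Blit step by converting the loaded surface to a known 32bpp format before the scaled blit. A rough sketch:
// Convert whatever IMG_Load produced to RGBA8888 first,
// then BlitScaled from the converted surface as in the answer above.
SDL_Surface* p32BPPSurface = SDL_ConvertSurfaceFormat(pSurface, SDL_PIXELFORMAT_RGBA8888, 0);
if (p32BPPSurface == NULL) {
    printf("Error converting surface: %s\n", SDL_GetError());
}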
I want to draw some OpenGL in a simple MFC app.
Process:
I start a fresh MFC/SDI project.
I link opengl32.lib/glu32.lib to my project, and include gl.h and glu.h too.
I implement the required OpenGL methods (OnCreate/PreCreateWindow/PIXELFORMATDESCRIPTOR) (https://www.microsoft.com/msj/archive/S2085.aspx).
I can build without errors.
In my OnDraw function, if I do nothing, nothing happens! ( \o/)
When I start to write some OpenGL code => black screen (I tested this code before in a valid MFC-OpenGL project, and it works).
My OnDraw():
void CMFCopenGLtestView::OnDraw(CDC* pDC){
ValidateRect(NULL);
CRect rect;
GetClientRect(&rect);
pDC->SetTextColor(RGB(255, 0, 0));
wglMakeCurrent(m_hDC, m_hRC);
//://
// HERE i WANT TO DRAW SOME OPENGL
//://
SwapBuffers(m_hDC);
wglMakeCurrent(m_hDC, NULL);
pDC->DrawText(_T("test"), 16, &rect, 1);
}
OnCreate():
int CMFCopenGLtestView::OnCreate(LPCREATESTRUCT lpCreateStruct)
{
if (CView::OnCreate(lpCreateStruct) == -1)
return -1;
// TODO: Add your specialized creation code here
InitializeOpenGL();
return 0;
}
BOOL CMFCopenGLtestView::InitializeOpenGL()
{
m_pDC = new CClientDC(this);
if (NULL == m_pDC) // failure to get DC
{
return FALSE;
}
if (!SetupPixelFormat(NULL))
{
return FALSE;
}
if (0 == (m_hRC =
::wglCreateContext(m_pDC->GetSafeHdc())))
{
return FALSE;
}
if (FALSE ==
::wglMakeCurrent(m_pDC->GetSafeHdc(), m_hRC))
{
return FALSE;
}
// specify black as clear color
::glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
// specify the back of the buffer as clear depth
::glClearDepth(1.0f);
// enable depth testing
::glEnable(GL_DEPTH_TEST);
return TRUE;
}
BOOL CMFCopenGLtestView::SetupPixelFormat(
PIXELFORMATDESCRIPTOR* pPFD)
{
// default pixel format for a single-buffered,
// OpenGL-supporting, hardware-accelerated,
// RGBA-mode format. Pass in a pointer to a different
// pixel format if you want something else
PIXELFORMATDESCRIPTOR pfd =
{
sizeof(PIXELFORMATDESCRIPTOR),// size of this pfd
1, // version number
PFD_DRAW_TO_WINDOW | // support window
PFD_SUPPORT_OPENGL, // support OpenGL
PFD_TYPE_RGBA, // RGBA type
24, // 24-bit color depth
0, 0, 0, 0, 0, 0, // color bits ignored
0, // no alpha buffer
0, // shift bit ignored
0, // no accumulation buffer
0, 0, 0, 0, // accum bits ignored
16, // 16-bit z-buffer
0, // no stencil buffer
0, // no auxiliary buffer
PFD_MAIN_PLANE, // main layer
0, // reserved
0, 0, 0 // layer masks ignored
};
int pixelformat;
PIXELFORMATDESCRIPTOR* pPFDtoUse;
// let the user override the default pixel format
pPFDtoUse = (0 == pPFD) ? &pfd : pPFD;
if (0 == (pixelformat =
::ChoosePixelFormat(m_pDC->GetSafeHdc(), pPFDtoUse)))
{
return FALSE;
}
if (FALSE == ::SetPixelFormat(m_pDC->GetSafeHdc(),
pixelformat, pPFDtoUse))
{
return FALSE;
}
return TRUE;
}
If you have any ideas about what I'm doing wrong, I'll take all of them!
I'm using the glgrab code to try and grab a full-screen screenshot of the Mac screen. However, I want the bitmap data to be in the GL_RGB format. That is, each pixel should be in the format:
0x00RRGGBB
The original code specified the GL_BGRA format. However, changing that to GL_RGB gives me a completely blank result. The complete source code I'm using is:
CGImageRef grabViaOpenGL(CGDirectDisplayID display, CGRect srcRect)
{
CGContextRef bitmap;
CGImageRef image;
void * data;
long bytewidth;
GLint width, height;
long bytes;
CGColorSpaceRef cSpace = CGColorSpaceCreateWithName (kCGColorSpaceGenericRGB);
CGLContextObj glContextObj;
CGLPixelFormatObj pixelFormatObj ;
GLint numPixelFormats ;
//CGLPixelFormatAttribute
int attribs[] =
{
// kCGLPFAClosestPolicy,
kCGLPFAFullScreen,
kCGLPFADisplayMask,
NULL, /* Display mask bit goes here */
kCGLPFAColorSize, 24,
kCGLPFAAlphaSize, 0,
kCGLPFADepthSize, 32,
kCGLPFASupersample,
NULL
} ;
if ( display == kCGNullDirectDisplay )
display = CGMainDisplayID();
attribs[2] = CGDisplayIDToOpenGLDisplayMask(display);
/* Build a full-screen GL context */
CGLChoosePixelFormat( (CGLPixelFormatAttribute*) attribs, &pixelFormatObj, &numPixelFormats );
if ( pixelFormatObj == NULL ) // No full screen context support
{
// GL didn't find any suitable pixel formats. Try again without the supersample bit:
attribs[10] = NULL;
CGLChoosePixelFormat( (CGLPixelFormatAttribute*) attribs, &pixelFormatObj, &numPixelFormats );
if (pixelFormatObj == NULL)
{
qDebug("Unable to find an openGL pixel format that meets constraints");
return NULL;
}
}
CGLCreateContext( pixelFormatObj, NULL, &glContextObj ) ;
CGLDestroyPixelFormat( pixelFormatObj ) ;
if ( glContextObj == NULL )
{
qDebug("Unable to create OpenGL context");
return NULL;
}
CGLSetCurrentContext( glContextObj ) ;
CGLSetFullScreen( glContextObj ) ;
glReadBuffer(GL_FRONT);
width = srcRect.size.width;
height = srcRect.size.height;
bytewidth = width * 4; // Assume 4 bytes/pixel for now
bytewidth = (bytewidth + 3) & ~3; // Align to 4 bytes
bytes = bytewidth * height; // width * height
/* Build bitmap context */
data = malloc(height * bytewidth);
if ( data == NULL )
{
CGLSetCurrentContext( NULL );
CGLClearDrawable( glContextObj ); // disassociate from full screen
CGLDestroyContext( glContextObj ); // and destroy the context
qDebug("OpenGL drawable clear failed");
return NULL;
}
bitmap = CGBitmapContextCreate(data, width, height, 8, bytewidth,
cSpace, kCGImageAlphaNoneSkipFirst /* XRGB */);
CFRelease(cSpace);
/* Read framebuffer into our bitmap */
glFinish(); /* Finish all OpenGL commands */
glPixelStorei(GL_PACK_ALIGNMENT, 4); /* Force 4-byte alignment */
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glPixelStorei(GL_PACK_SKIP_ROWS, 0);
glPixelStorei(GL_PACK_SKIP_PIXELS, 0);
/*
* Fetch the data in XRGB format, matching the bitmap context.
*/
glReadPixels((GLint)srcRect.origin.x, (GLint)srcRect.origin.y, width, height,
GL_RGB,
#ifdef __BIG_ENDIAN__
GL_UNSIGNED_INT_8_8_8_8_REV, // for PPC
#else
GL_UNSIGNED_INT_8_8_8_8, // for Intel! http://lists.apple.com/archives/quartz-dev/2006/May/msg00100.html
#endif
data);
/*
* glReadPixels generates a quadrant I raster, with origin in the lower left
* This isn't a problem for signal processing routines such as compressors,
* as they can simply use a negative 'advance' to move between scanlines.
* CGImageRef and CGBitmapContext assume a quadrant III raster, though, so we need to
* invert it. Pixel reformatting can also be done here.
*/
swizzleBitmap(data, bytewidth, height);
/* Make an image out of our bitmap; does a cheap vm_copy of the bitmap */
image = CGBitmapContextCreateImage(bitmap);
/* Get rid of bitmap */
CFRelease(bitmap);
free(data);
/* Get rid of GL context */
CGLSetCurrentContext( NULL );
CGLClearDrawable( glContextObj ); // disassociate from full screen
CGLDestroyContext( glContextObj ); // and destroy the context
/* Returned image has a reference count of 1 */
return image;
}
I'm completely new to OpenGL, so I'd appreciate some pointers in the right direction. Cheers!
Update:
After some experimentation, I have managed to narrow my problem down. My problem is that while I don't want the alpha component, I do want each pixel to be packed to 4-byte boundaries. Now, when I specify GL_RGB or GL_BGR formats in the glReadPixels call, I get the bitmap data packed in 3-byte blocks. When I specify GL_RGBA or GL_BGRA, I get 4-byte blocks, but always with the alpha channel component last.
I then tried changing the value passed to
bitmap = CGBitmapContextCreate(data, width, height, 8, bytewidth,cSpace, kCGImageAlphaNoneSkipFirst /* XRGB */);
however, no variation of AlphaNoneSkipFirst or AlphaNoneSkipLast puts the alpha channel at the start of the pixel byte block.
Any ideas?
I'm not a Mac guy, but if you can get RGBA data and want XRGB, can't you just bitshift each pixel down eight bits?
// Shift each packed 32-bit pixel value right by 8 bits (RGBA -> 0x00RRGGBB),
// as a plain loop over the 'data' buffer from the code above.
unsigned int* pixels = (unsigned int*)data;
for (long i = 0; i < width * height; ++i)
{
    pixels[i] >>= 8;
}
Try with GL_UNSIGNED_BYTE instead of GL_UNSIGNED_INT_8_8_8_8_REV / GL_UNSIGNED_INT_8_8_8_8.
Although it seems you actually want GL_RGBA instead -- in that case it should work with either 8_8_8_8_REV or 8_8_8_8.
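A sketch of what that suggestion looks like in the glReadPixels call from the question (only the format/type arguments change; the rest of the surrounding code stays as it is):
// Read 4 bytes per pixel with a byte-sized type, as suggested above;
// the alpha byte can be masked off afterwards if it is not wanted.
glReadPixels((GLint)srcRect.origin.x, (GLint)srcRect.origin.y, width, height,
             GL_RGBA, GL_UNSIGNED_BYTE, data);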
When I use GL_BGRA, the data is returned pre-swizzled, which is confirmed by the colors looking correct when I display the result in a window.
Contact me if you want the project I created. Hope this helps.